diff --git a/.gitignore b/.gitignore index 451f208d2b..faf122334c 100755 --- a/.gitignore +++ b/.gitignore @@ -13,7 +13,6 @@ TAGS akka.tmproj reports dist -build target deploy/*.jar data @@ -52,4 +51,4 @@ akka-tutorials/akka-tutorial-first/project/plugins/project/ akka-docs/exts/ _akka_cluster/ Makefile -akka.sublime-project \ No newline at end of file +akka.sublime-project diff --git a/akka-actor-tests/src/test/java/akka/dispatch/JavaFutureTests.java b/akka-actor-tests/src/test/java/akka/dispatch/JavaFutureTests.java index cdec7f5631..e908335666 100644 --- a/akka-actor-tests/src/test/java/akka/dispatch/JavaFutureTests.java +++ b/akka-actor-tests/src/test/java/akka/dispatch/JavaFutureTests.java @@ -4,13 +4,13 @@ import org.junit.Test; import static org.junit.Assert.*; import java.util.concurrent.Callable; import java.util.LinkedList; +import java.lang.Iterable; import akka.japi.Function; +import akka.japi.Function2; import akka.japi.Procedure; import scala.Some; import scala.Right; -import static akka.dispatch.Futures.future; -import static akka.dispatch.Futures.traverse; -import static akka.dispatch.Futures.sequence; +import static akka.dispatch.Futures.*; public class JavaFutureTests { @@ -44,9 +44,75 @@ public class JavaFutureTests { })); } - Future> futureList = sequence(listFutures); + Future> futureList = sequence(listFutures); assertEquals(futureList.get(), listExpected); } + // TODO: Improve this test, perhaps with an Actor + @Test public void foldForJavaApiMustWork() { + LinkedList> listFutures = new LinkedList>(); + StringBuilder expected = new StringBuilder(); + + for (int i = 0; i < 10; i++) { + expected.append("test"); + listFutures.add(future(new Callable() { + public String call() { + return "test"; + } + })); + } + + Future result = fold("", 15000,listFutures, new Function2(){ + public String apply(String r, String t) { + return r + t; + } + }); + + assertEquals(result.get(), expected.toString()); + } + + @Test public void reduceForJavaApiMustWork() 
{ + LinkedList> listFutures = new LinkedList>(); + StringBuilder expected = new StringBuilder(); + + for (int i = 0; i < 10; i++) { + expected.append("test"); + listFutures.add(future(new Callable() { + public String call() { + return "test"; + } + })); + } + + Future result = reduce(listFutures, 15000, new Function2(){ + public String apply(String r, String t) { + return r + t; + } + }); + + assertEquals(result.get(), expected.toString()); + } + + @Test public void traverseForJavaApiMustWork() { + LinkedList listStrings = new LinkedList(); + LinkedList expectedStrings = new LinkedList(); + + for (int i = 0; i < 10; i++) { + expectedStrings.add("TEST"); + listStrings.add("test"); + } + + Future> result = traverse(listStrings, new Function>(){ + public Future apply(final String r) { + return future(new Callable() { + public String call() { + return r.toUpperCase(); + } + }); + } + }); + + assertEquals(result.get(), expectedStrings); + } } diff --git a/akka-actor-tests/src/test/scala/akka/actor/actor/Bench.scala b/akka-actor-tests/src/test/scala/akka/actor/actor/Bench.scala index f018de635c..1f121babd5 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/actor/Bench.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/actor/Bench.scala @@ -78,7 +78,7 @@ object Chameneos { var sumMeetings = 0 var numFaded = 0 - override def preStart = { + override def preStart() = { for (i <- 0 until numChameneos) actorOf(new Chameneo(self, colours(i % 3), i)) } diff --git a/akka-actor-tests/src/test/scala/akka/actor/supervisor/RestartStrategySpec.scala b/akka-actor-tests/src/test/scala/akka/actor/supervisor/RestartStrategySpec.scala index f2a3103d08..c2af94ba1a 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/supervisor/RestartStrategySpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/supervisor/RestartStrategySpec.scala @@ -46,7 +46,7 @@ class RestartStrategySpec extends JUnitSuite { secondRestartLatch.open } - override def postStop = { + override def postStop() 
= { stopLatch.open } }) @@ -131,7 +131,7 @@ class RestartStrategySpec extends JUnitSuite { thirdRestartLatch.open } - override def postStop = { + override def postStop() = { if (restartLatch.isOpen) { secondRestartLatch.open } @@ -189,7 +189,7 @@ class RestartStrategySpec extends JUnitSuite { secondRestartLatch.open } - override def postStop = { + override def postStop() = { stopLatch.open } }) @@ -243,7 +243,7 @@ class RestartStrategySpec extends JUnitSuite { restartLatch.open } - override def postStop = { + override def postStop() = { stopLatch.open } }) diff --git a/akka-actor-tests/src/test/scala/akka/actor/supervisor/SupervisorSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/supervisor/SupervisorSpec.scala index fe00811959..67b2a00c54 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/supervisor/SupervisorSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/supervisor/SupervisorSpec.scala @@ -385,7 +385,7 @@ class SupervisorSpec extends WordSpec with MustMatchers with BeforeAndAfterEach inits.get must be (3) - supervisor.shutdown + supervisor.shutdown() } } } diff --git a/akka-actor-tests/src/test/scala/akka/actor/supervisor/Ticket669Spec.scala b/akka-actor-tests/src/test/scala/akka/actor/supervisor/Ticket669Spec.scala index 5e3c23ee67..6ca1d2d7c9 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/supervisor/Ticket669Spec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/supervisor/Ticket669Spec.scala @@ -65,7 +65,7 @@ object Ticket669Spec { self.reply_?("failure1") } - override def postStop { + override def postStop() { self.reply_?("failure2") } } diff --git a/akka-actor-tests/src/test/scala/akka/config/ConfigSpec.scala b/akka-actor-tests/src/test/scala/akka/config/ConfigSpec.scala index 5c8be5c801..722e638c2c 100644 --- a/akka-actor-tests/src/test/scala/akka/config/ConfigSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/config/ConfigSpec.scala @@ -18,7 +18,7 @@ class ConfigSpec extends WordSpec with MustMatchers { 
getList("akka.boot") must equal(Nil) getString("akka.time-unit") must equal(Some("seconds")) - getString("akka.version") must equal(Some("1.1-SNAPSHOT")) + getString("akka.version") must equal(Some("1.2-SNAPSHOT")) getString("akka.actor.default-dispatcher.type") must equal(Some("GlobalExecutorBasedEventDriven")) getInt("akka.actor.default-dispatcher.keep-alive-time") must equal(Some(60)) diff --git a/akka-actor-tests/src/test/scala/akka/dataflow/DataFlowSpec.scala b/akka-actor-tests/src/test/scala/akka/dataflow/DataFlowSpec.scala new file mode 100644 index 0000000000..412605c02b --- /dev/null +++ b/akka-actor-tests/src/test/scala/akka/dataflow/DataFlowSpec.scala @@ -0,0 +1,165 @@ +/** + * Copyright (C) 2009-2011 Scalable Solutions AB + */ + +package akka.dataflow + +import org.scalatest.Spec +import org.scalatest.Assertions +import org.scalatest.matchers.ShouldMatchers +import org.scalatest.BeforeAndAfterAll +import org.scalatest.junit.JUnitRunner +import org.junit.runner.RunWith + +import akka.dispatch.DefaultCompletableFuture +import java.util.concurrent.{TimeUnit, CountDownLatch} +import annotation.tailrec +import java.util.concurrent.atomic.{AtomicLong, AtomicReference, AtomicInteger} +import akka.actor.ActorRegistry + +@RunWith(classOf[JUnitRunner]) +class DataFlowTest extends Spec with ShouldMatchers with BeforeAndAfterAll { + describe("DataflowVariable") { + it("should be able to set the value of one variable from other variables") { + import DataFlow._ + + val latch = new CountDownLatch(1) + val result = new AtomicInteger(0) + val x, y, z = new DataFlowVariable[Int] + thread { + z << x() + y() + result.set(z()) + latch.countDown() + } + thread { x << 40 } + thread { y << 2 } + + latch.await(10,TimeUnit.SECONDS) should equal (true) + result.get should equal (42) + List(x,y,z).foreach(_.shutdown()) + } + + it("should be able to sum a sequence of ints") { + import DataFlow._ + + def ints(n: Int, max: Int): List[Int] = + if (n == max) Nil + else n :: ints(n + 
1, max) + + def sum(s: Int, stream: List[Int]): List[Int] = stream match { + case Nil => s :: Nil + case h :: t => s :: sum(h + s, t) + } + + val latch = new CountDownLatch(1) + val result = new AtomicReference[List[Int]](Nil) + val x = new DataFlowVariable[List[Int]] + val y = new DataFlowVariable[List[Int]] + val z = new DataFlowVariable[List[Int]] + + thread { x << ints(0, 1000) } + thread { y << sum(0, x()) } + + thread { z << y() + result.set(z()) + latch.countDown() + } + + latch.await(10,TimeUnit.SECONDS) should equal (true) + result.get should equal (sum(0,ints(0,1000))) + List(x,y,z).foreach(_.shutdown()) + } +/* + it("should be able to join streams") { + import DataFlow._ + Actor.registry.shutdownAll() + + def ints(n: Int, max: Int, stream: DataFlowStream[Int]): Unit = if (n != max) { + stream <<< n + ints(n + 1, max, stream) + } + + def sum(s: Int, in: DataFlowStream[Int], out: DataFlowStream[Int]): Unit = { + out <<< s + sum(in() + s, in, out) + } + + val producer = new DataFlowStream[Int] + val consumer = new DataFlowStream[Int] + val latch = new CountDownLatch(1) + val result = new AtomicInteger(0) + + val t1 = thread { ints(0, 1000, producer) } + val t2 = thread { + Thread.sleep(1000) + result.set(producer.map(x => x * x).foldLeft(0)(_ + _)) + latch.countDown() + } + + latch.await(3,TimeUnit.SECONDS) should equal (true) + result.get should equal (332833500) + } + + it("should be able to sum streams recursively") { + import DataFlow._ + + def ints(n: Int, max: Int, stream: DataFlowStream[Int]): Unit = if (n != max) { + stream <<< n + ints(n + 1, max, stream) + } + + def sum(s: Int, in: DataFlowStream[Int], out: DataFlowStream[Int]): Unit = { + out <<< s + sum(in() + s, in, out) + } + + val result = new AtomicLong(0) + + val producer = new DataFlowStream[Int] + val consumer = new DataFlowStream[Int] + val latch = new CountDownLatch(1) + + @tailrec def recurseSum(stream: DataFlowStream[Int]): Unit = { + val x = stream() + + if(result.addAndGet(x) == 
166666500) + latch.countDown() + + recurseSum(stream) + } + + thread { ints(0, 1000, producer) } + thread { sum(0, producer, consumer) } + thread { recurseSum(consumer) } + + latch.await(15,TimeUnit.SECONDS) should equal (true) + } +*/ + /* Test not ready for prime time, causes some sort of deadlock */ + /* it("should be able to conditionally set variables") { + + import DataFlow._ + Actor.registry.shutdownAll() + + val latch = new CountDownLatch(1) + val x, y, z, v = new DataFlowVariable[Int] + + val main = thread { + x << 1 + z << Math.max(x(),y()) + latch.countDown() + } + + val setY = thread { + // Thread.sleep(2000) + y << 2 + } + + val setV = thread { + v << y + } + List(x,y,z,v) foreach (_.shutdown()) + latch.await(2,TimeUnit.SECONDS) should equal (true) + }*/ + } +} diff --git a/akka-actor-tests/src/test/scala/akka/dispatch/ActorModelSpec.scala b/akka-actor-tests/src/test/scala/akka/dispatch/ActorModelSpec.scala index 4e60ffcc96..d5cea19bf5 100644 --- a/akka-actor-tests/src/test/scala/akka/dispatch/ActorModelSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/dispatch/ActorModelSpec.scala @@ -14,6 +14,7 @@ import java.util.concurrent.atomic.AtomicLong import java.util.concurrent. 
{ConcurrentHashMap, CountDownLatch, TimeUnit} import akka.actor.dispatch.ActorModelSpec.MessageDispatcherInterceptor import akka.util.{Duration, Switch} +import org.multiverse.api.latches.StandardLatch object ActorModelSpec { @@ -110,13 +111,13 @@ object ActorModelSpec { super.dispatch(invocation) } - private[akka] abstract override def start { - super.start + private[akka] abstract override def start() { + super.start() starts.incrementAndGet() } - private[akka] abstract override def shutdown { - super.shutdown + private[akka] abstract override def shutdown() { + super.shutdown() stops.incrementAndGet() } } @@ -216,6 +217,21 @@ abstract class ActorModelSpec extends JUnitSuite { msgsProcessed = 0, restarts = 0 ) + + val futures = for(i <- 1 to 10) yield Future { i } + await(dispatcher.stops.get == 2)(withinMs = dispatcher.timeoutMs * 5) + assertDispatcher(dispatcher)(starts = 2, stops = 2) + + val a2 = newTestActor + a2.start + val futures2 = for(i <- 1 to 10) yield Future { i } + + await(dispatcher.starts.get == 3)(withinMs = dispatcher.timeoutMs * 5) + assertDispatcher(dispatcher)(starts = 3, stops = 2) + + a2.stop + await(dispatcher.stops.get == 3)(withinMs = dispatcher.timeoutMs * 5) + assertDispatcher(dispatcher)(starts = 3, stops = 3) } @Test def dispatcherShouldProcessMessagesOneAtATime { diff --git a/akka-actor-tests/src/test/scala/akka/dispatch/FutureSpec.scala b/akka-actor-tests/src/test/scala/akka/dispatch/FutureSpec.scala index b2e4dd94e5..857bcbcd5f 100644 --- a/akka-actor-tests/src/test/scala/akka/dispatch/FutureSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/dispatch/FutureSpec.scala @@ -62,9 +62,9 @@ class FutureSpec extends JUnitSuite { val future1 = actor1 !!! "Hello" flatMap ((s: String) => actor2 !!! s) val future2 = actor1 !!! "Hello" flatMap (actor2 !!! (_: String)) val future3 = actor1 !!! "Hello" flatMap (actor2 !!! 
(_: Int)) - assert(Some(Right("WORLD")) === future1.await.value) - assert(Some(Right("WORLD")) === future2.await.value) - intercept[ClassCastException] { future3.await.resultOrException } + assert((future1.get: Any) === "WORLD") + assert((future2.get: Any) === "WORLD") + intercept[ClassCastException] { future3.get } actor1.stop() actor2.stop() } @@ -74,8 +74,8 @@ class FutureSpec extends JUnitSuite { val actor2 = actorOf(new Actor { def receive = { case s: String => self reply s.toUpperCase } } ).start() val future1 = actor1 !!! "Hello" collect { case (s: String) => s } flatMap (actor2 !!! _) val future2 = actor1 !!! "Hello" collect { case (n: Int) => n } flatMap (actor2 !!! _) - assert(Some(Right("WORLD")) === future1.await.value) - intercept[MatchError] { future2.await.resultOrException } + assert((future1.get: Any) === "WORLD") + intercept[MatchError] { future2.get } actor1.stop() actor2.stop() } @@ -102,8 +102,8 @@ class FutureSpec extends JUnitSuite { c: String <- actor !!! 7 } yield b + "-" + c - assert(Some(Right("10-14")) === future1.await.value) - intercept[ClassCastException] { future2.await.resultOrException } + assert(future1.get === "10-14") + intercept[ClassCastException] { future2.get } actor.stop() } @@ -118,19 +118,64 @@ class FutureSpec extends JUnitSuite { }).start() val future1 = for { - a <- actor !!! Req("Hello") collect { case Res(x: Int) => x } - b <- actor !!! Req(a) collect { case Res(x: String) => x } - c <- actor !!! Req(7) collect { case Res(x: String) => x } + Res(a: Int) <- actor !!! Req("Hello") + Res(b: String) <- actor !!! Req(a) + Res(c: String) <- actor !!! Req(7) } yield b + "-" + c val future2 = for { - a <- actor !!! Req("Hello") collect { case Res(x: Int) => x } - b <- actor !!! Req(a) collect { case Res(x: Int) => x } - c <- actor !!! Req(7) collect { case Res(x: String) => x } + Res(a: Int) <- actor !!! Req("Hello") + Res(b: Int) <- actor !!! Req(a) + Res(c: Int) <- actor !!! 
Req(7) } yield b + "-" + c - assert(Some(Right("10-14")) === future1.await.value) - intercept[MatchError] { future2.await.resultOrException } + assert(future1.get === "10-14") + intercept[MatchError] { future2.get } + actor.stop() + } + + @Test def shouldMapMatchedExceptionsToResult { + val future1 = Future(5) + val future2 = future1 map (_ / 0) + val future3 = future2 map (_.toString) + + val future4 = future1 failure { + case e: ArithmeticException => 0 + } map (_.toString) + + val future5 = future2 failure { + case e: ArithmeticException => 0 + } map (_.toString) + + val future6 = future2 failure { + case e: MatchError => 0 + } map (_.toString) + + val future7 = future3 failure { case e: ArithmeticException => "You got ERROR" } + + val actor = actorOf[TestActor].start() + + val future8 = actor !!! "Failure" + val future9 = actor !!! "Failure" failure { + case e: RuntimeException => "FAIL!" + } + val future10 = actor !!! "Hello" failure { + case e: RuntimeException => "FAIL!" + } + val future11 = actor !!! "Failure" failure { case _ => "Oops!" } + + assert(future1.get === 5) + intercept[ArithmeticException] { future2.get } + intercept[ArithmeticException] { future3.get } + assert(future4.get === "5") + assert(future5.get === "0") + intercept[ArithmeticException] { future6.get } + assert(future7.get === "You got ERROR") + intercept[RuntimeException] { future8.get } + assert(future9.get === "FAIL!") + assert(future10.get === "World") + assert(future11.get === "Oops!") + actor.stop() } @@ -140,8 +185,9 @@ class FutureSpec extends JUnitSuite { def receive = { case (add: Int, wait: Int) => Thread.sleep(wait); self reply_? 
add } }).start() } - def futures = actors.zipWithIndex map { case (actor: ActorRef, idx: Int) => actor.!!![Int]((idx, idx * 200 )) } - assert(Futures.fold(0)(futures)(_ + _).awaitBlocking.result.get === 45) + val timeout = 10000 + def futures = actors.zipWithIndex map { case (actor: ActorRef, idx: Int) => actor.!!![Int]((idx, idx * 200 ), timeout) } + assert(Futures.fold(0, timeout)(futures)(_ + _).await.result.get === 45) } @Test def shouldFoldResultsByComposing { @@ -150,8 +196,8 @@ class FutureSpec extends JUnitSuite { def receive = { case (add: Int, wait: Int) => Thread.sleep(wait); self reply_? add } }).start() } - def futures = actors.zipWithIndex map { case (actor: ActorRef, idx: Int) => actor.!!![Int]((idx, idx * 200 )) } - assert(futures.foldLeft(Future(0))((fr, fa) => for (r <- fr; a <- fa) yield (r + a)).awaitBlocking.result.get === 45) + def futures = actors.zipWithIndex map { case (actor: ActorRef, idx: Int) => actor.!!![Int]((idx, idx * 200 ), 10000) } + assert(futures.foldLeft(Future(0))((fr, fa) => for (r <- fr; a <- fa) yield (r + a)).get === 45) } @Test def shouldFoldResultsWithException { @@ -165,12 +211,13 @@ class FutureSpec extends JUnitSuite { } }).start() } - def futures = actors.zipWithIndex map { case (actor: ActorRef, idx: Int) => actor.!!![Int]((idx, idx * 100 )) } - assert(Futures.fold(0)(futures)(_ + _).awaitBlocking.exception.get.getMessage === "shouldFoldResultsWithException: expected") + val timeout = 10000 + def futures = actors.zipWithIndex map { case (actor: ActorRef, idx: Int) => actor.!!![Int]((idx, idx * 100 ), timeout) } + assert(Futures.fold(0, timeout)(futures)(_ + _).await.exception.get.getMessage === "shouldFoldResultsWithException: expected") } @Test def shouldFoldReturnZeroOnEmptyInput { - assert(Futures.fold(0)(List[Future[Int]]())(_ + _).awaitBlocking.result.get === 0) + assert(Futures.fold(0)(List[Future[Int]]())(_ + _).get === 0) } @Test def shouldReduceResults { @@ -179,8 +226,9 @@ class FutureSpec extends 
JUnitSuite { def receive = { case (add: Int, wait: Int) => Thread.sleep(wait); self reply_? add } }).start() } - def futures = actors.zipWithIndex map { case (actor: ActorRef, idx: Int) => actor.!!![Int]((idx, idx * 200 )) } - assert(Futures.reduce(futures)(_ + _).awaitBlocking.result.get === 45) + val timeout = 10000 + def futures = actors.zipWithIndex map { case (actor: ActorRef, idx: Int) => actor.!!![Int]((idx, idx * 200 ), timeout) } + assert(Futures.reduce(futures, timeout)(_ + _).get === 45) } @Test def shouldReduceResultsWithException { @@ -194,34 +242,15 @@ class FutureSpec extends JUnitSuite { } }).start() } - def futures = actors.zipWithIndex map { case (actor: ActorRef, idx: Int) => actor.!!![Int]((idx, idx * 100 )) } - assert(Futures.reduce(futures)(_ + _).awaitBlocking.exception.get.getMessage === "shouldFoldResultsWithException: expected") + val timeout = 10000 + def futures = actors.zipWithIndex map { case (actor: ActorRef, idx: Int) => actor.!!![Int]((idx, idx * 100 ), timeout) } + assert(Futures.reduce(futures, timeout)(_ + _).await.exception.get.getMessage === "shouldFoldResultsWithException: expected") } @Test(expected = classOf[UnsupportedOperationException]) def shouldReduceThrowIAEOnEmptyInput { Futures.reduce(List[Future[Int]]())(_ + _).await.resultOrException } - @Test def resultWithinShouldNotThrowExceptions { - val latch = new StandardLatch - - val actors = (1 to 10).toList map { _ => - actorOf(new Actor { - def receive = { case (add: Int, wait: Boolean, latch: StandardLatch) => if (wait) latch.await; self reply_? 
add } - }).start() - } - - def futures = actors.zipWithIndex map { case (actor: ActorRef, idx: Int) => actor.!!![Int]((idx, idx >= 5, latch)) } - val result = for(f <- futures) yield f.valueWithin(2, TimeUnit.SECONDS) - latch.open - val done = result collect { case Some(Right(x)) => x } - val undone = result collect { case None => None } - val errors = result collect { case Some(Left(t)) => t } - assert(done.size === 5) - assert(undone.size === 5) - assert(errors.size === 0) - } - @Test def receiveShouldExecuteOnComplete { val latch = new StandardLatch val actor = actorOf[TestActor].start() @@ -269,41 +298,303 @@ class FutureSpec extends JUnitSuite { assert(f3.resultOrException === Some("SUCCESS")) // make sure all futures are completed in dispatcher - assert(Dispatchers.defaultGlobalDispatcher.futureQueueSize === 0) + assert(Dispatchers.defaultGlobalDispatcher.pendingFutures === 0) } @Test def shouldBlockUntilResult { val latch = new StandardLatch val f = Future({ latch.await; 5}) - val f2 = Future({ f() + 5 }) + val f2 = Future({ f.get + 5 }) assert(f2.resultOrException === None) latch.open - assert(f2() === 10) + assert(f2.get === 10) val f3 = Future({ Thread.sleep(100); 5}, 10) intercept[FutureTimeoutException] { - f3() + f3.get } } - @Test def lesslessIsMore { - import akka.actor.Actor.spawn - val dataflowVar, dataflowVar2 = new DefaultCompletableFuture[Int](Long.MaxValue) - val begin, end = new StandardLatch - spawn { - begin.await - dataflowVar2 << dataflowVar - end.open + @Test def futureComposingWithContinuations { + import Future.flow + + val actor = actorOf[TestActor].start + + val x = Future("Hello") + val y = x flatMap (actor !!! 
_) + + val r = flow(x() + " " + y[String]() + "!") + + assert(r.get === "Hello World!") + + actor.stop + } + + @Test def futureComposingWithContinuationsFailureDivideZero { + import Future.flow + + val x = Future("Hello") + val y = x map (_.length) + + val r = flow(x() + " " + y.map(_ / 0).map(_.toString)(), 100) + + intercept[java.lang.ArithmeticException](r.get) + } + + @Test def futureComposingWithContinuationsFailureCastInt { + import Future.flow + + val actor = actorOf[TestActor].start + + val x = Future(3) + val y = actor !!! "Hello" + + val r = flow(x() + y[Int](), 100) + + intercept[ClassCastException](r.get) + } + + @Test def futureComposingWithContinuationsFailureCastNothing { + import Future.flow + + val actor = actorOf[TestActor].start + + val x = Future("Hello") + val y = actor !!! "Hello" + + val r = flow(x() + y()) + + intercept[ClassCastException](r.get) + } + + @Test def futureCompletingWithContinuations { + import Future.flow + + val x, y, z = Promise[Int]() + val ly, lz = new StandardLatch + + val result = flow { + y completeWith x + ly.open // not within continuation + + z << x + lz.open // within continuation, will wait for 'z' to complete + z() + y() } - spawn { - dataflowVar << 5 + assert(ly.tryAwaitUninterruptible(100, TimeUnit.MILLISECONDS)) + assert(!lz.tryAwaitUninterruptible(100, TimeUnit.MILLISECONDS)) + + flow { x << 5 } + + assert(y.get === 5) + assert(z.get === 5) + assert(lz.isOpen) + assert(result.get === 10) + + val a, b, c = Promise[Int]() + + val result2 = flow { + val n = (a << c).result.get + 10 + b << (c() - 2) + a() + n * b() } - begin.open - end.await - assert(dataflowVar2() === 5) - assert(dataflowVar.get === 5) + + c completeWith Future(5) + + assert(a.get === 5) + assert(b.get === 3) + assert(result2.get === 50) + Thread.sleep(100) + + // make sure all futures are completed in dispatcher + assert(Dispatchers.defaultGlobalDispatcher.pendingFutures === 0) + } + + @Test def 
shouldNotAddOrRunCallbacksAfterFailureToBeCompletedBeforeExpiry { + val latch = new StandardLatch + val f = Promise[Int](0) + Thread.sleep(25) + f.onComplete( _ => latch.open ) //Shouldn't throw any exception here + + assert(f.isExpired) //Should be expired + + f.complete(Right(1)) //Shouldn't complete the Future since it is expired + + assert(f.value.isEmpty) //Shouldn't be completed + assert(!latch.isOpen) //Shouldn't run the listener + } + + @Test def futureDataFlowShouldEmulateBlocking1 { + import Future.flow + + val one, two = Promise[Int](1000 * 60) + val simpleResult = flow { + one() + two() + } + + assert(List(one, two, simpleResult).forall(_.isCompleted == false)) + + flow { one << 1 } + + assert(one.isCompleted) + assert(List(two, simpleResult).forall(_.isCompleted == false)) + + flow { two << 9 } + + assert(List(one, two).forall(_.isCompleted == true)) + assert(simpleResult.get === 10) + + } + + @Test def futureDataFlowShouldEmulateBlocking2 { + import Future.flow + val x1, x2, y1, y2 = Promise[Int](1000 * 60) + val lx, ly, lz = new StandardLatch + val result = flow { + lx.open() + x1 << y1 + ly.open() + x2 << y2 + lz.open() + x1() + x2() + } + assert(lx.isOpen) + assert(!ly.isOpen) + assert(!lz.isOpen) + assert(List(x1,x2,y1,y2).forall(_.isCompleted == false)) + + flow { y1 << 1 } // When this is set, it should cascade down the line + + assert(ly.tryAwaitUninterruptible(2000, TimeUnit.MILLISECONDS)) + assert(x1.get === 1) + assert(!lz.isOpen) + + flow { y2 << 9 } // When this is set, it should cascade down the line + + assert(lz.tryAwaitUninterruptible(2000, TimeUnit.MILLISECONDS)) + assert(x2.get === 9) + + assert(List(x1,x2,y1,y2).forall(_.isCompleted == true)) + + assert(result.get === 10) + } + + @Test def dataFlowAPIshouldbeSlick { + import Future.flow + + val i1, i2, s1, s2 = new StandardLatch + + val callService1 = Future { i1.open; s1.awaitUninterruptible; 1 } + val callService2 = Future { i2.open; s2.awaitUninterruptible; 9 } + + val result = 
flow { callService1() + callService2() } + + assert(!s1.isOpen) + assert(!s2.isOpen) + assert(!result.isCompleted) + assert(i1.tryAwaitUninterruptible(2000, TimeUnit.MILLISECONDS)) + assert(i2.tryAwaitUninterruptible(2000, TimeUnit.MILLISECONDS)) + s1.open + s2.open + assert(result.get === 10) + } + + @Test def futureCompletingWithContinuationsFailure { + import Future.flow + + val x, y, z = Promise[Int]() + val ly, lz = new StandardLatch + + val result = flow { + y << x + ly.open + val oops = 1 / 0 + z << x + lz.open + z() + y() + oops + } + + assert(!ly.tryAwaitUninterruptible(100, TimeUnit.MILLISECONDS)) + assert(!lz.tryAwaitUninterruptible(100, TimeUnit.MILLISECONDS)) + + flow { x << 5 } + + assert(y.get === 5) + intercept[java.lang.ArithmeticException](result.get) + assert(z.value === None) + assert(!lz.isOpen) + } + + @Test def futureContinuationsShouldNotBlock { + import Future.flow + + val latch = new StandardLatch + val future = Future { + latch.await + "Hello" + } + + val result = flow { + Some(future()).filter(_ == "Hello") + } + + assert(!result.isCompleted) + + latch.open + + assert(result.get === Some("Hello")) + } + + @Test def futureFlowShouldBeTypeSafe { + import Future.flow + + def checkType[A: Manifest, B](in: Future[A], refmanifest: Manifest[B]): Boolean = manifest[A] == refmanifest + + val rString = flow { + val x = Future(5) + x().toString + } + + val rInt = flow { + val x = rString.apply + val y = Future(5) + x.length + y() + } + + assert(checkType(rString, manifest[String])) + assert(checkType(rInt, manifest[Int])) + assert(!checkType(rInt, manifest[String])) + assert(!checkType(rInt, manifest[Nothing])) + assert(!checkType(rInt, manifest[Any])) + + rString.await + rInt.await + } + + @Test def futureFlowSimpleAssign { + import Future.flow + + val x, y, z = Promise[Int]() + + flow { + z << x() + y() + } + flow { x << 40 } + flow { y << 2 } + + assert(z.get === 42) + } + + @Test def ticket812FutureDispatchCleanup { + val dispatcher = 
implicitly[MessageDispatcher] + assert(dispatcher.pendingFutures === 0) + val future = Future({Thread.sleep(100);"Done"}, 10) + intercept[FutureTimeoutException] { future.await } + assert(dispatcher.pendingFutures === 1) + Thread.sleep(100) + assert(dispatcher.pendingFutures === 0) } } diff --git a/akka-actor-tests/src/test/scala/akka/dispatch/MailboxConfigSpec.scala b/akka-actor-tests/src/test/scala/akka/dispatch/MailboxConfigSpec.scala index 9ddbfdc332..0da861350d 100644 --- a/akka-actor-tests/src/test/scala/akka/dispatch/MailboxConfigSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/dispatch/MailboxConfigSpec.scala @@ -23,8 +23,8 @@ abstract class MailboxSpec extends def factory: MailboxType => MessageQueue name should { - "create a !blockDequeue && unbounded mailbox" in { - val config = UnboundedMailbox(false) + "create an unbounded mailbox" in { + val config = UnboundedMailbox() val q = factory(config) ensureInitialMailboxState(config, q) @@ -37,8 +37,8 @@ abstract class MailboxSpec extends f.await.resultOrException must be === Some(null) } - "create a !blockDequeue and bounded mailbox with 10 capacity and with push timeout" in { - val config = BoundedMailbox(false, 10, Duration(10,TimeUnit.MILLISECONDS)) + "create a bounded mailbox with 10 capacity and with push timeout" in { + val config = BoundedMailbox(10, Duration(10,TimeUnit.MILLISECONDS)) val q = factory(config) ensureInitialMailboxState(config, q) @@ -59,30 +59,16 @@ abstract class MailboxSpec extends } "dequeue what was enqueued properly for unbounded mailboxes" in { - testEnqueueDequeue(UnboundedMailbox(false)) + testEnqueueDequeue(UnboundedMailbox()) } "dequeue what was enqueued properly for bounded mailboxes" in { - testEnqueueDequeue(BoundedMailbox(false, 10000, Duration(-1, TimeUnit.MILLISECONDS))) + testEnqueueDequeue(BoundedMailbox(10000, Duration(-1, TimeUnit.MILLISECONDS))) } "dequeue what was enqueued properly for bounded mailboxes with pushTimeout" in { - 
testEnqueueDequeue(BoundedMailbox(false, 10000, Duration(100, TimeUnit.MILLISECONDS))) + testEnqueueDequeue(BoundedMailbox(10000, Duration(100, TimeUnit.MILLISECONDS))) } - - /** FIXME Adapt test so it works with the last dequeue - - "dequeue what was enqueued properly for unbounded mailboxes with blockDeque" in { - testEnqueueDequeue(UnboundedMailbox(true)) - } - - "dequeue what was enqueued properly for bounded mailboxes with blockDeque" in { - testEnqueueDequeue(BoundedMailbox(true, 1000, Duration(-1, TimeUnit.MILLISECONDS))) - } - - "dequeue what was enqueued properly for bounded mailboxes with blockDeque and pushTimeout" in { - testEnqueueDequeue(BoundedMailbox(true, 1000, Duration(100, TimeUnit.MILLISECONDS))) - }*/ } //CANDIDATE FOR TESTKIT @@ -111,8 +97,8 @@ abstract class MailboxSpec extends q match { case aQueue: BlockingQueue[_] => config match { - case BoundedMailbox(_,capacity,_) => aQueue.remainingCapacity must be === capacity - case UnboundedMailbox(_) => aQueue.remainingCapacity must be === Int.MaxValue + case BoundedMailbox(capacity,_) => aQueue.remainingCapacity must be === capacity + case UnboundedMailbox() => aQueue.remainingCapacity must be === Int.MaxValue } case _ => } @@ -165,10 +151,8 @@ abstract class MailboxSpec extends class DefaultMailboxSpec extends MailboxSpec { lazy val name = "The default mailbox implementation" def factory = { - case UnboundedMailbox(blockDequeue) => - new DefaultUnboundedMessageQueue(blockDequeue) - case BoundedMailbox(blocking, capacity, pushTimeOut) => - new DefaultBoundedMessageQueue(capacity, pushTimeOut, blocking) + case UnboundedMailbox() => new DefaultUnboundedMessageQueue() + case BoundedMailbox(capacity, pushTimeOut) => new DefaultBoundedMessageQueue(capacity, pushTimeOut) } } @@ -176,9 +160,7 @@ class PriorityMailboxSpec extends MailboxSpec { val comparator = PriorityGenerator(_.##) lazy val name = "The priority mailbox implementation" def factory = { - case UnboundedMailbox(blockDequeue) => - new 
UnboundedPriorityMessageQueue(blockDequeue, comparator) - case BoundedMailbox(blocking, capacity, pushTimeOut) => - new BoundedPriorityMessageQueue(capacity, pushTimeOut, blocking, comparator) + case UnboundedMailbox() => new UnboundedPriorityMessageQueue(comparator) + case BoundedMailbox(capacity, pushTimeOut) => new BoundedPriorityMessageQueue(capacity, pushTimeOut, comparator) } } \ No newline at end of file diff --git a/akka-actor-tests/src/test/scala/akka/dispatch/PriorityDispatcherSpec.scala b/akka-actor-tests/src/test/scala/akka/dispatch/PriorityDispatcherSpec.scala index f256715b8c..002267a6c7 100644 --- a/akka-actor-tests/src/test/scala/akka/dispatch/PriorityDispatcherSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/dispatch/PriorityDispatcherSpec.scala @@ -10,11 +10,11 @@ class PriorityDispatcherSpec extends WordSpec with MustMatchers { "A PriorityExecutorBasedEventDrivenDispatcher" must { "Order it's messages according to the specified comparator using an unbounded mailbox" in { - testOrdering(UnboundedMailbox(false)) + testOrdering(UnboundedMailbox()) } "Order it's messages according to the specified comparator using a bounded mailbox" in { - testOrdering(BoundedMailbox(false,1000)) + testOrdering(BoundedMailbox(1000)) } } diff --git a/akka-actor/src/main/scala/akka/AkkaException.scala b/akka-actor/src/main/scala/akka/AkkaException.scala index 748df1ced0..a9edf69a0b 100644 --- a/akka-actor/src/main/scala/akka/AkkaException.scala +++ b/akka-actor/src/main/scala/akka/AkkaException.scala @@ -16,14 +16,11 @@ import java.net.{InetAddress, UnknownHostException} * * @author Jonas Bonér */ -class AkkaException(message: String = "") extends RuntimeException(message) with Serializable { +class AkkaException(message: String = "", cause: Throwable = null) extends RuntimeException(message, cause) with Serializable { val uuid = "%s_%s".format(AkkaException.hostname, newUuid) - override lazy val toString = { - val name = getClass.getName - val trace = 
stackTraceToString - "%s: %s\n[%s]\n%s".format(name, message, uuid, trace) - } + override lazy val toString = + "%s: %s\n[%s]\n%s".format(getClass.getName, message, uuid, stackTraceToString) def stackTraceToString = { val trace = getStackTrace diff --git a/akka-actor/src/main/scala/akka/actor/Actor.scala b/akka-actor/src/main/scala/akka/actor/Actor.scala index 90ecd737ef..b7b537ec8e 100644 --- a/akka-actor/src/main/scala/akka/actor/Actor.scala +++ b/akka-actor/src/main/scala/akka/actor/Actor.scala @@ -17,6 +17,7 @@ import akka.event.EventHandler import scala.reflect.BeanProperty import com.eaio.uuid.UUID +import java.lang.reflect.InvocationTargetException /** * Life-cycle messages for the Actors @@ -28,18 +29,18 @@ sealed trait LifeCycleMessage extends Serializable */ sealed trait AutoReceivedMessage { self: LifeCycleMessage => } -case class HotSwap(code: ActorRef => Actor.Receive, discardOld: Boolean = true) - extends AutoReceivedMessage with LifeCycleMessage { +case class HotSwap(code: ActorRef => Actor.Receive, discardOld: Boolean = true) extends AutoReceivedMessage with LifeCycleMessage { /** * Java API */ - def this(code: akka.japi.Function[ActorRef,Procedure[Any]], discardOld: Boolean) = + def this(code: akka.japi.Function[ActorRef,Procedure[Any]], discardOld: Boolean) = { this( (self: ActorRef) => { val behavior = code(self) val result: Actor.Receive = { case msg => behavior(msg) } result }, discardOld) + } /** * Java API with default non-stacking behavior @@ -72,12 +73,12 @@ case class MaximumNumberOfRestartsWithinTimeRangeReached( @BeanProperty val lastExceptionCausingRestart: Throwable) extends LifeCycleMessage // Exceptions for Actors -class ActorStartException private[akka](message: String) extends AkkaException(message) -class IllegalActorStateException private[akka](message: String) extends AkkaException(message) -class ActorKilledException private[akka](message: String) extends AkkaException(message) -class ActorInitializationException 
private[akka](message: String) extends AkkaException(message) -class ActorTimeoutException private[akka](message: String) extends AkkaException(message) -class InvalidMessageException private[akka](message: String) extends AkkaException(message) +class ActorStartException private[akka](message: String, cause: Throwable = null) extends AkkaException(message, cause) +class IllegalActorStateException private[akka](message: String, cause: Throwable = null) extends AkkaException(message, cause) +class ActorKilledException private[akka](message: String, cause: Throwable = null) extends AkkaException(message, cause) +class ActorInitializationException private[akka](message: String, cause: Throwable = null) extends AkkaException(message, cause) +class ActorTimeoutException private[akka](message: String, cause: Throwable = null) extends AkkaException(message, cause) +class InvalidMessageException private[akka](message: String, cause: Throwable = null) extends AkkaException(message, cause) /** * This message is thrown by default when an Actors behavior doesn't match a message @@ -130,7 +131,9 @@ object Actor extends ListenerManagement { */ type Receive = PartialFunction[Any, Unit] - private[actor] val actorRefInCreation = new scala.util.DynamicVariable[Option[ActorRef]](None) + private[actor] val actorRefInCreation = new ThreadLocal[Option[ActorRef]]{ + override def initialValue = None + } /** * Creates an ActorRef out of the Actor with type T. 
@@ -238,7 +241,15 @@ object Actor extends ListenerManagement { implicit val format: Format[T] = { if (formatClassName == "N/A") formatErrorDueTo("no class name defined in configuration") - val f = ReflectiveAccess.getObjectFor(formatClassName).getOrElse(formatErrorDueTo("it could not be loaded")) + val f = ReflectiveAccess.getObjectFor(formatClassName) match { + case Right(actor) => actor + case Left(exception) => + val cause = exception match { + case i: InvocationTargetException => i.getTargetException + case _ => exception + } + formatErrorDueTo(" " + cause.toString) + } if (f.isInstanceOf[Format[T]]) f.asInstanceOf[Format[T]] else formatErrorDueTo("class must be of type [akka.serialization.Format[T]]") } @@ -296,7 +307,6 @@ object Actor extends ListenerManagement { * times if for example the Actor is supervised and needs to be restarted. * Uses generated address. *

- * This function should NOT be used for remote actors. *

    *   import Actor._
    *   val actor = actorOf(new MyActor)
@@ -340,7 +350,6 @@ object Actor extends ListenerManagement {
    * times if for example the Actor is supervised and needs to be restarted.
    * Uses generated address.
    * 

- * This function should NOT be used for remote actors. * JAVA API */ def actorOf[T <: Actor](creator: Creator[T]): ActorRef = @@ -387,13 +396,21 @@ object Actor extends ListenerManagement { private[akka] def newLocalActorRef(clazz: Class[_ <: Actor], address: String): ActorRef = { new LocalActorRef(() => { import ReflectiveAccess.{ createInstance, noParams, noArgs } - createInstance[Actor](clazz.asInstanceOf[Class[_]], noParams, noArgs).getOrElse( - throw new ActorInitializationException( - "Could not instantiate Actor" + - "\nMake sure Actor is NOT defined inside a class/trait," + - "\nif so put it outside the class/trait, f.e. in a companion object," + - "\nOR try to change: 'actorOf[MyActor]' to 'actorOf(new MyActor)'.")) - }, address) + createInstance[Actor](clazz.asInstanceOf[Class[_]], noParams, noArgs) match { + case Right(actor) => actor + case Left(exception) => + val cause = exception match { + case i: InvocationTargetException => i.getTargetException + case _ => exception + } + + throw new ActorInitializationException( + "Could not instantiate Actor of " + clazz + + "\nMake sure Actor is NOT defined inside a class/trait," + + "\nif so put it outside the class/trait, f.e. in a companion object," + + "\nOR try to change: 'actorOf[MyActor]' to 'actorOf(new MyActor)'.", cause) + } + }, address) } /** @@ -401,6 +418,19 @@ object Actor extends ListenerManagement { * to convert an Option[Any] to an Option[T]. */ implicit def toAnyOptionAsTypedOption(anyOption: Option[Any]) = new AnyOptionAsTypedOption(anyOption) + + /** + * Implicitly converts the given Future[_] to a AnyOptionAsTypedOption which offers the method as[T] + * to convert an Option[Any] to an Option[T]. + * This means that the following code is equivalent: + * (actor !! "foo").as[Int] (Deprecated) + * and + * (actor !!! 
"foo").as[Int] (Recommended) + */ + implicit def futureToAnyOptionAsTypedOption(anyFuture: Future[_]) = new AnyOptionAsTypedOption({ + try { anyFuture.await } catch { case t: FutureTimeoutException => } + anyFuture.resultOrException + }) } /** @@ -463,7 +493,7 @@ trait Actor { * the 'forward' function. */ @transient implicit val someSelf: Some[ActorRef] = { - val optRef = Actor.actorRefInCreation.value + val optRef = Actor.actorRefInCreation.get if (optRef.isEmpty) throw new ActorInitializationException( "ActorRef for instance of actor [" + getClass.getName + "] is not in scope." + "\n\tYou can not create an instance of an actor explicitly using 'new MyActor'." + @@ -471,7 +501,7 @@ trait Actor { "\n\tEither use:" + "\n\t\t'val actor = Actor.actorOf[MyActor]', or" + "\n\t\t'val actor = Actor.actorOf(new MyActor(..))'") - Actor.actorRefInCreation.value = None + Actor.actorRefInCreation.set(None) optRef.asInstanceOf[Some[ActorRef]] } @@ -540,14 +570,14 @@ trait Actor { *

* Is called when an Actor is started by invoking 'actor.start()'. */ - def preStart {} + def preStart() {} /** * User overridable callback. *

* Is called when 'actor.stop()' is invoked. */ - def postStop {} + def postStop() {} /** * User overridable callback. diff --git a/akka-actor/src/main/scala/akka/actor/ActorRef.scala b/akka-actor/src/main/scala/akka/actor/ActorRef.scala index b36d90aa5b..61543631fb 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorRef.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorRef.scala @@ -97,7 +97,7 @@ trait ActorRef extends ActorRefShared with java.lang.Comparable[ActorRef] { scal * Defines the default timeout for '!!' and '!!!' invocations, * e.g. the timeout for the future returned by the call to '!!' and '!!!'. */ - @deprecated("Will be replaced by implicit-scoped timeout on all methods that needs it, will default to timeout specified in config") + @deprecated("Will be replaced by implicit-scoped timeout on all methods that needs it, will default to timeout specified in config", "1.1") @BeanProperty @volatile var timeout: Long = Actor.TIMEOUT @@ -232,7 +232,7 @@ trait ActorRef extends ActorRefShared with java.lang.Comparable[ActorRef] { scal /** * Is the actor able to handle the message passed in as arguments? 
*/ - @deprecated("Will be removed without replacement, it's just not reliable in the face of `become` and `unbecome`") + @deprecated("Will be removed without replacement, it's just not reliable in the face of `become` and `unbecome`", "1.1") def isDefinedAt(message: Any): Boolean = actor.isDefinedAt(message) /** @@ -512,7 +512,7 @@ class LocalActorRef private[akka] (private[this] val actorFactory: () => Actor, @volatile private var maxNrOfRetriesCount: Int = 0 @volatile - private var restartsWithinTimeRangeTimestamp: Long = 0L + private var restartTimeWindowStartNanos: Long = 0L @volatile private var _mailbox: AnyRef = _ @volatile @@ -724,30 +724,32 @@ class LocalActorRef private[akka] (private[this] val actorFactory: () => Actor, } private def requestRestartPermission(maxNrOfRetries: Option[Int], withinTimeRange: Option[Int]): Boolean = { + val denied = if (maxNrOfRetries.isEmpty && withinTimeRange.isEmpty) { //Immortal false } else if (withinTimeRange.isEmpty) { // restrict number of restarts - maxNrOfRetriesCount += 1 //Increment number of retries - maxNrOfRetriesCount > maxNrOfRetries.get + val retries = maxNrOfRetriesCount + 1 + maxNrOfRetriesCount = retries //Increment number of retries + retries > maxNrOfRetries.get } else { // cannot restart more than N within M timerange - maxNrOfRetriesCount += 1 //Increment number of retries - val windowStart = restartsWithinTimeRangeTimestamp - val now = System.currentTimeMillis - val retries = maxNrOfRetriesCount + val retries = maxNrOfRetriesCount + 1 + + val windowStart = restartTimeWindowStartNanos + val now = System.nanoTime //We are within the time window if it isn't the first restart, or if the window hasn't closed val insideWindow = if (windowStart == 0) false - else (now - windowStart) <= withinTimeRange.get - - //The actor is dead if it dies X times within the window of restart - val unrestartable = insideWindow && retries > maxNrOfRetries.getOrElse(1) + else (now - windowStart) <= 
TimeUnit.MILLISECONDS.toNanos(withinTimeRange.get) if (windowStart == 0 || !insideWindow) //(Re-)set the start of the window - restartsWithinTimeRangeTimestamp = now + restartTimeWindowStartNanos = now - if (windowStart != 0 && !insideWindow) //Reset number of restarts if window has expired - maxNrOfRetriesCount = 1 + //Reset number of restarts if window has expired, otherwise, increment it + maxNrOfRetriesCount = if (windowStart != 0 && !insideWindow) 1 else retries //Increment number of retries - unrestartable + val restartCountLimit = if (maxNrOfRetries.isDefined) maxNrOfRetries.get else 1 + + //The actor is dead if it dies X times within the window of restart + insideWindow && retries > restartCountLimit } denied == false //If we weren't denied, we have a go @@ -839,12 +841,12 @@ class LocalActorRef private[akka] (private[this] val actorFactory: () => Actor, private[this] def newActor: Actor = { try { - Actor.actorRefInCreation.value = Some(this) + Actor.actorRefInCreation.set(Some(this)) val a = actorFactory() if (a eq null) throw new ActorInitializationException("Actor instance passed to ActorRef can not be 'null'") a } finally { - Actor.actorRefInCreation.value = None + Actor.actorRefInCreation.set(None) } } @@ -1009,7 +1011,7 @@ private[akka] case class RemoteActorRef private[akka] ( } // ==== NOT SUPPORTED ==== - @deprecated("Will be removed without replacement, doesn't make any sense to have in the face of `become` and `unbecome`") + @deprecated("Will be removed without replacement, doesn't make any sense to have in the face of `become` and `unbecome`", "1.1") def actorClass: Class[_ <: Actor] = unsupported def dispatcher_=(md: MessageDispatcher): Unit = unsupported def dispatcher: MessageDispatcher = unsupported diff --git a/akka-actor/src/main/scala/akka/actor/ActorRegistry.scala b/akka-actor/src/main/scala/akka/actor/ActorRegistry.scala index 335f331902..d78e91f919 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorRegistry.scala +++ 
b/akka-actor/src/main/scala/akka/actor/ActorRegistry.scala @@ -297,7 +297,7 @@ class Index[K <: AnyRef,V <: AnyRef : Manifest] { /** * Associates the value of type V with the key of type K - * @returns true if the value didn't exist for the key previously, and false otherwise + * @return true if the value didn't exist for the key previously, and false otherwise */ def put(key: K, value: V): Boolean = { //Tailrecursive spin-locking put @@ -339,7 +339,7 @@ class Index[K <: AnyRef,V <: AnyRef : Manifest] { } /** - * @returns a _new_ array of all existing values for the given key at the time of the call + * @return a _new_ array of all existing values for the given key at the time of the call */ def values(key: K): Array[V] = { val set: JSet[V] = container get key @@ -348,7 +348,7 @@ class Index[K <: AnyRef,V <: AnyRef : Manifest] { } /** - * @returns Some(value) for the first matching value where the supplied function returns true for the given key, + * @return Some(value) for the first matching value where the supplied function returns true for the given key, * if no matches it returns None */ def findValue(key: K)(f: (V) => Boolean): Option[V] = { @@ -370,7 +370,7 @@ class Index[K <: AnyRef,V <: AnyRef : Manifest] { /** * Disassociates the value of type V from the key of type K - * @returns true if the value was disassociated from the key and false if it wasn't previously associated with the key + * @return true if the value was disassociated from the key and false if it wasn't previously associated with the key */ def remove(key: K, value: V): Boolean = { val set = container get key @@ -388,7 +388,7 @@ class Index[K <: AnyRef,V <: AnyRef : Manifest] { } /** - * @returns true if the underlying containers is empty, may report false negatives when the last remove is underway + * @return true if the underlying containers is empty, may report false negatives when the last remove is underway */ def isEmpty: Boolean = container.isEmpty diff --git 
a/akka-actor/src/main/scala/akka/actor/BootableActorLoaderService.scala b/akka-actor/src/main/scala/akka/actor/BootableActorLoaderService.scala index 92b29e8714..73fa2e768e 100644 --- a/akka-actor/src/main/scala/akka/actor/BootableActorLoaderService.scala +++ b/akka-actor/src/main/scala/akka/actor/BootableActorLoaderService.scala @@ -21,7 +21,6 @@ trait BootableActorLoaderService extends Bootable { protected def createApplicationClassLoader : Option[ClassLoader] = Some({ if (HOME.isDefined) { - val CONFIG = HOME.get + "/config" val DEPLOY = HOME.get + "/deploy" val DEPLOY_DIR = new File(DEPLOY) if (!DEPLOY_DIR.exists) { diff --git a/akka-actor/src/main/scala/akka/actor/Scheduler.scala b/akka-actor/src/main/scala/akka/actor/Scheduler.scala index cbda9d0af9..1c1da8e7a2 100644 --- a/akka-actor/src/main/scala/akka/actor/Scheduler.scala +++ b/akka-actor/src/main/scala/akka/actor/Scheduler.scala @@ -105,13 +105,17 @@ object Scheduler { } } - def shutdown: Unit = synchronized { - service.shutdown + def shutdown() { + synchronized { + service.shutdown() + } } - def restart: Unit = synchronized { - shutdown - service = Executors.newSingleThreadScheduledExecutor(SchedulerThreadFactory) + def restart() { + synchronized { + shutdown() + service = Executors.newSingleThreadScheduledExecutor(SchedulerThreadFactory) + } } } diff --git a/akka-actor/src/main/scala/akka/actor/Supervisor.scala b/akka-actor/src/main/scala/akka/actor/Supervisor.scala index 9b76d3035a..86dd283c52 100644 --- a/akka-actor/src/main/scala/akka/actor/Supervisor.scala +++ b/akka-actor/src/main/scala/akka/actor/Supervisor.scala @@ -13,7 +13,7 @@ import java.util.concurrent.{CopyOnWriteArrayList, ConcurrentHashMap} import java.net.InetSocketAddress import akka.config.Supervision._ -class SupervisorException private[akka](message: String) extends AkkaException(message) +class SupervisorException private[akka](message: String, cause: Throwable = null) extends AkkaException(message, cause) /** * Factory object for 
creating supervisors declarative. It creates instances of the 'Supervisor' class. diff --git a/akka-actor/src/main/scala/akka/actor/UntypedActor.scala b/akka-actor/src/main/scala/akka/actor/UntypedActor.scala index 0aeb84c2b6..41585dc602 100644 --- a/akka-actor/src/main/scala/akka/actor/UntypedActor.scala +++ b/akka-actor/src/main/scala/akka/actor/UntypedActor.scala @@ -88,14 +88,14 @@ abstract class UntypedActor extends Actor { *

* Is called when an Actor is started by invoking 'actor.start()'. */ - override def preStart {} + override def preStart() {} /** * User overridable callback. *

* Is called when 'actor.stop()' is invoked. */ - override def postStop {} + override def postStop() {} /** * User overridable callback. diff --git a/akka-actor/src/main/scala/akka/actor/package.scala b/akka-actor/src/main/scala/akka/actor/package.scala index 0a781649eb..01ef989f2a 100644 --- a/akka-actor/src/main/scala/akka/actor/package.scala +++ b/akka-actor/src/main/scala/akka/actor/package.scala @@ -4,8 +4,6 @@ package akka -import actor.{ScalaActorRef, ActorRef} - package object actor { implicit def actorRef2Scala(ref: ActorRef): ScalaActorRef = ref.asInstanceOf[ScalaActorRef] diff --git a/akka-actor/src/main/scala/akka/config/Config.scala b/akka-actor/src/main/scala/akka/config/Config.scala index 7d50e59cd7..eef0c5d8e0 100644 --- a/akka-actor/src/main/scala/akka/config/Config.scala +++ b/akka-actor/src/main/scala/akka/config/Config.scala @@ -6,8 +6,8 @@ package akka.config import akka.AkkaException -class ConfigurationException(message: String) extends AkkaException(message) -class ModuleNotAvailableException(message: String) extends AkkaException(message) +class ConfigurationException(message: String, cause: Throwable = null) extends AkkaException(message, cause) +class ModuleNotAvailableException(message: String, cause: Throwable = null) extends AkkaException(message, cause) /** * Loads up the configuration (from the akka.conf file). @@ -15,7 +15,7 @@ class ModuleNotAvailableException(message: String) extends AkkaException(message * @author Jonas Bonér */ object Config { - val VERSION = "1.1-SNAPSHOT" + val VERSION = "1.2-SNAPSHOT" val HOME = { val envHome = System.getenv("AKKA_HOME") match { @@ -31,7 +31,7 @@ object Config { envHome orElse systemHome } - val config: Configuration = try { + val config: Configuration = { val confName = { val envConf = System.getenv("AKKA_MODE") match { case null | "" => None @@ -46,42 +46,46 @@ object Config { (envConf orElse systemConf).map("akka." 
+ _ + ".conf").getOrElse("akka.conf") } - val newInstance = + val (newInstance, source) = if (System.getProperty("akka.config", "") != "") { val configFile = System.getProperty("akka.config", "") - println("Loading config from -Dakka.config=" + configFile) - Configuration.fromFile(configFile) + (() => Configuration.fromFile(configFile), "Loading config from -Dakka.config=" + configFile) } else if (getClass.getClassLoader.getResource(confName) ne null) { - println("Loading config [" + confName + "] from the application classpath.") - Configuration.fromResource(confName, getClass.getClassLoader) + (() => Configuration.fromResource(confName, getClass.getClassLoader), "Loading config [" + confName + "] from the application classpath.") } else if (HOME.isDefined) { val configFile = HOME.get + "/config/" + confName - println("AKKA_HOME is defined as [" + HOME.get + "], loading config from [" + configFile + "].") - Configuration.fromFile(configFile) + (() => Configuration.fromFile(configFile), "AKKA_HOME is defined as [" + HOME.get + "], loading config from [" + configFile + "].") } else { - println( - "\nCan't load '" + confName + "'." + + (() => Configuration.fromString("akka {}"), // default empty config + "\nCan't load '" + confName + "'." + "\nOne of the three ways of locating the '" + confName + "' file needs to be defined:" + "\n\t1. Define the '-Dakka.config=...' system property option." + "\n\t2. Put the '" + confName + "' file on the classpath." + "\n\t3. Define 'AKKA_HOME' environment variable pointing to the root of the Akka distribution." + "\nI have no way of finding the '" + confName + "' configuration file." 
+ "\nUsing default values everywhere.") - Configuration.fromString("akka {}") // default empty config } - val configVersion = newInstance.getString("akka.version", VERSION) - if (configVersion != VERSION) - throw new ConfigurationException( - "Akka JAR version [" + VERSION + "] is different than the provided config version [" + configVersion + "]") + try { + val i = newInstance() - newInstance - } catch { - case e => - System.err.println("Couldn't parse config, fatal error.") - e.printStackTrace(System.err) - System.exit(-1) - throw e + val configVersion = i.getString("akka.version", VERSION) + if (configVersion != VERSION) + throw new ConfigurationException( + "Akka JAR version [" + VERSION + "] is different than the provided config version [" + configVersion + "]") + + if(Configuration.outputConfigSources) + System.out.println(source) + + i + } catch { + case e => + System.err.println("Couldn't parse config, fatal error.") + System.err.println("Config source: " + source) + e.printStackTrace(System.err) + System.exit(-1) + throw e + } } val CONFIG_VERSION = config.getString("akka.version", VERSION) diff --git a/akka-actor/src/main/scala/akka/config/Configuration.scala b/akka-actor/src/main/scala/akka/config/Configuration.scala index ba1d45d15b..7794a3eb3d 100644 --- a/akka-actor/src/main/scala/akka/config/Configuration.scala +++ b/akka-actor/src/main/scala/akka/config/Configuration.scala @@ -14,6 +14,8 @@ object Configuration { val DefaultPath = new File(".").getCanonicalPath val DefaultImporter = new FilesystemImporter(DefaultPath) + val outputConfigSources = System.getProperty("akka.output.config.source") ne null + def load(data: String, importer: Importer = DefaultImporter): Configuration = { val parser = new ConfigParser(importer = importer) new Configuration(parser parse data) @@ -59,6 +61,13 @@ class Configuration(val map: Map[String, Any]) { private val trueValues = Set("true", "on") private val falseValues = Set("false", "off") + private def 
outputIfDesiredAndReturnInput[T](key: String, t: T): T = { + if (Configuration.outputConfigSources) + println("Akka config is using default value for: " + key) + + t + } + def contains(key: String): Boolean = map contains key def keys: Iterable[String] = map.keys @@ -71,7 +80,8 @@ class Configuration(val map: Map[String, Any]) { } } - def getAny(key: String, defaultValue: Any): Any = getAny(key).getOrElse(defaultValue) + def getAny(key: String, defaultValue: Any): Any = + getAny(key).getOrElse(outputIfDesiredAndReturnInput(key, defaultValue)) def getListAny(key: String): Seq[Any] = { try { @@ -83,7 +93,8 @@ class Configuration(val map: Map[String, Any]) { def getString(key: String): Option[String] = map.get(key).map(_.toString) - def getString(key: String, defaultValue: String): String = getString(key).getOrElse(defaultValue) + def getString(key: String, defaultValue: String): String = + getString(key).getOrElse(outputIfDesiredAndReturnInput(key, defaultValue)) def getList(key: String): Seq[String] = { try { @@ -101,7 +112,8 @@ class Configuration(val map: Map[String, Any]) { } } - def getInt(key: String, defaultValue: Int): Int = getInt(key).getOrElse(defaultValue) + def getInt(key: String, defaultValue: Int): Int = + getInt(key).getOrElse(outputIfDesiredAndReturnInput(key, defaultValue)) def getLong(key: String): Option[Long] = { try { @@ -111,7 +123,8 @@ class Configuration(val map: Map[String, Any]) { } } - def getLong(key: String, defaultValue: Long): Long = getLong(key).getOrElse(defaultValue) + def getLong(key: String, defaultValue: Long): Long = + getLong(key).getOrElse(outputIfDesiredAndReturnInput(key, defaultValue)) def getFloat(key: String): Option[Float] = { try { @@ -121,7 +134,8 @@ class Configuration(val map: Map[String, Any]) { } } - def getFloat(key: String, defaultValue: Float): Float = getFloat(key).getOrElse(defaultValue) + def getFloat(key: String, defaultValue: Float): Float = + getFloat(key).getOrElse(outputIfDesiredAndReturnInput(key, 
defaultValue)) def getDouble(key: String): Option[Double] = { try { @@ -131,7 +145,8 @@ class Configuration(val map: Map[String, Any]) { } } - def getDouble(key: String, defaultValue: Double): Double = getDouble(key).getOrElse(defaultValue) + def getDouble(key: String, defaultValue: Double): Double = + getDouble(key).getOrElse(outputIfDesiredAndReturnInput(key, defaultValue)) def getBoolean(key: String): Option[Boolean] = { getString(key) flatMap { s => @@ -141,11 +156,13 @@ class Configuration(val map: Map[String, Any]) { } } - def getBoolean(key: String, defaultValue: Boolean): Boolean = getBool(key).getOrElse(defaultValue) + def getBoolean(key: String, defaultValue: Boolean): Boolean = + getBool(key).getOrElse(outputIfDesiredAndReturnInput(key, defaultValue)) def getBool(key: String): Option[Boolean] = getBoolean(key) - def getBool(key: String, defaultValue: Boolean): Boolean = getBoolean(key, defaultValue) + def getBool(key: String, defaultValue: Boolean): Boolean = + getBoolean(key, defaultValue) def apply(key: String): String = getString(key) match { case None => throw new ConfigurationException("undefined config: " + key) diff --git a/akka-actor/src/main/scala/akka/config/SupervisionConfig.scala b/akka-actor/src/main/scala/akka/config/SupervisionConfig.scala index 8ab4b4656b..e695f0301e 100644 --- a/akka-actor/src/main/scala/akka/config/SupervisionConfig.scala +++ b/akka-actor/src/main/scala/akka/config/SupervisionConfig.scala @@ -46,6 +46,12 @@ object Supervision { if (maxNrOfRetries < 0) None else Some(maxNrOfRetries), if (withinTimeRange < 0) None else Some(withinTimeRange)) } + /** + * Restart all actors linked to the same supervisor when one fails, + * trapExit = which Throwables should be intercepted + * maxNrOfRetries = the number of times an actor is allowed to be restarted + * withinTimeRange = millisecond time window for maxNrOfRetries, negative means no window + */ case class AllForOneStrategy(override val trapExit: List[Class[_ <: Throwable]], 
maxNrOfRetries: Option[Int] = None, withinTimeRange: Option[Int] = None) extends FaultHandlingStrategy(trapExit) { @@ -68,6 +74,12 @@ object Supervision { if (maxNrOfRetries < 0) None else Some(maxNrOfRetries), if (withinTimeRange < 0) None else Some(withinTimeRange)) } + /** + * Restart an actor when it fails + * trapExit = which Throwables should be intercepted + * maxNrOfRetries = the number of times an actor is allowed to be restarted + * withinTimeRange = millisecond time window for maxNrOfRetries, negative means no window + */ case class OneForOneStrategy(override val trapExit: List[Class[_ <: Throwable]], maxNrOfRetries: Option[Int] = None, withinTimeRange: Option[Int] = None) extends FaultHandlingStrategy(trapExit) { diff --git a/akka-actor/src/main/scala/akka/dataflow/DataFlow.scala b/akka-actor/src/main/scala/akka/dataflow/DataFlow.scala new file mode 100644 index 0000000000..13438132e6 --- /dev/null +++ b/akka-actor/src/main/scala/akka/dataflow/DataFlow.scala @@ -0,0 +1,165 @@ +/** + * Copyright (C) 2009-2011 Scalable Solutions AB + */ + +package akka.dataflow + +import java.util.concurrent.atomic.AtomicReference +import java.util.concurrent.{ConcurrentLinkedQueue, LinkedBlockingQueue} + +import akka.event.EventHandler +import akka.actor.{Actor, ActorRef} +import akka.actor.Actor._ +import akka.dispatch.CompletableFuture +import akka.AkkaException +import akka.japi.{ Function, Effect } + +/** + * Implements Oz-style dataflow (single assignment) variables. + * + * @author Jonas Bonér + */ +object DataFlow { + object Start + object Exit + + class DataFlowVariableException(message: String, cause: Throwable = null) extends AkkaException(message, cause) + + /** + * Executes the supplied thunk in another thread. + */ + def thread(body: => Unit): Unit = spawn(body) + + /** + * JavaAPI. + * Executes the supplied Effect in another thread. + */ + def thread(body: Effect): Unit = spawn(body.apply) + + /** + * Executes the supplied function in another thread. 
+ */ + def thread[A <: AnyRef, R <: AnyRef](body: A => R) = + actorOf(new ReactiveEventBasedThread(body)).start() + + /** + * JavaAPI. + * Executes the supplied Function in another thread. + */ + def thread[A <: AnyRef, R <: AnyRef](body: Function[A,R]) = + actorOf(new ReactiveEventBasedThread(body.apply)).start() + + private class ReactiveEventBasedThread[A <: AnyRef, T <: AnyRef](body: A => T) + extends Actor { + def receive = { + case Exit => self.stop() + case message => self.reply(body(message.asInstanceOf[A])) + } + } + + private object DataFlowVariable { + private sealed abstract class DataFlowVariableMessage + private case class Set[T <: Any](value: T) extends DataFlowVariableMessage + private object Get extends DataFlowVariableMessage + } + + /** + * @author Jonas Bonér + */ + @deprecated("Superceeded by Future and CompletableFuture as of 1.1", "1.1") + sealed class DataFlowVariable[T <: Any](timeoutMs: Long) { + import DataFlowVariable._ + + def this() = this(1000 * 60) + + private val value = new AtomicReference[Option[T]](None) + private val blockedReaders = new ConcurrentLinkedQueue[ActorRef] + + private class In[T <: Any](dataFlow: DataFlowVariable[T]) extends Actor { + self.timeout = timeoutMs + def receive = { + case s@Set(v) => + if (dataFlow.value.compareAndSet(None, Some(v.asInstanceOf[T]))) { + while(dataFlow.blockedReaders.peek ne null) + dataFlow.blockedReaders.poll ! 
s + } else throw new DataFlowVariableException( + "Attempt to change data flow variable (from [" + dataFlow.value.get + "] to [" + v + "])") + case Exit => self.stop() + } + } + + private class Out[T <: Any](dataFlow: DataFlowVariable[T]) extends Actor { + self.timeout = timeoutMs + private var readerFuture: Option[CompletableFuture[Any]] = None + def receive = { + case Get => dataFlow.value.get match { + case Some(value) => self reply value + case None => readerFuture = self.senderFuture + } + case Set(v:T) => readerFuture.map(_ completeWithResult v) + case Exit => self.stop() + } + } + + private[this] val in = actorOf(new In(this)).start() + + /** + * Sets the value of this variable (if unset) with the value of the supplied variable. + */ + def <<(ref: DataFlowVariable[T]) { + if (this.value.get.isEmpty) in ! Set(ref()) + else throw new DataFlowVariableException( + "Attempt to change data flow variable (from [" + this.value.get + "] to [" + ref() + "])") + } + + /** + * JavaAPI. + * Sets the value of this variable (if unset) with the value of the supplied variable. + */ + def set(ref: DataFlowVariable[T]) { this << ref } + + /** + * Sets the value of this variable (if unset). + */ + def <<(value: T) { + if (this.value.get.isEmpty) in ! Set(value) + else throw new DataFlowVariableException( + "Attempt to change data flow variable (from [" + this.value.get + "] to [" + value + "])") + } + + /** + * JavaAPI. + * Sets the value of this variable (if unset) with the value of the supplied variable. + */ + def set(value: T) { this << value } + + /** + * Retrieves the value of variable, throws a DataFlowVariableException if it times out. + */ + def get(): T = this() + + /** + * Retrieves the value of variable, throws a DataFlowVariableException if it times out. + */ + def apply(): T = { + value.get getOrElse { + val out = actorOf(new Out(this)).start() + + val result = try { + blockedReaders offer out + (out !! 
Get).as[T] + } catch { + case e: Exception => + EventHandler.error(e, this, e.getMessage) + out ! Exit + throw e + } + + result.getOrElse(throw new DataFlowVariableException( + "Timed out (after " + timeoutMs + " milliseconds) while waiting for result")) + } + } + + def shutdown() { in ! Exit } + } +} diff --git a/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala b/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala index 04ff6a9504..eee5d53c51 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala @@ -187,14 +187,14 @@ object Dispatchers { case "GlobalExecutorBasedEventDriven" => GlobalExecutorBasedEventDrivenDispatcherConfigurator case fqn => ReflectiveAccess.getClassFor[MessageDispatcherConfigurator](fqn) match { - case Some(clazz) => - val instance = ReflectiveAccess.createInstance[MessageDispatcherConfigurator](clazz, Array[Class[_]](), Array[AnyRef]()) - if (instance.isEmpty) - throw new IllegalArgumentException("Cannot instantiate MessageDispatcherConfigurator type [%s], make sure it has a default no-args constructor" format fqn) - else - instance.get - case None => - throw new IllegalArgumentException("Unknown MessageDispatcherConfigurator type [%s]" format fqn) + case r: Right[_, Class[MessageDispatcherConfigurator]] => + ReflectiveAccess.createInstance[MessageDispatcherConfigurator](r.b, Array[Class[_]](), Array[AnyRef]()) match { + case r: Right[Exception, MessageDispatcherConfigurator] => r.b + case l: Left[Exception, MessageDispatcherConfigurator] => + throw new IllegalArgumentException("Cannot instantiate MessageDispatcherConfigurator type [%s], make sure it has a default no-args constructor" format fqn, l.a) + } + case l: Left[Exception, _] => + throw new IllegalArgumentException("Unknown MessageDispatcherConfigurator type [%s]" format fqn, l.a) } } map { _ configure cfg diff --git 
a/akka-actor/src/main/scala/akka/dispatch/ExecutorBasedEventDrivenDispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/ExecutorBasedEventDrivenDispatcher.scala index 105028f693..bc03607fdc 100644 --- a/akka-actor/src/main/scala/akka/dispatch/ExecutorBasedEventDrivenDispatcher.scala +++ b/akka-actor/src/main/scala/akka/dispatch/ExecutorBasedEventDrivenDispatcher.scala @@ -5,12 +5,10 @@ package akka.dispatch import akka.event.EventHandler -import akka.actor.{ActorRef, IllegalActorStateException} -import akka.util.{ReflectiveAccess, Switch} +import akka.actor.{ActorRef} -import java.util.Queue import java.util.concurrent.atomic.AtomicReference -import java.util.concurrent.{ TimeUnit, ExecutorService, RejectedExecutionException, ConcurrentLinkedQueue, LinkedBlockingQueue} +import java.util.concurrent.{ TimeUnit, ExecutorService, RejectedExecutionException, ConcurrentLinkedQueue} /** * Default settings are: @@ -99,7 +97,7 @@ class ExecutorBasedEventDrivenDispatcher( registerForExecution(mbox) } - private[akka] def executeFuture(invocation: FutureInvocation): Unit = if (active.isOn) { + private[akka] def executeFuture(invocation: FutureInvocation[_]): Unit = if (active.isOn) { try executorService.get() execute invocation catch { case e: RejectedExecutionException => @@ -117,20 +115,14 @@ class ExecutorBasedEventDrivenDispatcher( def createMailbox(actorRef: ActorRef): AnyRef = mailboxType match { case b: UnboundedMailbox => - if (b.blocking) { - new DefaultUnboundedMessageQueue(true) with ExecutableMailbox { - final def dispatcher = ExecutorBasedEventDrivenDispatcher.this - } - } else { //If we have an unbounded, non-blocking mailbox, we can go lockless - new ConcurrentLinkedQueue[MessageInvocation] with MessageQueue with ExecutableMailbox { - final def dispatcher = ExecutorBasedEventDrivenDispatcher.this - final def enqueue(m: MessageInvocation) = this.add(m) - final def dequeue(): MessageInvocation = this.poll() - } + new ConcurrentLinkedQueue[MessageInvocation] 
with MessageQueue with ExecutableMailbox { + @inline final def dispatcher = ExecutorBasedEventDrivenDispatcher.this + @inline final def enqueue(m: MessageInvocation) = this.add(m) + @inline final def dequeue(): MessageInvocation = this.poll() } case b: BoundedMailbox => - new DefaultBoundedMessageQueue(b.capacity, b.pushTimeOut, b.blocking) with ExecutableMailbox { - final def dispatcher = ExecutorBasedEventDrivenDispatcher.this + new DefaultBoundedMessageQueue(b.capacity, b.pushTimeOut) with ExecutableMailbox { + @inline final def dispatcher = ExecutorBasedEventDrivenDispatcher.this } } @@ -294,13 +286,13 @@ trait PriorityMailbox { self: ExecutorBasedEventDrivenDispatcher => override def createMailbox(actorRef: ActorRef): AnyRef = self.mailboxType match { case b: UnboundedMailbox => - new UnboundedPriorityMessageQueue(b.blocking, comparator) with ExecutableMailbox { - final def dispatcher = self + new UnboundedPriorityMessageQueue(comparator) with ExecutableMailbox { + @inline final def dispatcher = self } case b: BoundedMailbox => - new BoundedPriorityMessageQueue(b.capacity, b.pushTimeOut, b.blocking, comparator) with ExecutableMailbox { - final def dispatcher = self + new BoundedPriorityMessageQueue(b.capacity, b.pushTimeOut, comparator) with ExecutableMailbox { + @inline final def dispatcher = self } } } diff --git a/akka-actor/src/main/scala/akka/dispatch/ExecutorBasedEventDrivenWorkStealingDispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/ExecutorBasedEventDrivenWorkStealingDispatcher.scala index f2f63a3ff4..637aea8e60 100644 --- a/akka-actor/src/main/scala/akka/dispatch/ExecutorBasedEventDrivenWorkStealingDispatcher.scala +++ b/akka-actor/src/main/scala/akka/dispatch/ExecutorBasedEventDrivenWorkStealingDispatcher.scala @@ -5,11 +5,8 @@ package akka.dispatch import akka.actor.{ActorRef, Actor, IllegalActorStateException} -import akka.util.{ReflectiveAccess, Switch} -import java.util.Queue -import java.util.concurrent.atomic.{AtomicReference, 
AtomicInteger} -import java.util.concurrent.{ TimeUnit, ExecutorService, RejectedExecutionException, ConcurrentLinkedQueue, LinkedBlockingQueue} +import util.DynamicVariable /** * An executor based event driven dispatcher which will try to redistribute work from busy actors to idle actors. It is assumed @@ -55,6 +52,7 @@ class ExecutorBasedEventDrivenWorkStealingDispatcher( @volatile private var actorType: Option[Class[_]] = None @volatile private var members = Vector[ActorRef]() + private val donationInProgress = new DynamicVariable(false) private[akka] override def register(actorRef: ActorRef) = { //Verify actor type conformity @@ -78,18 +76,22 @@ class ExecutorBasedEventDrivenWorkStealingDispatcher( override private[akka] def dispatch(invocation: MessageInvocation) = { val mbox = getMailbox(invocation.receiver) - /*if (!mbox.isEmpty && attemptDonationOf(invocation, mbox)) { + if (donationInProgress.value == false && (!mbox.isEmpty || mbox.dispatcherLock.locked) && attemptDonationOf(invocation, mbox)) { //We were busy and we got to donate the message to some other lucky guy, we're done here - } else {*/ + } else { mbox enqueue invocation registerForExecution(mbox) - //} + } } override private[akka] def reRegisterForExecution(mbox: MessageQueue with ExecutableMailbox): Unit = { - while(donateFrom(mbox)) {} //When we reregister, first donate messages to another actor + try { + donationInProgress.value = true + while(donateFrom(mbox)) {} //When we reregister, first donate messages to another actor + } finally { donationInProgress.value = false } + if (!mbox.isEmpty) //If we still have messages left to process, reschedule for execution - super.reRegisterForExecution(mbox) + super.reRegisterForExecution(mbox) } /** @@ -110,13 +112,14 @@ class ExecutorBasedEventDrivenWorkStealingDispatcher( /** * Returns true if the donation succeeded or false otherwise */ - /*protected def attemptDonationOf(message: MessageInvocation, donorMbox: MessageQueue with ExecutableMailbox): 
Boolean = { + protected def attemptDonationOf(message: MessageInvocation, donorMbox: MessageQueue with ExecutableMailbox): Boolean = try { + donationInProgress.value = true val actors = members // copy to prevent concurrent modifications having any impact doFindDonorRecipient(donorMbox, actors, System.identityHashCode(message) % actors.size) match { case null => false case recipient => donate(message, recipient) } - }*/ + } finally { donationInProgress.value = false } /** * Rewrites the message and adds that message to the recipients mailbox diff --git a/akka-actor/src/main/scala/akka/dispatch/Future.scala b/akka-actor/src/main/scala/akka/dispatch/Future.scala index ce375e7061..ba5233f48b 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Future.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Future.scala @@ -7,21 +7,24 @@ package akka.dispatch import akka.AkkaException import akka.event.EventHandler import akka.actor.{Actor, Channel} -import akka.routing.Dispatcher +import akka.util.Duration import akka.japi.{ Procedure, Function => JFunc } +import scala.util.continuations._ + import java.util.concurrent.locks.ReentrantLock import java.util.concurrent. {ConcurrentLinkedQueue, TimeUnit, Callable} import java.util.concurrent.TimeUnit.{NANOSECONDS => NANOS, MILLISECONDS => MILLIS} -import java.util.concurrent.atomic. {AtomicBoolean, AtomicInteger} +import java.util.concurrent.atomic. {AtomicBoolean} import java.lang.{Iterable => JIterable} import java.util.{LinkedList => JLinkedList} import scala.annotation.tailrec import scala.collection.generic.CanBuildFrom import scala.collection.mutable.Builder +import scala.collection.mutable.Stack -class FutureTimeoutException(message: String) extends AkkaException(message) +class FutureTimeoutException(message: String, cause: Throwable = null) extends AkkaException(message, cause) object Futures { @@ -159,10 +162,10 @@ object Futures { /** * Java API. - * Simple version of Futures.traverse. 
Transforms a java.lang.Iterable[Future[A]] into a Future[java.util.LinkedList[A]]. + * Simple version of Futures.traverse. Transforms a java.lang.Iterable[Future[A]] into a Future[java.lang.Iterable[A]]. * Useful for reducing many Futures into a single Future. */ - def sequence[A](in: JIterable[Future[A]], timeout: Long): Future[JLinkedList[A]] = + def sequence[A](in: JIterable[Future[A]], timeout: Long): Future[JIterable[A]] = scala.collection.JavaConversions.iterableAsScalaIterable(in).foldLeft(Future(new JLinkedList[A]()))((fr, fa) => for (r <- fr; a <- fa) yield { r add a @@ -171,18 +174,18 @@ object Futures { /** * Java API. - * Simple version of Futures.traverse. Transforms a java.lang.Iterable[Future[A]] into a Future[java.util.LinkedList[A]]. + * Simple version of Futures.traverse. Transforms a java.lang.Iterable[Future[A]] into a Future[java.lang.Iterable[A]]. * Useful for reducing many Futures into a single Future. */ - def sequence[A](in: JIterable[Future[A]]): Future[JLinkedList[A]] = sequence(in, Actor.TIMEOUT) + def sequence[A](in: JIterable[Future[A]]): Future[JIterable[A]] = sequence(in, Actor.TIMEOUT) /** * Java API. - * Transforms a java.lang.Iterable[A] into a Future[java.util.LinkedList[B]] using the provided Function A => Future[B]. + * Transforms a java.lang.Iterable[A] into a Future[java.lang.Iterable[B]] using the provided Function A => Future[B]. * This is useful for performing a parallel map. For example, to apply a function to all items of a list * in parallel. */ - def traverse[A, B](in: JIterable[A], timeout: Long, fn: JFunc[A,Future[B]]): Future[JLinkedList[B]] = + def traverse[A, B](in: JIterable[A], timeout: Long, fn: JFunc[A,Future[B]]): Future[JIterable[B]] = scala.collection.JavaConversions.iterableAsScalaIterable(in).foldLeft(Future(new JLinkedList[B]())){(fr, a) => val fb = fn(a) for (r <- fr; b <- fb) yield { @@ -193,15 +196,17 @@ object Futures { /** * Java API. 
- * Transforms a java.lang.Iterable[A] into a Future[java.util.LinkedList[B]] using the provided Function A => Future[B]. + * Transforms a java.lang.Iterable[A] into a Future[java.lang.Iterable[B]] using the provided Function A => Future[B]. * This is useful for performing a parallel map. For example, to apply a function to all items of a list * in parallel. - */ + * def traverse[A, B, M[_] <: Traversable[_]](in: M[A], timeout: Long = Actor.TIMEOUT)(fn: A => Future[B])(implicit cbf: CanBuildFrom[M[A], B, M[B]]): Future[M[B]] = in.foldLeft(new DefaultCompletableFuture[Builder[B, M[B]]](timeout).completeWithResult(cbf(in)): Future[Builder[B, M[B]]]) { (fr, a) => val fb = fn(a.asInstanceOf[A]) for (r <- fr; b <-fb) yield (r += b) }.map(_.result) + */ + def traverse[A, B](in: JIterable[A], fn: JFunc[A,Future[B]]): Future[JIterable[B]] = traverse(in, Actor.TIMEOUT, fn) } object Future { @@ -210,18 +215,15 @@ object Future { * This method constructs and returns a Future that will eventually hold the result of the execution of the supplied body * The execution is performed by the specified Dispatcher. 
*/ - def apply[T](body: => T, timeout: Long = Actor.TIMEOUT)(implicit dispatcher: MessageDispatcher): Future[T] = { - val f = new DefaultCompletableFuture[T](timeout) - dispatcher.dispatchFuture(FutureInvocation(f.asInstanceOf[CompletableFuture[Any]], () => body)) - f - } + def apply[T](body: => T, timeout: Long = Actor.TIMEOUT)(implicit dispatcher: MessageDispatcher): Future[T] = + dispatcher.dispatchFuture(() => body, timeout) /** * Construct a completable channel */ def channel(timeout: Long = Actor.TIMEOUT) = new Channel[Any] { val future = empty[Any](timeout) - def !(msg: Any) = future << msg + def !(msg: Any) = future completeWithResult msg } /** @@ -252,22 +254,60 @@ object Future { val fb = fn(a.asInstanceOf[A]) for (r <- fr; b <-fb) yield (r += b) }.map(_.result) + + /** + * Captures a block that will be transformed into 'Continuation Passing Style' using Scala's Delimited + * Continuations plugin. + * + * Within the block, the result of a Future may be accessed by calling Future.apply. At that point + * execution is suspended with the rest of the block being stored in a continuation until the result + * of the Future is available. If an Exception is thrown while processing, it will be contained + * within the resulting Future. + * + * This allows working with Futures in an imperative style without blocking for each result. + * + * Completing a Future using 'CompletableFuture << Future' will also suspend execution until the + * value of the other Future is available. + * + * The Delimited Continuations compiler plugin must be enabled in order to use this method. 
+ */ + def flow[A](body: => A @cps[Future[Any]], timeout: Long = Actor.TIMEOUT): Future[A] = { + val future = Promise[A](timeout) + (reset(future.asInstanceOf[CompletableFuture[Any]].completeWithResult(body)): Future[Any]) onComplete { f => + val opte = f.exception + if (opte.isDefined) future completeWithException (opte.get) + } + future + } + + private[akka] val callbacksPendingExecution = new ThreadLocal[Option[Stack[() => Unit]]]() { + override def initialValue = None + } } sealed trait Future[+T] { /** - * Returns the result of this future after waiting for it to complete, - * this method will throw any throwable that this Future was completed with - * and will throw a java.util.concurrent.TimeoutException if there is no result - * within the Futures timeout + * For use only within a Future.flow block or another compatible Delimited Continuations reset block. + * + * Returns the result of this Future without blocking, by suspending execution and storing it as a + * continuation until the result is available. + * + * If this Future is untyped (a Future[Nothing]), a type parameter must be explicitly provided or + * execution will fail. The normal result of getting a Future from an ActorRef using !!! will return + * an untyped Future. */ - def apply(): T = this.await.resultOrException.get + def apply[A >: T](): A @cps[Future[Any]] = shift(this flatMap (_: A => Future[Any])) /** - * Java API for apply() + * Blocks awaiting completion of this Future, then returns the resulting value, + * or throws the completed exception + * + * Scala & Java API + * + * throws FutureTimeoutException if this Future times out when waiting for completion */ - def get: T = apply() + def get: T = this.await.resultOrException.get /** * Blocks the current thread until the Future has been completed or the @@ -276,11 +316,20 @@ sealed trait Future[+T] { */ def await : Future[T] + /** + * Blocks the current thread until the Future has been completed or the + * timeout has expired. 
The timeout will be the least value of 'atMost' and the timeout + * supplied at the constructuion of this Future. + * In the case of the timeout expiring a FutureTimeoutException will be thrown. + */ + def await(atMost: Duration) : Future[T] + /** * Blocks the current thread until the Future has been completed. Use * caution with this method as it ignores the timeout and will block * indefinitely if the Future is never completed. */ + @deprecated("Will be removed after 1.1, it's dangerous and can cause deadlocks, agony and insanity.", "1.1") def awaitBlocking : Future[T] /** @@ -318,24 +367,6 @@ sealed trait Future[+T] { else None } - /** - * Waits for the completion of this Future, then returns the completed value. - * If the Future's timeout expires while waiting a FutureTimeoutException - * will be thrown. - * - * Equivalent to calling future.await.value. - */ - def awaitValue: Option[Either[Throwable, T]] - - /** - * Returns the result of the Future if one is available within the specified - * time, if the time left on the future is less than the specified time, the - * time left on the future will be used instead of the specified time. - * returns None if no result, Some(Right(t)) if a result, or - * Some(Left(error)) if there was an exception - */ - def valueWithin(time: Long, unit: TimeUnit): Option[Either[Throwable, T]] - /** * Returns the contained exception of this Future if it exists. 
*/ @@ -387,21 +418,18 @@ sealed trait Future[+T] { final def collect[A](pf: PartialFunction[Any, A]): Future[A] = { val fa = new DefaultCompletableFuture[A](timeoutInNanos, NANOS) onComplete { ft => - val optv = ft.value - if (optv.isDefined) { - val v = optv.get - fa complete { - if (v.isLeft) v.asInstanceOf[Either[Throwable, A]] - else { - try { - val r = v.right.get - if (pf isDefinedAt r) Right(pf(r)) - else Left(new MatchError(r)) - } catch { - case e: Exception => - EventHandler.error(e, this, e.getMessage) - Left(e) - } + val v = ft.value.get + fa complete { + if (v.isLeft) v.asInstanceOf[Either[Throwable, A]] + else { + try { + val r = v.right.get + if (pf isDefinedAt r) Right(pf(r)) + else Left(new MatchError(r)) + } catch { + case e: Exception => + EventHandler.error(e, this, e.getMessage) + Left(e) } } } @@ -409,6 +437,36 @@ sealed trait Future[+T] { fa } + /** + * Creates a new Future that will handle any matching Throwable that this + * Future might contain. If there is no match, or if this Future contains + * a valid result then the new Future will contain the same. + * Example: + *

+   * Future(6 / 0) failure { case e: ArithmeticException => 0 } // result: 0
+   * Future(6 / 0) failure { case e: NotFoundException   => 0 } // result: exception
+   * Future(6 / 2) failure { case e: ArithmeticException => 0 } // result: 3
+   * 
+ */ + final def failure[A >: T](pf: PartialFunction[Throwable, A]): Future[A] = { + val fa = new DefaultCompletableFuture[A](timeoutInNanos, NANOS) + onComplete { ft => + val opte = ft.exception + fa complete { + if (opte.isDefined) { + val e = opte.get + try { + if (pf isDefinedAt e) Right(pf(e)) + else Left(e) + } catch { + case x: Exception => Left(x) + } + } else ft.value.get + } + } + fa + } + /** * Creates a new Future by applying a function to the successful result of * this Future. If this Future is completed with an exception then the new @@ -486,7 +544,7 @@ sealed trait Future[+T] { f(optr.get) } - final def filter(p: T => Boolean): Future[T] = { + final def filter(p: Any => Boolean): Future[Any] = { val f = new DefaultCompletableFuture[T](timeoutInNanos, NANOS) onComplete { ft => val optv = ft.value @@ -531,7 +589,15 @@ sealed trait Future[+T] { final def foreach[A >: T](proc: Procedure[A]): Unit = foreach(proc(_)) - final def filter[A >: T](p: JFunc[A,Boolean]): Future[T] = filter(p(_)) + final def filter(p: JFunc[Any,Boolean]): Future[Any] = filter(p(_)) + +} + +object Promise { + + def apply[A](timeout: Long): CompletableFuture[A] = new DefaultCompletableFuture[A](timeout) + + def apply[A](): CompletableFuture[A] = apply(Actor.TIMEOUT) } @@ -567,15 +633,22 @@ trait CompletableFuture[T] extends Future[T] { this } - /** - * Alias for complete(Right(value)). 
- */ - final def << (value: T): Future[T] = complete(Right(value)) + final def << (value: T): Future[T] @cps[Future[Any]] = shift { cont: (Future[T] => Future[Any]) => cont(complete(Right(value))) } + + final def << (other: Future[T]): Future[T] @cps[Future[Any]] = shift { cont: (Future[T] => Future[Any]) => + val fr = new DefaultCompletableFuture[Any](Actor.TIMEOUT) + this completeWith other onComplete { f => + try { + fr completeWith cont(f) + } catch { + case e: Exception => + EventHandler.error(e, this, e.getMessage) + fr completeWithException e + } + } + fr + } - /** - * Alias for completeWith(other). - */ - final def << (other : Future[T]): Future[T] = completeWith(other) } /** @@ -594,45 +667,34 @@ class DefaultCompletableFuture[T](timeout: Long, timeunit: TimeUnit) extends Com private var _value: Option[Either[Throwable, T]] = None private var _listeners: List[Future[T] => Unit] = Nil + /** + * Must be called inside _lock.lock<->_lock.unlock + */ @tailrec - private def awaitUnsafe(wait: Long): Boolean = { - if (_value.isEmpty && wait > 0) { + private def awaitUnsafe(waitTimeNanos: Long): Boolean = { + if (_value.isEmpty && waitTimeNanos > 0) { val start = currentTimeInNanos - val remaining = try { - _signal.awaitNanos(wait) + val remainingNanos = try { + _signal.awaitNanos(waitTimeNanos) } catch { case e: InterruptedException => - wait - (currentTimeInNanos - start) + waitTimeNanos - (currentTimeInNanos - start) } - awaitUnsafe(remaining) + awaitUnsafe(remainingNanos) } else { _value.isDefined } } - def awaitValue: Option[Either[Throwable, T]] = { + def await(atMost: Duration) = { _lock.lock - try { - awaitUnsafe(timeoutInNanos - (currentTimeInNanos - _startTimeInNanos)) - _value - } finally { - _lock.unlock - } - } - - def valueWithin(time: Long, unit: TimeUnit): Option[Either[Throwable, T]] = { - _lock.lock - try { - awaitUnsafe(unit.toNanos(time).min(timeoutInNanos - (currentTimeInNanos - _startTimeInNanos))) - _value - } finally { - _lock.unlock - } + 
if (try { awaitUnsafe(atMost.toNanos min timeLeft()) } finally { _lock.unlock }) this + else throw new FutureTimeoutException("Futures timed out after [" + NANOS.toMillis(timeoutInNanos) + "] milliseconds") } def await = { _lock.lock - if (try { awaitUnsafe(timeoutInNanos - (currentTimeInNanos - _startTimeInNanos)) } finally { _lock.unlock }) this + if (try { awaitUnsafe(timeLeft()) } finally { _lock.unlock }) this else throw new FutureTimeoutException("Futures timed out after [" + NANOS.toMillis(timeoutInNanos) + "] milliseconds") } @@ -648,7 +710,7 @@ class DefaultCompletableFuture[T](timeout: Long, timeunit: TimeUnit) extends Com } } - def isExpired: Boolean = timeoutInNanos - (currentTimeInNanos - _startTimeInNanos) <= 0 + def isExpired: Boolean = timeLeft() <= 0 def value: Option[Either[Throwable, T]] = { _lock.lock @@ -662,7 +724,7 @@ class DefaultCompletableFuture[T](timeout: Long, timeunit: TimeUnit) extends Com def complete(value: Either[Throwable, T]): DefaultCompletableFuture[T] = { _lock.lock val notifyTheseListeners = try { - if (_value.isEmpty) { + if (_value.isEmpty && !isExpired) { //Only complete if we aren't expired _value = Some(value) val existingListeners = _listeners _listeners = Nil @@ -673,8 +735,29 @@ class DefaultCompletableFuture[T](timeout: Long, timeunit: TimeUnit) extends Com _lock.unlock } - if (notifyTheseListeners.nonEmpty) - notifyTheseListeners.reverse foreach notify + if (notifyTheseListeners.nonEmpty) { // Steps to ensure we don't run into a stack-overflow situation + @tailrec def runCallbacks(rest: List[Future[T] => Unit], callbacks: Stack[() => Unit]) { + if (rest.nonEmpty) { + notifyCompleted(rest.head) + while (callbacks.nonEmpty) { callbacks.pop().apply() } + runCallbacks(rest.tail, callbacks) + } + } + + val pending = Future.callbacksPendingExecution.get + if (pending.isDefined) { //Instead of nesting the calls to the callbacks (leading to stack overflow) + pending.get.push(() => { // Linearize/aggregate callbacks at top 
level and then execute + val doNotify = notifyCompleted _ //Hoist closure to avoid garbage + notifyTheseListeners foreach doNotify + }) + } else { + try { + val callbacks = Stack[() => Unit]() // Allocate new aggregator for pending callbacks + Future.callbacksPendingExecution.set(Some(callbacks)) // Specify the callback aggregator + runCallbacks(notifyTheseListeners, callbacks) // Execute callbacks, if they trigger new callbacks, they are aggregated + } finally { Future.callbacksPendingExecution.set(None) } // Ensure cleanup + } + } this } @@ -683,19 +766,21 @@ class DefaultCompletableFuture[T](timeout: Long, timeunit: TimeUnit) extends Com _lock.lock val notifyNow = try { if (_value.isEmpty) { - _listeners ::= func - false + if(!isExpired) { //Only add the listener if the future isn't expired + _listeners ::= func + false + } else false //Will never run the callback since the future is expired } else true } finally { _lock.unlock } - if (notifyNow) notify(func) + if (notifyNow) notifyCompleted(func) this } - private def notify(func: Future[T] => Unit) { + private def notifyCompleted(func: Future[T] => Unit) { try { func(this) } catch { @@ -703,7 +788,8 @@ class DefaultCompletableFuture[T](timeout: Long, timeunit: TimeUnit) extends Com } } - private def currentTimeInNanos: Long = MILLIS.toNanos(System.currentTimeMillis) + @inline private def currentTimeInNanos: Long = MILLIS.toNanos(System.currentTimeMillis) + @inline private def timeLeft(): Long = timeoutInNanos - (currentTimeInNanos - _startTimeInNanos) } /** @@ -715,8 +801,7 @@ sealed class AlreadyCompletedFuture[T](suppliedValue: Either[Throwable, T]) exte def complete(value: Either[Throwable, T]): CompletableFuture[T] = this def onComplete(func: Future[T] => Unit): Future[T] = { func(this); this } - def awaitValue: Option[Either[Throwable, T]] = value - def valueWithin(time: Long, unit: TimeUnit): Option[Either[Throwable, T]] = value + def await(atMost: Duration): Future[T] = this def await : Future[T] = this 
def awaitBlocking : Future[T] = this def isExpired: Boolean = true diff --git a/akka-actor/src/main/scala/akka/dispatch/MailboxHandling.scala b/akka-actor/src/main/scala/akka/dispatch/MailboxHandling.scala index e0586a40a7..3b3032ad90 100644 --- a/akka-actor/src/main/scala/akka/dispatch/MailboxHandling.scala +++ b/akka-actor/src/main/scala/akka/dispatch/MailboxHandling.scala @@ -4,14 +4,13 @@ package akka.dispatch -import akka.actor.{Actor, ActorType, ActorRef, ActorInitializationException} import akka.AkkaException -import java.util.{Queue, List, Comparator, PriorityQueue} +import java.util.{Comparator, PriorityQueue} import java.util.concurrent._ import akka.util._ -class MessageQueueAppendFailedException(message: String) extends AkkaException(message) +class MessageQueueAppendFailedException(message: String, cause: Throwable = null) extends AkkaException(message, cause) /** * @author Jonas Bonér @@ -30,9 +29,8 @@ trait MessageQueue { */ sealed trait MailboxType -case class UnboundedMailbox(val blocking: Boolean = false) extends MailboxType +case class UnboundedMailbox() extends MailboxType case class BoundedMailbox( - val blocking: Boolean = false, val capacity: Int = { if (Dispatchers.MAILBOX_CAPACITY < 0) Int.MaxValue else Dispatchers.MAILBOX_CAPACITY }, val pushTimeOut: Duration = Dispatchers.MAILBOX_PUSH_TIME_OUT) extends MailboxType { if (capacity < 0) throw new IllegalArgumentException("The capacity for BoundedMailbox can not be negative") @@ -40,46 +38,35 @@ case class BoundedMailbox( } trait UnboundedMessageQueueSemantics extends MessageQueue { self: BlockingQueue[MessageInvocation] => - def blockDequeue: Boolean - - final def enqueue(handle: MessageInvocation) { - this add handle - } - - final def dequeue(): MessageInvocation = { - if (blockDequeue) this.take() - else this.poll() - } + @inline final def enqueue(handle: MessageInvocation): Unit = this add handle + @inline final def dequeue(): MessageInvocation = this.poll() } trait 
BoundedMessageQueueSemantics extends MessageQueue { self: BlockingQueue[MessageInvocation] => - def blockDequeue: Boolean def pushTimeOut: Duration final def enqueue(handle: MessageInvocation) { - if (pushTimeOut.length > 0 && pushTimeOut.toMillis > 0) { - if (!this.offer(handle, pushTimeOut.length, pushTimeOut.unit)) - throw new MessageQueueAppendFailedException("Couldn't enqueue message " + handle + " to " + toString) + if (pushTimeOut.length > 0) { + this.offer(handle, pushTimeOut.length, pushTimeOut.unit) || { + throw new MessageQueueAppendFailedException("Couldn't enqueue message " + handle + " to " + toString) } } else this put handle } - final def dequeue(): MessageInvocation = - if (blockDequeue) this.take() - else this.poll() + @inline final def dequeue(): MessageInvocation = this.poll() } -class DefaultUnboundedMessageQueue(val blockDequeue: Boolean) extends +class DefaultUnboundedMessageQueue extends LinkedBlockingQueue[MessageInvocation] with UnboundedMessageQueueSemantics -class DefaultBoundedMessageQueue(capacity: Int, val pushTimeOut: Duration, val blockDequeue: Boolean) extends +class DefaultBoundedMessageQueue(capacity: Int, val pushTimeOut: Duration) extends LinkedBlockingQueue[MessageInvocation](capacity) with BoundedMessageQueueSemantics -class UnboundedPriorityMessageQueue(val blockDequeue: Boolean, cmp: Comparator[MessageInvocation]) extends +class UnboundedPriorityMessageQueue(cmp: Comparator[MessageInvocation]) extends PriorityBlockingQueue[MessageInvocation](11, cmp) with UnboundedMessageQueueSemantics -class BoundedPriorityMessageQueue(capacity: Int, val pushTimeOut: Duration, val blockDequeue: Boolean, cmp: Comparator[MessageInvocation]) extends - BoundedBlockingQueue[MessageInvocation](capacity, new PriorityQueue[MessageInvocation](11, cmp)) with - BoundedMessageQueueSemantics +class BoundedPriorityMessageQueue(capacity: Int, val pushTimeOut: Duration, cmp: Comparator[MessageInvocation]) extends + 
BoundedBlockingQueue[MessageInvocation](capacity, new PriorityQueue[MessageInvocation](11, cmp)) with + BoundedMessageQueueSemantics diff --git a/akka-actor/src/main/scala/akka/dispatch/MessageHandling.scala b/akka-actor/src/main/scala/akka/dispatch/MessageHandling.scala index 341987cb83..85d1bda374 100644 --- a/akka-actor/src/main/scala/akka/dispatch/MessageHandling.scala +++ b/akka-actor/src/main/scala/akka/dispatch/MessageHandling.scala @@ -5,11 +5,11 @@ package akka.dispatch import java.util.concurrent._ -import atomic. {AtomicInteger, AtomicBoolean, AtomicReference, AtomicLong} +import java.util.concurrent.atomic.AtomicLong import akka.event.EventHandler import akka.config.Configuration import akka.config.Config.TIME_UNIT -import akka.util.{Duration, Switch, ReentrantGuard, HashCode, ReflectiveAccess} +import akka.util.{Duration, Switch, ReentrantGuard} import java.util.concurrent.ThreadPoolExecutor.{AbortPolicy, CallerRunsPolicy, DiscardOldestPolicy, DiscardPolicy} import akka.actor._ @@ -30,16 +30,18 @@ final case class MessageInvocation(val receiver: ActorRef, } } -final case class FutureInvocation(future: CompletableFuture[Any], function: () => Any) extends Runnable { - val uuid = akka.actor.newUuid - - def run = future complete (try { - Right(function.apply) - } catch { - case e => - EventHandler.error(e, this, e.getMessage) - Left(e) - }) +final case class FutureInvocation[T](future: CompletableFuture[T], function: () => T, cleanup: () => Unit) extends Runnable { + def run = { + future complete (try { + Right(function()) + } catch { + case e => + EventHandler.error(e, this, e.getMessage) + Left(e) + } finally { + cleanup() + }) + } } object MessageDispatcher { @@ -57,7 +59,7 @@ trait MessageDispatcher { import MessageDispatcher._ protected val uuids = new ConcurrentSkipListSet[Uuid] - protected val futures = new ConcurrentSkipListSet[Uuid] + protected val futures = new AtomicLong(0L) protected val guard = new ReentrantGuard protected val active = new 
Switch(false) @@ -84,15 +86,27 @@ trait MessageDispatcher { private[akka] final def dispatchMessage(invocation: MessageInvocation): Unit = dispatch(invocation) - private[akka] final def dispatchFuture(invocation: FutureInvocation): Unit = { - guard withGuard { - futures add invocation.uuid - if (active.isOff) { active.switchOn { start } } + private[akka] final def dispatchFuture[T](block: () => T, timeout: Long): Future[T] = { + futures.getAndIncrement() + try { + val future = new DefaultCompletableFuture[T](timeout) + + if (active.isOff) + guard withGuard { active.switchOn { start } } + + executeFuture(FutureInvocation[T](future, block, futureCleanup)) + future + } catch { + case e => + futures.decrementAndGet + throw e } - invocation.future.onComplete { f => + } + + private val futureCleanup: () => Unit = + () => if (futures.decrementAndGet() == 0) { guard withGuard { - futures remove invocation.uuid - if (futures.isEmpty && uuids.isEmpty) { + if (futures.get == 0 && uuids.isEmpty) { shutdownSchedule match { case UNSCHEDULED => shutdownSchedule = SCHEDULED @@ -104,8 +118,6 @@ trait MessageDispatcher { } } } - executeFuture(invocation) - } private[akka] def register(actorRef: ActorRef) { if (actorRef.mailbox eq null) @@ -122,7 +134,7 @@ trait MessageDispatcher { private[akka] def unregister(actorRef: ActorRef) = { if (uuids remove actorRef.uuid) { actorRef.mailbox = null - if (uuids.isEmpty && futures.isEmpty){ + if (uuids.isEmpty && futures.get == 0){ shutdownSchedule match { case UNSCHEDULED => shutdownSchedule = SCHEDULED @@ -156,7 +168,7 @@ trait MessageDispatcher { shutdownSchedule = SCHEDULED Scheduler.scheduleOnce(this, timeoutMs, TimeUnit.MILLISECONDS) case SCHEDULED => - if (uuids.isEmpty() && futures.isEmpty) { + if (uuids.isEmpty && futures.get == 0) { active switchOff { shutdown // shut down in the dispatcher's references is zero } @@ -188,17 +200,17 @@ trait MessageDispatcher { */ private[akka] def dispatch(invocation: MessageInvocation): Unit - 
private[akka] def executeFuture(invocation: FutureInvocation): Unit + private[akka] def executeFuture(invocation: FutureInvocation[_]): Unit /** * Called one time every time an actor is attached to this dispatcher and this dispatcher was previously shutdown */ - private[akka] def start: Unit + private[akka] def start(): Unit /** * Called one time every time an actor is detached from this dispatcher and this dispatcher has no actors left attached */ - private[akka] def shutdown: Unit + private[akka] def shutdown(): Unit /** * Returns the size of the mailbox for the specified actor @@ -206,9 +218,9 @@ trait MessageDispatcher { def mailboxSize(actorRef: ActorRef): Int /** - * Returns the size of the Future queue + * Returns the amount of futures queued for execution */ - def futureQueueSize: Int = futures.size + def pendingFutures: Long = futures.get } /** @@ -222,9 +234,8 @@ abstract class MessageDispatcherConfigurator { def mailboxType(config: Configuration): MailboxType = { val capacity = config.getInt("mailbox-capacity", Dispatchers.MAILBOX_CAPACITY) - // FIXME how do we read in isBlocking for mailbox? Now set to 'false'. 
if (capacity < 1) UnboundedMailbox() - else BoundedMailbox(false, capacity, Duration(config.getInt("mailbox-push-timeout-time", Dispatchers.MAILBOX_PUSH_TIME_OUT.toMillis.toInt), TIME_UNIT)) + else BoundedMailbox(capacity, Duration(config.getInt("mailbox-push-timeout-time", Dispatchers.MAILBOX_PUSH_TIME_OUT.toMillis.toInt), TIME_UNIT)) } def configureThreadPool(config: Configuration, createDispatcher: => (ThreadPoolConfig) => MessageDispatcher): ThreadPoolConfigDispatcherBuilder = { diff --git a/akka-actor/src/main/scala/akka/dispatch/ThreadBasedDispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/ThreadBasedDispatcher.scala index a8dfcf5860..ae2c2ecfc3 100644 --- a/akka-actor/src/main/scala/akka/dispatch/ThreadBasedDispatcher.scala +++ b/akka-actor/src/main/scala/akka/dispatch/ThreadBasedDispatcher.scala @@ -4,13 +4,9 @@ package akka.dispatch -import akka.actor.{Actor, ActorRef} -import akka.config.Config.config +import akka.actor.{ActorRef} import akka.util.Duration -import java.util.Queue -import java.util.concurrent.{ConcurrentLinkedQueue, BlockingQueue, TimeUnit, LinkedBlockingQueue} -import akka.actor import java.util.concurrent.atomic.AtomicReference /** @@ -25,13 +21,13 @@ class ThreadBasedDispatcher(_actor: ActorRef, _mailboxType: MailboxType) private[akka] val owner = new AtomicReference[ActorRef](_actor) def this(actor: ActorRef) = - this(actor, UnboundedMailbox(true)) // For Java API + this(actor, UnboundedMailbox()) // For Java API def this(actor: ActorRef, capacity: Int) = - this(actor, BoundedMailbox(true, capacity)) //For Java API + this(actor, BoundedMailbox(capacity)) //For Java API def this(actor: ActorRef, capacity: Int, pushTimeOut: Duration) = //For Java API - this(actor, BoundedMailbox(true, capacity, pushTimeOut)) + this(actor, BoundedMailbox(capacity, pushTimeOut)) override def register(actorRef: ActorRef) = { val actor = owner.get() diff --git a/akka-actor/src/main/scala/akka/dispatch/ThreadPoolBuilder.scala 
b/akka-actor/src/main/scala/akka/dispatch/ThreadPoolBuilder.scala index 130eeb9163..a1ae48a6ae 100644 --- a/akka-actor/src/main/scala/akka/dispatch/ThreadPoolBuilder.scala +++ b/akka-actor/src/main/scala/akka/dispatch/ThreadPoolBuilder.scala @@ -218,9 +218,9 @@ trait ExecutorServiceDelegate extends ExecutorService { def execute(command: Runnable) = executor.execute(command) - def shutdown = executor.shutdown + def shutdown() { executor.shutdown() } - def shutdownNow = executor.shutdownNow + def shutdownNow() = executor.shutdownNow() def isShutdown = executor.isShutdown diff --git a/akka-actor/src/main/scala/akka/event/EventHandler.scala b/akka-actor/src/main/scala/akka/event/EventHandler.scala index f7b4598dce..8f8c59a497 100644 --- a/akka-actor/src/main/scala/akka/event/EventHandler.scala +++ b/akka-actor/src/main/scala/akka/event/EventHandler.scala @@ -109,15 +109,15 @@ object EventHandler extends ListenerManagement { } defaultListeners foreach { listenerName => try { - ReflectiveAccess.getClassFor[Actor](listenerName) map { clazz => - addListener(Actor.actorOf(clazz, listenerName).start()) + ReflectiveAccess.getClassFor[Actor](listenerName) match { + case r: Right[_, Class[Actor]] => addListener(Actor.actorOf(r.b, listenerName).start()) + case l: Left[Exception,_] => throw l.a } } catch { - case e: akka.actor.DeploymentAlreadyBoundException => // do nothing case e: Exception => throw new ConfigurationException( "Event Handler specified in config can't be loaded [" + listenerName + - "] due to [" + e.toString + "]") + "] due to [" + e.toString + "]", e) } } info(this, "Starting up EventHandler") @@ -132,8 +132,8 @@ object EventHandler extends ListenerManagement { * Shuts down all event handler listeners including the event handle dispatcher. 
*/ def shutdown() { - foreachListener(_.stop) - EventHandlerDispatcher.shutdown + foreachListener(_.stop()) + EventHandlerDispatcher.shutdown() } def notify(event: Any) { diff --git a/akka-actor/src/main/scala/akka/remoteinterface/RemoteInterface.scala b/akka-actor/src/main/scala/akka/remoteinterface/RemoteInterface.scala index a013207a8f..a1909ddd82 100644 --- a/akka-actor/src/main/scala/akka/remoteinterface/RemoteInterface.scala +++ b/akka-actor/src/main/scala/akka/remoteinterface/RemoteInterface.scala @@ -16,6 +16,7 @@ import scala.reflect.BeanProperty import java.net.InetSocketAddress import java.util.concurrent.ConcurrentHashMap import java.io.{PrintWriter, PrintStream} +import java.lang.reflect.InvocationTargetException trait RemoteModule { val UUID_PREFIX = "uuid:".intern @@ -117,7 +118,7 @@ case class RemoteServerWriteFailed( class RemoteClientException private[akka] ( message: String, @BeanProperty val client: RemoteClientModule, - val remoteAddress: InetSocketAddress) extends AkkaException(message) + val remoteAddress: InetSocketAddress, cause: Throwable = null) extends AkkaException(message, cause) /** * Thrown when the remote server actor dispatching fails for some reason. @@ -146,7 +147,7 @@ abstract class RemoteSupport extends ListenerManagement with RemoteServerModule handler } - def shutdown { + def shutdown() { eventHandler.stop() removeListener(eventHandler) this.shutdownClientModule() @@ -282,7 +283,8 @@ trait RemoteServerModule extends RemoteModule { def registerByUuid(actorRef: ActorRef): Unit /** - * Register Remote Actor by a specific 'id' passed as argument. + * Register Remote Actor by a specific 'id' passed as argument. The actor is registered by UUID rather than ID + * when prefixing the handle with the “uuid:” protocol. *

* NOTE: If you use this method to register your remote actor then you must unregister the actor by this ID yourself. */ diff --git a/akka-actor/src/main/scala/akka/routing/Pool.scala b/akka-actor/src/main/scala/akka/routing/Pool.scala index 6ab6aa0c4d..c95a80b0fc 100644 --- a/akka-actor/src/main/scala/akka/routing/Pool.scala +++ b/akka-actor/src/main/scala/akka/routing/Pool.scala @@ -5,7 +5,6 @@ package akka.routing import akka.actor.{Actor, ActorRef, PoisonPill} -import java.util.concurrent.TimeUnit /** * Actor pooling @@ -47,14 +46,13 @@ trait ActorPool { */ trait DefaultActorPool extends ActorPool { this: Actor => import ActorPool._ - import collection.mutable.LinkedList import akka.actor.MaximumNumberOfRestartsWithinTimeRangeReached protected var _delegates = Vector[ActorRef]() private var _lastCapacityChange = 0 private var _lastSelectorCount = 0 - override def postStop = _delegates foreach { + override def postStop() = _delegates foreach { delegate => try { delegate ! PoisonPill } catch { case e: Exception => } //Ignore any exceptions here diff --git a/akka-actor/src/main/scala/akka/util/AkkaLoader.scala b/akka-actor/src/main/scala/akka/util/AkkaLoader.scala index b7f113313d..c780cb48f3 100644 --- a/akka-actor/src/main/scala/akka/util/AkkaLoader.scala +++ b/akka-actor/src/main/scala/akka/util/AkkaLoader.scala @@ -21,7 +21,7 @@ class AkkaLoader { * Boot initializes the specified bundles */ def boot(withBanner: Boolean, b : Bootable): Unit = hasBooted switchOn { - if (withBanner) printBanner + if (withBanner) printBanner() println("Starting Akka...") b.onLoad Thread.currentThread.setContextClassLoader(getClass.getClassLoader) @@ -32,40 +32,62 @@ class AkkaLoader { /* * Shutdown, well, shuts down the bundles used in boot */ - def shutdown: Unit = hasBooted switchOff { - println("Shutting down Akka...") - _bundles.foreach(_.onUnload) - _bundles = None - Actor.shutdownHook.run - println("Akka succesfully shut down") + def shutdown() { + hasBooted switchOff { + 
println("Shutting down Akka...") + _bundles.foreach(_.onUnload) + _bundles = None + Actor.shutdownHook.run + println("Akka succesfully shut down") + } } - private def printBanner = { - println("==================================================") - println(" t") - println(" t t t") - println(" t t tt t") - println(" tt t t tt t") - println(" t ttttttt t ttt t") - println(" t tt ttt t ttt t") - println(" t t ttt t ttt t t") - println(" tt t ttt ttt ttt t") - println(" t t ttt ttt t tt t") - println(" t ttt ttt t t") - println(" tt ttt ttt t") - println(" ttt ttt") - println(" tttttttt ttt ttt ttt ttt tttttttt") - println(" ttt tt ttt ttt ttt ttt ttt ttt") - println(" ttt ttt ttt ttt ttt ttt ttt ttt") - println(" ttt ttt ttt ttt ttt tt ttt ttt") - println(" tttt ttttttttt tttttttt tttt") - println(" ttttttttt ttt ttt ttt ttt ttttttttt") - println(" ttt ttt ttt ttt ttt ttt ttt ttt") - println(" ttt ttt ttt ttt ttt ttt ttt ttt") - println(" ttt tt ttt ttt ttt ttt ttt ttt") - println(" tttttttt ttt ttt ttt ttt tttttttt") - println("==================================================") - println(" Running version {}", Config.VERSION) - println("==================================================") + private def printBanner() { + println(""" +============================================================================== + + ZZ: + ZZZZ + ZZZZZZ + ZZZ' ZZZ + ~7 7ZZ' ZZZ + :ZZZ: IZZ' ZZZ + ,OZZZZ.~ZZ? ZZZ + ZZZZ' 'ZZZ$ ZZZ + . $ZZZ ~ZZ$ ZZZ + .=Z?. .ZZZO ~ZZ7 OZZ + .ZZZZ7..:ZZZ~ 7ZZZ ZZZ~ + .$ZZZ$Z+.ZZZZ ZZZ: ZZZ$ + .,ZZZZ?' =ZZO= .OZZ 'ZZZ + .$ZZZZ+ .ZZZZ IZZZ ZZZ$ + .ZZZZZ' .ZZZZ' .ZZZ$ ?ZZZ + .ZZZZZZ' .OZZZ? ?ZZZ 'ZZZ$ + .?ZZZZZZ' .ZZZZ? .ZZZ? 'ZZZO + .+ZZZZZZ?' .7ZZZZ' .ZZZZ :ZZZZ + .ZZZZZZ$' .?ZZZZZ' .~ZZZZ 'ZZZZ. 
+ + + NNNNN $NNNN+ + NNNNN $NNNN+ + NNNNN $NNNN+ + NNNNN $NNNN+ + NNNNN $NNNN+ + =NNNNNNNNND$ NNNNN DDDDDD: $NNNN+ DDDDDN NDDNNNNNNNN, + NNNNNNNNNNNNND NNNNN DNNNNN $NNNN+ 8NNNNN= :NNNNNNNNNNNNNN + NNNNN$ DNNNNN NNNNN $NNNNN~ $NNNN+ NNNNNN NNNNN, :NNNNN+ + ?DN~ NNNNN NNNNN MNNNNN $NNNN+:NNNNN7 $ND =NNNNN + DNNNNN NNNNNDNNNN$ $NNNNDNNNNN :DNNNNN + ZNDNNNNNNNNND NNNNNNNNNND, $NNNNNNNNNNN DNDNNNNNNNNNN + NNNNNNNDDINNNNN NNNNNNNNNNND $NNNNNNNNNNND ONNNNNNND8+NNNNN + :NNNND NNNNN NNNNNN DNNNN, $NNNNNO 7NNNND NNNNNO :NNNNN + DNNNN NNNNN NNNNN DNNNN $NNNN+ 8NNNNN NNNNN $NNNNN + DNNNNO NNNNNN NNNNN NNNNN $NNNN+ NNNNN$ NNNND, ,NNNNND + NNNNNNDDNNNNNNNN NNNNN =NNNNN $NNNN+ DNNNN? DNNNNNNDNNNNNNNND + NNNNNNNNN NNNN$ NNNNN 8NNNND $NNNN+ NNNNN= ,DNNNNNNND NNNNN$ + +============================================================================== + Running version %s +============================================================================== +""".format(Config.VERSION)) } } diff --git a/akka-actor/src/main/scala/akka/util/Bootable.scala b/akka-actor/src/main/scala/akka/util/Bootable.scala index bea62e5ac7..d07643e1ac 100644 --- a/akka-actor/src/main/scala/akka/util/Bootable.scala +++ b/akka-actor/src/main/scala/akka/util/Bootable.scala @@ -5,6 +5,6 @@ package akka.util trait Bootable { - def onLoad {} - def onUnload {} + def onLoad() {} + def onUnload() {} } diff --git a/akka-actor/src/main/scala/akka/util/ReflectiveAccess.scala b/akka-actor/src/main/scala/akka/util/ReflectiveAccess.scala index 5c16a3f8c5..ace294b743 100644 --- a/akka-actor/src/main/scala/akka/util/ReflectiveAccess.scala +++ b/akka-actor/src/main/scala/akka/util/ReflectiveAccess.scala @@ -48,11 +48,26 @@ object ReflectiveAccess { } } - lazy val clusterInstance: Option[Cluster] = getObjectFor("akka.cluster.Cluster$") + lazy val clusterInstance: Option[Cluster] = getObjectFor("akka.cluster.Cluster$") match { + case Right(value) => Some(value) + case Left(exception) => + EventHandler.debug(this, 
exception.toString) + None + } - lazy val clusterDeployerInstance: Option[ClusterDeployer] = getObjectFor("akka.cluster.ClusterDeployer$") + lazy val clusterDeployerInstance: Option[ClusterDeployer] = getObjectFor("akka.cluster.ClusterDeployer$") match { + case Right(value) => Some(value) + case Left(exception) => + EventHandler.debug(this, exception.toString) + None + } - lazy val serializerClass: Option[Class[_]] = getClassFor("akka.serialization.Serializer") + lazy val serializerClass: Option[Class[_]] = getClassFor("akka.serialization.Serializer") match { + case Right(value) => Some(value) + case Left(exception) => + EventHandler.debug(this, exception.toString) + None + } lazy val node: ClusterNode = { ensureEnabled() @@ -123,16 +138,20 @@ object ReflectiveAccess { lazy val isEnabled = remoteSupportClass.isDefined - def ensureEnabled() { + def ensureEnabled() = { if (!isEnabled) { - val e = new ModuleNotAvailableException( - "Can't load the remoting module, make sure that akka-remote.jar is on the classpath") - EventHandler.debug(this, e.toString) - throw e + val e = new ModuleNotAvailableException("Can't load the remoting module, make sure that akka-remote.jar is on the classpath") + EventHandler.debug(this, e.toString) + throw e } } - val remoteSupportClass: Option[Class[_ <: RemoteSupport]] = getClassFor(TRANSPORT) + val remoteSupportClass = getClassFor[RemoteSupport](TRANSPORT) match { + case Right(value) => Some(value) + case Left(exception) => + EventHandler.debug(this, exception.toString) + None + } protected[akka] val defaultRemoteSupport: Option[() => RemoteSupport] = remoteSupportClass map { remoteClass => @@ -140,9 +159,11 @@ object ReflectiveAccess { remoteClass, Array[Class[_]](), Array[AnyRef]() - ) getOrElse { + ) match { + case Right(value) => value + case Left(exception) => val e = new ModuleNotAvailableException( - "Can't instantiate [%s] - make sure that akka-remote.jar is on the classpath".format(remoteClass.getName)) + "Can't instantiate 
[%s] - make sure that akka-remote.jar is on the classpath".format(remoteClass.getName), exception) EventHandler.debug(this, e.toString) throw e } @@ -172,7 +193,12 @@ object ReflectiveAccess { } val typedActorObjectInstance: Option[TypedActorObject] = - getObjectFor("akka.actor.TypedActor$") + getObjectFor[TypedActorObject]("akka.actor.TypedActor$") match { + case Right(value) => Some(value) + case Left(exception)=> + EventHandler.debug(this, exception.toString) + None + } def resolveFutureIfMessageIsJoinPoint(message: Any, future: Future[_]): Boolean = { ensureEnabled() @@ -188,94 +214,93 @@ object ReflectiveAccess { def createInstance[T](clazz: Class[_], params: Array[Class[_]], - args: Array[AnyRef]): Option[T] = try { + args: Array[AnyRef]): Either[Exception,T] = try { assert(clazz ne null) assert(params ne null) assert(args ne null) val ctor = clazz.getDeclaredConstructor(params: _*) ctor.setAccessible(true) - Some(ctor.newInstance(args: _*).asInstanceOf[T]) + Right(ctor.newInstance(args: _*).asInstanceOf[T]) } catch { case e: java.lang.reflect.InvocationTargetException => EventHandler.debug(this, e.getCause.toString) - None + Left(e) case e: Exception => EventHandler.debug(this, e.toString) - None + Left(e) } def createInstance[T](fqn: String, params: Array[Class[_]], args: Array[AnyRef], - classloader: ClassLoader = loader): Option[T] = try { + classloader: ClassLoader = loader): Either[Exception,T] = try { assert(params ne null) assert(args ne null) getClassFor(fqn) match { - case Some(clazz) => - val ctor = clazz.getDeclaredConstructor(params: _*) + case Right(value) => + val ctor = value.getDeclaredConstructor(params: _*) ctor.setAccessible(true) - Some(ctor.newInstance(args: _*).asInstanceOf[T]) - case None => None + Right(ctor.newInstance(args: _*).asInstanceOf[T]) + case Left(exception) => Left(exception) //We could just cast this to Either[Exception, T] but it's ugly } } catch { case e: Exception => - EventHandler.debug(this, e.toString) - None + 
Left(e) } - def getObjectFor[T](fqn: String, classloader: ClassLoader = loader): Option[T] = try {//Obtains a reference to $MODULE$ + //Obtains a reference to fqn.MODULE$ + def getObjectFor[T](fqn: String, classloader: ClassLoader = loader): Either[Exception,T] = try { getClassFor(fqn) match { - case Some(clazz) => - val instance = clazz.getDeclaredField("MODULE$") + case Right(value) => + val instance = value.getDeclaredField("MODULE$") instance.setAccessible(true) - Option(instance.get(null).asInstanceOf[T]) - case None => None + val obj = instance.get(null) + if (obj eq null) Left(new NullPointerException) else Right(obj.asInstanceOf[T]) + case Left(exception) => Left(exception) //We could just cast this to Either[Exception, T] but it's ugly } } catch { - case e: ExceptionInInitializerError => - EventHandler.debug(this, e.toString) - throw e + case e: Exception => + Left(e) } - def getClassFor[T](fqn: String, classloader: ClassLoader = loader): Option[Class[T]] = { + def getClassFor[T](fqn: String, classloader: ClassLoader = loader): Either[Exception,Class[T]] = try { assert(fqn ne null) // First, use the specified CL val first = try { - Option(classloader.loadClass(fqn).asInstanceOf[Class[T]]) + Right(classloader.loadClass(fqn).asInstanceOf[Class[T]]) } catch { - case c: ClassNotFoundException => None + case c: ClassNotFoundException => Left(c) } - if (first.isDefined) first + if (first.isRight) first else { // Second option is to use the ContextClassLoader val second = try { - Option(Thread.currentThread.getContextClassLoader.loadClass(fqn).asInstanceOf[Class[T]]) + Right(Thread.currentThread.getContextClassLoader.loadClass(fqn).asInstanceOf[Class[T]]) } catch { - case c: ClassNotFoundException => None + case c: ClassNotFoundException => Left(c) } - if (second.isDefined) second + if (second.isRight) second else { val third = try { - // Don't try to use "loader" if we got the default "classloader" parameter - if (classloader ne loader) 
Option(loader.loadClass(fqn).asInstanceOf[Class[T]]) - else None + if (classloader ne loader) Right(loader.loadClass(fqn).asInstanceOf[Class[T]]) else Left(null) //Horrid } catch { - case c: ClassNotFoundException => None + case c: ClassNotFoundException => Left(c) } - if (third.isDefined) third + if (third.isRight) third else { - // Last option is Class.forName try { - Option(Class.forName(fqn).asInstanceOf[Class[T]]) + Right(Class.forName(fqn).asInstanceOf[Class[T]]) // Last option is Class.forName } catch { - case c: ClassNotFoundException => None + case c: ClassNotFoundException => Left(c) } } } } + } catch { + case e: Exception => Left(e) } } diff --git a/akka-docs/Makefile b/akka-docs/Makefile index 49f649367f..9811732058 100644 --- a/akka-docs/Makefile +++ b/akka-docs/Makefile @@ -16,7 +16,11 @@ PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . # Set python path to include local packages for pygments styles. -PYTHONPATH += $(LOCALPACKAGES) +ifneq (,$(PYTHONPATH)) + PYTHONPATH := $(PYTHONPATH):$(LOCALPACKAGES) +else + PYTHONPATH := $(LOCALPACKAGES) +endif export PYTHONPATH .PHONY: help clean pygments html singlehtml latex pdf @@ -40,8 +44,11 @@ pygments: @echo "Custom pygments styles have been installed." @echo -html: pygments - $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html +$(LOCALPACKAGES): + $(MAKE) pygments + +html: $(LOCALPACKAGES) + $(SPHINXBUILD) -a -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." 
diff --git a/akka-docs/_sphinx/static/akka-intellij-code-style.jar b/akka-docs/_sphinx/static/akka-intellij-code-style.jar new file mode 100644 index 0000000000..55866c22c5 Binary files /dev/null and b/akka-docs/_sphinx/static/akka-intellij-code-style.jar differ diff --git a/akka-docs/_sphinx/static/akka.png b/akka-docs/_sphinx/static/akka.png index d79821a047..a6bc9c3b98 100644 Binary files a/akka-docs/_sphinx/static/akka.png and b/akka-docs/_sphinx/static/akka.png differ diff --git a/akka-docs/_sphinx/static/logo.png b/akka-docs/_sphinx/static/logo.png index 2c36c66a36..558dfed6eb 100644 Binary files a/akka-docs/_sphinx/static/logo.png and b/akka-docs/_sphinx/static/logo.png differ diff --git a/akka-docs/_sphinx/themes/akka/layout.html b/akka-docs/_sphinx/themes/akka/layout.html index db443c3fe5..0bd735c446 100644 --- a/akka-docs/_sphinx/themes/akka/layout.html +++ b/akka-docs/_sphinx/themes/akka/layout.html @@ -32,18 +32,12 @@ {% block content %}

{%- block akkaheader %} - {%- if theme_full_logo != "false" %} - - - - {%- else %} {%- if logo -%} - + {%- endif -%} -

- {{ shorttitle|e }}

-

{{ title|striptags|e }}

- {%- endif %} +

{{ shorttitle|e }}

+

Version {{ version|e }}

+

PDF

{%- endblock %}
diff --git a/akka-docs/_sphinx/themes/akka/static/akka.css_t b/akka-docs/_sphinx/themes/akka/static/akka.css_t index f05e86bb6a..5020439d8f 100644 --- a/akka-docs/_sphinx/themes/akka/static/akka.css_t +++ b/akka-docs/_sphinx/themes/akka/static/akka.css_t @@ -112,17 +112,26 @@ div.header h2 { font-size: 1.3em; font-weight: normal; letter-spacing: 1px; - text-transform: uppercase; - color: #aaa; + color: {{ theme_headingcolor }}; border: 0; margin-top: -3px; padding: 0; } +div.header img.leftlogo { + float: left; +} + div.header img.rightlogo { float: right; } +div.header h2.rightheading { + position: relative; + top:-45px; + float: right; +} + div.title { font-size: 1.3em; diff --git a/akka-docs/additional/add-on-modules.rst b/akka-docs/additional/add-on-modules.rst new file mode 100644 index 0000000000..bab2f1b174 --- /dev/null +++ b/akka-docs/additional/add-on-modules.rst @@ -0,0 +1,16 @@ +.. _add-on-modules: + +Add-on Modules +============== + +Akka Modules consist of add-on modules outside the core of Akka: + +- ``akka-kernel-1.1.jar`` -- Akka microkernel for running a bare-bones mini application server (embeds Jetty etc.) +- ``akka-amqp-1.1.jar`` -- AMQP integration +- ``akka-camel-1.1.jar`` -- Apache Camel Actors integration (it's the best way to have your Akka application communicate with the rest of the world) +- ``akka-camel-typed-1.1.jar`` -- Apache Camel Typed Actors integration +- ``akka-scalaz-1.1.jar`` -- Support for the Scalaz library +- ``akka-spring-1.1.jar`` -- Spring framework integration +- ``akka-osgi-dependencies-bundle-1.1.jar`` -- OSGi support + +Documentation for Akka Modules is located `here `_. 
diff --git a/akka-docs/pending/articles.rst b/akka-docs/additional/articles.rst similarity index 98% rename from akka-docs/pending/articles.rst rename to akka-docs/additional/articles.rst index 06f01f9a7d..2e22142e63 100644 --- a/akka-docs/pending/articles.rst +++ b/akka-docs/additional/articles.rst @@ -21,6 +21,8 @@ Videos Articles -------- +`Scatter-Gather with Akka Dataflow `_ + `Actor-Based Continuations with Akka and Swarm `_ `Mimicking Twitter Using an Akka-Based Event-Driven Architecture `_ diff --git a/akka-docs/additional/benchmarks.rst b/akka-docs/additional/benchmarks.rst new file mode 100644 index 0000000000..359914379f --- /dev/null +++ b/akka-docs/additional/benchmarks.rst @@ -0,0 +1,35 @@ +Benchmarks +========== + +Scalability, Throughput and Latency benchmark +--------------------------------------------- + + .. image:: ../images/benchmark-akka-sample-trading-throughput.png + +Simple Trading system. + +- `Here is the result with some graphs `_ +- `Here is the article `_ +- `Here is the code `_ + +Compares: + +- Synchronous Scala solution +- Scala library Actors + + - Fire-forget + - Request-reply + +- Akka + - Request-reply + - Fire-forget with default dispatcher + - Fire-forget with Hawt dispatcher + +Performance benchmark +--------------------- + +Benchmarking Akka against: + +- Scala Library Actors +- Raw Java concurrency +- Jetlang (Java actors lib) ``_ diff --git a/akka-docs/pending/companies-using-akka.rst b/akka-docs/additional/companies-using-akka.rst similarity index 100% rename from akka-docs/pending/companies-using-akka.rst rename to akka-docs/additional/companies-using-akka.rst diff --git a/akka-docs/pending/external-sample-projects.rst b/akka-docs/additional/external-sample-projects.rst similarity index 100% rename from akka-docs/pending/external-sample-projects.rst rename to akka-docs/additional/external-sample-projects.rst diff --git a/akka-docs/additional/index.rst b/akka-docs/additional/index.rst new file mode 100644 index 
0000000000..7398db601b --- /dev/null +++ b/akka-docs/additional/index.rst @@ -0,0 +1,15 @@ +Additional Information +====================== + +.. toctree:: + :maxdepth: 2 + + add-on-modules + articles + benchmarks + recipies + external-sample-projects + companies-using-akka + third-party-integrations + language-bindings + stability-matrix diff --git a/akka-docs/pending/language-bindings.rst b/akka-docs/additional/language-bindings.rst similarity index 100% rename from akka-docs/pending/language-bindings.rst rename to akka-docs/additional/language-bindings.rst diff --git a/akka-docs/pending/Recipes.rst b/akka-docs/additional/recipies.rst similarity index 100% rename from akka-docs/pending/Recipes.rst rename to akka-docs/additional/recipies.rst diff --git a/akka-docs/additional/stability-matrix.rst b/akka-docs/additional/stability-matrix.rst new file mode 100644 index 0000000000..61e5d247fb --- /dev/null +++ b/akka-docs/additional/stability-matrix.rst @@ -0,0 +1,33 @@ +Feature Stability Matrix +======================== + +Akka is comprised of a number of modules, with different levels of maturity and in different parts of their lifecycle; the matrix below gives you the current stability level of the modules. 
+ +Explanation of the different levels of stability +------------------------------------------------ + +* **Solid** - Proven solid in heavy production usage +* **Stable** - Ready for use in production environment +* **In progress** - Not enough feedback/use to claim it's ready for production use + +================================ ============ ============ ============ +Feature Solid Stable In progress +================================ ============ ============ ============ +Actors (Scala) Solid +Actors (Java) Solid +Typed Actors (Scala) Solid +Typed Actors (Java) Solid +STM (Scala) Solid +STM (Java) Solid +Transactors (Scala) Solid +Transactors (Java) Solid +Remote Actors (Scala) Solid +Remote Actors (Java) Solid +Camel Solid +AMQP Solid +HTTP Solid +Integration Guice Stable +Integration Spring Stable +Scheduler Solid +Redis Pub Sub In progress +================================ ============ ============ ============ diff --git a/akka-docs/pending/third-party-integrations.rst b/akka-docs/additional/third-party-integrations.rst similarity index 68% rename from akka-docs/pending/third-party-integrations.rst rename to akka-docs/additional/third-party-integrations.rst index 579c3123d0..be663c4e4b 100644 --- a/akka-docs/pending/third-party-integrations.rst +++ b/akka-docs/additional/third-party-integrations.rst @@ -2,19 +2,20 @@ Third-party Integrations ======================== The Play! Framework -=================== +------------------- Dustin Whitney has done an Akka integration module for the `Play! framework `_. Detailed instructions here: ``_. There are three screencasts: -# Using Play! with Akka STM: ``_ -# Using Play! with Akka Actors: ``_ -# Using Play! with Akka Remote Actors: ``_ + +- Using Play! with Akka STM: ``_ +- Using Play! with Akka Actors: ``_ +- Using Play! with Akka Remote Actors: ``_ The Pinky REST/MVC Framework -============================ +---------------------------- Peter Hausel has done an Akka integration module for the `Pinky framework `_. 
diff --git a/akka-docs/general/util.rst b/akka-docs/common/duration.rst similarity index 78% rename from akka-docs/general/util.rst rename to akka-docs/common/duration.rst index bb0f61e778..523c8a2283 100644 --- a/akka-docs/general/util.rst +++ b/akka-docs/common/duration.rst @@ -1,23 +1,20 @@ -######### -Utilities -######### - -.. sidebar:: Contents - - .. contents:: :local: - -This section of the manual describes miscellaneous utilities which are provided -by Akka and used in multiple places. - .. _Duration: +######## Duration -======== +######## + +Module stability: **SOLID** Durations are used throughout the Akka library, wherefore this concept is represented by a special data type, :class:`Duration`. Values of this type may represent infinite (:obj:`Duration.Inf`, :obj:`Duration.MinusInf`) or finite -durations, where the latter are constructable using a mini-DSL: +durations. + +Scala +===== + +In Scala durations are constructable using a mini-DSL and support all expected operations: .. code-block:: scala @@ -27,6 +24,8 @@ durations, where the latter are constructable using a mini-DSL: val threemillis = 3.millis val diff = fivesec - threemillis assert (diff < fivesec) + val fourmillis = threemillis * 4 / 3 // though you cannot write it the other way around + val n = threemillis / (1 millisecond) .. note:: @@ -35,6 +34,9 @@ durations, where the latter are constructable using a mini-DSL: if the time unit is the last token on a line, otherwise semi-colon inference might go wrong, depending on what starts the next line. +Java +==== + Java provides less syntactic sugar, so you have to spell out the operations as method calls instead: diff --git a/akka-docs/common/index.rst b/akka-docs/common/index.rst new file mode 100644 index 0000000000..f3ed26aa73 --- /dev/null +++ b/akka-docs/common/index.rst @@ -0,0 +1,8 @@ +Common utilities +========================== + +.. 
toctree:: + :maxdepth: 2 + + scheduler + duration diff --git a/akka-docs/common/scheduler.rst b/akka-docs/common/scheduler.rst new file mode 100644 index 0000000000..bf2b813d2e --- /dev/null +++ b/akka-docs/common/scheduler.rst @@ -0,0 +1,23 @@ +Scheduler +========= + +Module stability: **SOLID** + +``Akka`` has a little scheduler written using actors. +This can be convenient if you want to schedule some periodic task for maintenance or similar. + +It allows you to register a message that you want to be sent to a specific actor at a periodic interval. + +Here is an example: +------------------- + +.. code-block:: scala + + import akka.actor.Scheduler + + //Sends messageToBeSent to receiverActor after initialDelayBeforeSending and then after each delayBetweenMessages + Scheduler.schedule(receiverActor, messageToBeSent, initialDelayBeforeSending, delayBetweenMessages, timeUnit) + + //Sends messageToBeSent to receiverActor after delayUntilSend + Scheduler.scheduleOnce(receiverActor, messageToBeSent, delayUntilSend, timeUnit) + diff --git a/akka-docs/conf.py b/akka-docs/conf.py index 209f747afc..760a226ed6 100644 --- a/akka-docs/conf.py +++ b/akka-docs/conf.py @@ -13,12 +13,12 @@ extensions = ['sphinx.ext.todo', 'includecode'] templates_path = ['_templates'] source_suffix = '.rst' master_doc = 'index' -exclude_patterns = ['_build', 'pending'] +exclude_patterns = ['_build', 'pending', 'disabled'] project = u'Akka' -copyright = u'2009-2011, Scalable Solutions AB' -version = '1.1' -release = '1.1' +copyright = u'2011, Typesafe Inc' +version = '1.2-SNAPSHOT' +release = '1.2-SNAPSHOT' pygments_style = 'simple' highlight_language = 'scala' @@ -28,9 +28,6 @@ show_authors = True # -- Options for HTML output --------------------------------------------------- html_theme = 'akka' -html_theme_options = { - 'full_logo': 'true' - } html_theme_path = ['_sphinx/themes'] html_title = 'Akka Documentation' diff --git a/akka-docs/dev/building-akka.rst b/akka-docs/dev/building-akka.rst 
new file mode 100644 index 0000000000..2622051447 --- /dev/null +++ b/akka-docs/dev/building-akka.rst @@ -0,0 +1,175 @@ + +.. highlightlang:: none + +.. _building-akka: + +############### + Building Akka +############### + +This page describes how to build and run Akka from the latest source code. + +.. contents:: :local: + + +Get the source code +=================== + +Akka uses `Git`_ and is hosted at `Github`_. + +.. _Git: http://git-scm.com +.. _Github: http://github.com + +You first need Git installed on your machine. You can then clone the source +repositories: + +- Akka repository from http://github.com/jboner/akka +- Akka Modules repository from http://github.com/jboner/akka-modules + +For example:: + + git clone git://github.com/jboner/akka.git + git clone git://github.com/jboner/akka-modules.git + +If you have already cloned the repositories previously then you can update the +code with ``git pull``:: + + git pull origin master + + +SBT - Simple Build Tool +======================= + +Akka is using the excellent `SBT`_ build system. So the first thing you have to +do is to download and install SBT. You can read more about how to do that in the +`SBT setup`_ documentation. + +.. _SBT: http://code.google.com/p/simple-build-tool +.. _SBT setup: http://code.google.com/p/simple-build-tool/wiki/Setup + +The SBT commands that you'll need to build Akka are all included below. If you +want to find out more about SBT and using it for your own projects do read the +`SBT documentation`_. + +.. _SBT documentation: http://code.google.com/p/simple-build-tool/wiki/RunningSbt + +The Akka SBT build file is ``project/build/AkkaProject.scala`` with some +properties defined in ``project/build.properties``. + + +Building Akka +============= + +First make sure that you are in the akka code directory:: + + cd akka + + +Fetching dependencies +--------------------- + +SBT does not fetch dependencies automatically. 
You need to manually do this with +the ``update`` command:: + + sbt update + +Once finished, all the dependencies for Akka will be in the ``lib_managed`` +directory under each module: akka-actor, akka-stm, and so on. + +*Note: you only need to run update the first time you are building the code, +or when the dependencies have changed.* + + +Building +-------- + +To compile all the Akka core modules use the ``compile`` command:: + + sbt compile + +You can run all tests with the ``test`` command:: + + sbt test + +If compiling and testing are successful then you have everything working for the +latest Akka development version. + + +Publish to local Ivy repository +------------------------------- + +If you want to deploy the artifacts to your local Ivy repository (for example, +to use from an SBT project) use the ``publish-local`` command:: + + sbt publish-local + + +Publish to local Maven repository +--------------------------------- + +If you want to deploy the artifacts to your local Maven repository use:: + + sbt publish-local publish + + +SBT interactive mode +-------------------- + +Note that in the examples above we are calling ``sbt compile`` and ``sbt test`` +and so on. SBT also has an interactive mode. If you just run ``sbt`` you enter +the interactive SBT prompt and can enter the commands directly. This saves +starting up a new JVM instance for each command and can be much faster and more +convenient. + +For example, building Akka as above is more commonly done like this:: + + % sbt + [info] Building project akka 1.2-SNAPSHOT against Scala 2.9.0 + [info] using AkkaParentProject with sbt 0.7.6 and Scala 2.7.7 + > update + [info] + [info] == akka-actor / update == + ... + [success] Successful. + [info] + [info] Total time ... + > compile + ... + > test + ... + + +SBT batch mode +-------------- + +It's also possible to combine commands in a single call. 
For example, updating, +testing, and publishing Akka to the local Ivy repository can be done with:: + + sbt update test publish-local + + +Building Akka Modules +===================== + +See the Akka Modules documentation. + + +.. _dependencies: + +Dependencies +============ + +If you are managing dependencies by hand you can find the dependencies for each +module by looking in the ``lib_managed`` directories. For example, this will +list all compile dependencies (providing you have the source code and have run +``sbt update``):: + + cd akka + ls -1 */lib_managed/compile + +You can also look at the Ivy dependency resolution information that is created +on ``sbt update`` and found in ``~/.ivy2/cache``. For example, the +``.ivy2/cache/se.scalablesolutions.akka-akka-remote-compile.xml`` file contains +the resolution information for the akka-remote module compile dependencies. If +you open this file in a web browser you will get an easy to navigate view of +dependencies. diff --git a/akka-docs/pending/developer-guidelines.rst b/akka-docs/dev/developer-guidelines.rst similarity index 57% rename from akka-docs/pending/developer-guidelines.rst rename to akka-docs/dev/developer-guidelines.rst index bf2e9dad26..be83c7bd04 100644 --- a/akka-docs/pending/developer-guidelines.rst +++ b/akka-docs/dev/developer-guidelines.rst @@ -1,3 +1,5 @@ +.. _developer_guidelines: + Developer Guidelines ==================== @@ -6,25 +8,25 @@ Code Style The Akka code style follows `this document `_ . -Here is a code style settings file for IntelliJ IDEA. -``_ +Here is a code style settings file for ``IntelliJ IDEA``: +`Download <../_static/akka-intellij-code-style.jar>`_ Please follow the code style. Look at the code around you and mimic. Testing ------- -All code that is checked in should have tests. All testing is done with ScalaTest and ScalaCheck. +All code that is checked in **should** have tests. All testing is done with ``ScalaTest`` and ``ScalaCheck``. 
-* Name tests as *Test.scala if they do not depend on any external stuff. That keeps surefire happy. -* Name tests as *Spec.scala if they have external dependencies. +* Name tests as **Test.scala** if they do not depend on any external stuff. That keeps surefire happy. +* Name tests as **Spec.scala** if they have external dependencies. -There is a testing standard that should be followed: `Ticket001Spec <@https://github.com/jboner/akka/blob/master/akka-actor/src/test/scala/akka/ticket/Ticket001Spec.scala>`_ +There is a testing standard that should be followed: `Ticket001Spec `_ Actor TestKit ^^^^^^^^^^^^^ -There is a useful test kit for testing actors: `akka.util.TestKit <@https://github.com/jboner/akka/tree/master/akka-actor/src/main/scala/akka/util/TestKit.scala>`_. It enables assertions concerning replies received and their timing, there is more documentation in the ``_ module. +There is a useful test kit for testing actors: `akka.util.TestKit `_. It enables assertions concerning replies received and their timing, there is more documentation in the :ref:`akka-testkit` module. NetworkFailureTest ^^^^^^^^^^^^^^^^^^ diff --git a/akka-docs/dev/documentation.rst b/akka-docs/dev/documentation.rst index 9e280220e6..aa44d15291 100644 --- a/akka-docs/dev/documentation.rst +++ b/akka-docs/dev/documentation.rst @@ -3,10 +3,14 @@ .. _documentation: -############### - Documentation -############### +######################### + Documentation Guidelines +######################### +.. sidebar:: Contents + + .. contents:: :local: + The Akka documentation uses `reStructuredText`_ as its markup language and is built using `Sphinx`_. @@ -67,3 +71,83 @@ For example:: Here is a reference to "akka section": :ref:`akka-section` which will have the name "Akka Section". +Build the documentation +======================= + +First install `Sphinx`_. See below. 
+ +Building +-------- + +:: + + cd akka-docs + + make html + open _build/html/index.html + + make pdf + open _build/latex/Akka.pdf + + +Installing Sphinx on OS X +------------------------- + +Install `Homebrew `_ + +Install Python and pip: + +:: + + brew install python + /usr/local/share/python/easy_install pip + +Add the Homebrew Python path to your $PATH: + +:: + + /usr/local/Cellar/python/2.7.1/bin + + +More information in case of trouble: +https://github.com/mxcl/homebrew/wiki/Homebrew-and-Python + +Install sphinx: + +:: + + pip install sphinx + +Add sphinx_build to your $PATH: + +:: + + /usr/local/share/python + +Install BasicTeX package from: +http://www.tug.org/mactex/morepackages.html + +Add texlive bin to $PATH: + +:: + + /usr/local/texlive/2010basic/bin/universal-darwin + +Add missing tex packages: + +:: + + sudo tlmgr update --self + sudo tlmgr install titlesec + sudo tlmgr install framed + sudo tlmgr install threeparttable + sudo tlmgr install wrapfig + sudo tlmgr install helvetic + sudo tlmgr install courier + +Link the akka pygments style: + +:: + + cd /usr/local/Cellar/python/2.7.1/lib/python2.7/site-packages/pygments/styles + ln -s /path/to/akka/akka-docs/themes/akka/pygments/akka.py akka.py diff --git a/akka-docs/dev/index.rst b/akka-docs/dev/index.rst index 05ab53742d..690ea88664 100644 --- a/akka-docs/dev/index.rst +++ b/akka-docs/dev/index.rst @@ -4,4 +4,8 @@ Information for Developers .. toctree:: :maxdepth: 2 + building-akka + developer-guidelines documentation + team + diff --git a/akka-docs/dev/team.rst b/akka-docs/dev/team.rst new file mode 100644 index 0000000000..53991eb260 --- /dev/null +++ b/akka-docs/dev/team.rst @@ -0,0 +1,28 @@ +.. 
_team: + +Team +===== + +=================== ========================== ==================================== +Name Role Email +=================== ========================== ==================================== +Jonas Bonér Founder, Despot, Committer jonas AT jonasboner DOT com +Viktor Klang Bad cop, Committer viktor DOT klang AT gmail DOT com +Debasish Ghosh Committer dghosh AT acm DOT org +Ross McDonald Alumni rossajmcd AT gmail DOT com +Eckhart Hertzler Alumni +Mikael Högqvist Alumni +Tim Perrett Alumni +Jeanfrancois Arcand Alumni jfarcand AT apache DOT org +Martin Krasser Committer krasserm AT googlemail DOT com +Jan Van Besien Alumni +Michael Kober Alumni +Peter Vlugter Committer +Peter Veentjer Committer +Irmo Manie Committer +Heiko Seeberger Committer +Hiram Chirino Committer +Scott Clasen Committer +Roland Kuhn Committer +Patrik Nordwall Committer patrik DOT nordwall AT gmail DOT com +=================== ========================== ==================================== \ No newline at end of file diff --git a/akka-docs/intro/examples/Pi.scala b/akka-docs/disabled/examples/Pi.scala similarity index 98% rename from akka-docs/intro/examples/Pi.scala rename to akka-docs/disabled/examples/Pi.scala index 1635229802..41f8e88b9f 100644 --- a/akka-docs/intro/examples/Pi.scala +++ b/akka-docs/disabled/examples/Pi.scala @@ -91,11 +91,11 @@ object Pi extends App { } //#master-receive - override def preStart { + override def preStart() { start = now } - override def postStop { + override def postStop() { // tell the world that the calculation is complete println( "\n\tPi estimate: \t\t%s\n\tCalculation time: \t%s millis" diff --git a/akka-docs/intro/getting-started-first.rst b/akka-docs/disabled/getting-started-first.rst similarity index 95% rename from akka-docs/intro/getting-started-first.rst rename to akka-docs/disabled/getting-started-first.rst index 79c220d14a..3fa245febc 100644 --- a/akka-docs/intro/getting-started-first.rst +++ 
b/akka-docs/disabled/getting-started-first.rst @@ -19,19 +19,22 @@ We will be using an algorithm that is called "embarrassingly parallel" which jus Here is the formula for the algorithm we will use: -.. image:: pi-formula.png +.. image:: ../images/pi-formula.png In this particular algorithm the master splits the series into chunks which are sent out to each worker actor to be processed. When each worker has processed its chunk it sends a result back to the master which aggregates the total result. Tutorial source code -------------------- -If you want don't want to type in the code and/or set up an SBT project then you can check out the full tutorial from the Akka GitHub repository. It is in the ``akka-tutorials/akka-tutorial-first`` module. You can also browse it online `here `_, with the actual source code `here `_. +If you want don't want to type in the code and/or set up an SBT project then you can check out the full tutorial from the Akka GitHub repository. It is in the ``akka-tutorials/akka-tutorial-first`` module. You can also browse it online `here`__, with the actual source code `here`__. + +__ https://github.com/jboner/akka/tree/master/akka-tutorials/akka-tutorial-first +__ https://github.com/jboner/akka/blob/master/akka-tutorials/akka-tutorial-first/src/main/scala/Pi.scala Prerequisites ------------- -This tutorial assumes that you have Jave 1.6 or later installed on you machine and ``java`` on your ``PATH``. You also need to know how to run commands in a shell (ZSH, Bash, DOS etc.) and a decent text editor or IDE to type in the Scala code. +This tutorial assumes that you have Java 1.6 or later installed on you machine and ``java`` on your ``PATH``. You also need to know how to run commands in a shell (ZSH, Bash, DOS etc.) and a decent text editor or IDE to type in the Scala code. Downloading and installing Akka ------------------------------- @@ -77,7 +80,7 @@ Akka is very modular and has many JARs for containing different features. 
The co - ``akka-slf4j-1.1.jar`` -- SLF4J Event Handler Listener - ``akka-testkit-1.1.jar`` -- Toolkit for testing Actors -We also have Akka Modules containing add-on modules outside the core of Akka. You can download the Akka Modules distribution from TODO. It contains Akka core as well. We will not be needing any modules there today, but for your information the module JARs are these: +We also have Akka Modules containing add-on modules outside the core of Akka. You can download the Akka Modules distribution from ``_. It contains Akka core as well. We will not be needing any modules there today, but for your information the module JARs are these: - ``akka-kernel-1.1.jar`` -- Akka microkernel for running a bare-bones mini application server (embeds Jetty etc.) - ``akka-amqp-1.1.jar`` -- AMQP integration @@ -92,16 +95,16 @@ Downloading and installing Scala To build and run the tutorial sample from the command line, you have to install the Scala distribution. If you prefer to use SBT to build and run the sample then you can skip this section and jump to the next one. -Scala can be downloaded from `http://www.scala-lang.org/downloads `_. Browse there and download the Scala 2.9.0.RC1 release. If you pick the ``tgz`` or ``zip`` distribution then just unzip it where you want it installed. If you pick the IzPack Installer then double click on it and follow the instructions. +Scala can be downloaded from `http://www.scala-lang.org/downloads `_. Browse there and download the Scala 2.9.0 release. If you pick the ``tgz`` or ``zip`` distribution then just unzip it where you want it installed. If you pick the IzPack Installer then double click on it and follow the instructions. 
-You also need to make sure that the ``scala-2.9.0.RC1/bin`` (if that is the directory where you installed Scala) is on your ``PATH``:: +You also need to make sure that the ``scala-2.9.0/bin`` (if that is the directory where you installed Scala) is on your ``PATH``:: - $ export PATH=$PATH:scala-2.9.0.RC1/bin + $ export PATH=$PATH:scala-2.9.0/bin You can test your installation by invoking scala:: $ scala -version - Scala code runner version 2.9.0.RC1 -- Copyright 2002-2011, LAMP/EPFL + Scala code runner version 2.9.0.final -- Copyright 2002-2011, LAMP/EPFL Looks like we are all good. Finally let's create a source file ``Pi.scala`` for the tutorial and put it in the root of the Akka distribution in the ``tutorial`` directory (you have to create it first). @@ -130,7 +133,7 @@ If you have not already done so, now is the time to create an SBT project for ou Name: Tutorial 1 Organization: Hakkers Inc Version [1.0]: - Scala version [2.9.0.RC1]: + Scala version [2.9.0]: sbt version [0.7.6.RC0]: Now we have the basis for an SBT project. Akka has an SBT Plugin making it very easy to use Akka is an SBT-based project so let's use that. diff --git a/akka-docs/general/configuration.rst b/akka-docs/general/configuration.rst new file mode 100644 index 0000000000..bd6c9510e3 --- /dev/null +++ b/akka-docs/general/configuration.rst @@ -0,0 +1,125 @@ +Configuration +============= + +.. sidebar:: Contents + + .. contents:: :local: + +.. _-Dakka.config: +.. _-Dakka.home: + +Specifying the configuration file +--------------------------------- + +If you don't specify a configuration file then Akka uses default values, corresponding to the ``akka-reference.conf`` +that you see below. You can specify your own configuration file to override any property in the reference config. +You only have to define the properties that differ from the default configuration. 
+ +The location of the config file to use can be specified in various ways: + +* Define the ``-Dakka.config=...`` system property parameter with a file path to configuration file. + +* Put an ``akka.conf`` file in the root of the classpath. + +* Define the ``AKKA_HOME`` environment variable pointing to the root of the Akka + distribution. The config is taken from the ``AKKA_HOME/config/akka.conf``. You + can also point to the AKKA_HOME by specifying the ``-Dakka.home=...`` system + property parameter. + +If several of these ways to specify the config file are used at the same time the precedence is the order as given above, +i.e. you can always redefine the location with the ``-Dakka.config=...`` system property. + + +Defining the configuration file +------------------------------- + +Here is the reference configuration file: + +.. literalinclude:: ../../config/akka-reference.conf + :language: none + +A custom ``akka.conf`` might look like this: + +:: + + # In this file you can override any option defined in the 'akka-reference.conf' file. + # Copy in all or parts of the 'akka-reference.conf' file and modify as you please. + + akka { + event-handlers = ["akka.event.slf4j.Slf4jEventHandler"] + + # Comma separated list of the enabled modules. + enabled-modules = ["camel", "remote"] + + # These boot classes are loaded (and created) automatically when the Akka Microkernel boots up + # Can be used to bootstrap your application(s) + # Should be the FQN (Fully Qualified Name) of the boot class which needs to have a default constructor + boot = ["sample.camel.Boot", + "sample.myservice.Boot"] + + actor { + throughput = 10 # Throughput for ExecutorBasedEventDrivenDispatcher, set to 1 for complete fairness + } + + remote { + server { + port = 2562 # The port clients should connect to. Default is 2552 (AKKA) + } + } + } + +.. 
_-Dakka.mode: + +Specifying files for different modes +------------------------------------ + +You can use different configuration files for different purposes by specifying a mode option, either as +``-Dakka.mode=...`` system property or as ``AKKA_MODE=...`` environment variable. For example using DEBUG log level +when in development mode. Run with ``-Dakka.mode=dev`` and place the following ``akka.dev.conf`` in the root of +the classpath. + +akka.dev.conf: + +:: + + akka { + event-handler-level = "DEBUG" + } + +The mode option works in the same way when using configuration files in ``AKKA_HOME/config/`` directory. + +The mode option is not used when specifying the configuration file with ``-Dakka.config=...`` system property. + +Including files +--------------- + +Sometimes it can be useful to include another configuration file, for example if you have one ``akka.conf`` with all +environment independent settings and then override some settings for specific modes. + +akka.dev.conf: + +:: + + include "akka.conf" + + akka { + event-handler-level = "DEBUG" + } + +.. _-Dakka.output.config.source: + +Showing Configuration Source +---------------------------- + +If the system property ``akka.output.config.source`` is set to anything but +null, then the source from which Akka reads its configuration is printed to the +console during application startup. 
+ +Summary of System Properties +---------------------------- + +* :ref:`akka.home <-Dakka.home>` (``AKKA_HOME``): where Akka searches for configuration +* :ref:`akka.config <-Dakka.config>`: explicit configuration file location +* :ref:`akka.mode <-Dakka.mode>` (``AKKA_MODE``): modify configuration file name for multiple profiles +* :ref:`akka.output.config.source <-Dakka.output.config.source>`: whether to print configuration source to console + diff --git a/akka-docs/pending/event-handler.rst b/akka-docs/general/event-handler.rst similarity index 86% rename from akka-docs/pending/event-handler.rst rename to akka-docs/general/event-handler.rst index 18eefefb0a..e4396faef5 100644 --- a/akka-docs/pending/event-handler.rst +++ b/akka-docs/general/event-handler.rst @@ -1,3 +1,5 @@ +.. _event-handler: + Event Handler ============= @@ -12,11 +14,12 @@ You can configure which event handlers should be registered at boot time. That i .. code-block:: ruby akka { - event-handlers = ["akka.event.EventHandler$DefaultListener"] # event handlers to register at boot time (EventHandler$DefaultListener logs to STDOUT) + # event handlers to register at boot time (EventHandler$DefaultListener logs to STDOUT) + event-handlers = ["akka.event.EventHandler$DefaultListener"] event-handler-level = "DEBUG" # Options: ERROR, WARNING, INFO, DEBUG } -The default one logs to STDOUT and is registered by default. It is not intended to be used for production. There is also an SLF4J event handler available in the 'akka-slf4j.jar' module. Read more about it `here `_. +The default one logs to STDOUT and is registered by default. It is not intended to be used for production. There is also an :ref:`slf4j` event handler available in the 'akka-slf4j' module. 
Example of creating a listener from Scala (from Java you just have to create an 'UntypedActor' and create a handler for these messages): @@ -88,9 +91,10 @@ The methods take a call-by-name parameter for the message to avoid object alloca From Java you need to nest the call in an if statement to achieve the same thing. -``_ -if (EventHandler.isDebugEnabled()) { - EventHandler.debug(this, String.format("Processing took %s ms", duration)); -} +.. code-block:: java + + if (EventHandler.isDebugEnabled()) { + EventHandler.debug(this, String.format("Processing took %s ms", duration)); + } + -``_ diff --git a/akka-docs/general/index.rst b/akka-docs/general/index.rst index 5b0e3c24d6..ae8a8f30b3 100644 --- a/akka-docs/general/index.rst +++ b/akka-docs/general/index.rst @@ -4,5 +4,8 @@ General .. toctree:: :maxdepth: 2 - migration-guides - util + jmm + configuration + event-handler + slf4j + diff --git a/akka-docs/general/jmm.rst b/akka-docs/general/jmm.rst new file mode 100644 index 0000000000..fd65ce3f28 --- /dev/null +++ b/akka-docs/general/jmm.rst @@ -0,0 +1,36 @@ +Akka and the Java Memory Model +================================ + +Prior to Java 5, the Java Memory Model (JMM) was broken. It was possible to get all kinds of strange results like unpredictable merged writes made by concurrent executing threads, unexpected reordering of instructions, and even final fields were not guaranteed to be final. With Java 5 and JSR-133, the Java Memory Model is clearly specified. This specification makes it possible to write code that performs, but doesn't cause concurrency problems. The Java Memory Model is specified in 'happens before'-rules, e.g.: + +* **monitor lock rule**: a release of a lock happens before every subsequent acquire of the same lock. 
+* **volatile variable rule**: a write of a volatile variable happens before every subsequent read of the same volatile variable + +The 'happens before'-rules clearly specify which visibility guarantees are provided on memory and which re-orderings are allowed. Without these rules it would not be possible to write concurrent and performant code in Java. + +Actors and the Java Memory Model +-------------------------------- + +With the Actors implementation in Akka, there are 2 ways multiple threads can execute actions on shared memory over time: + +* if a message is sent to an actor (e.g. by another actor). In most cases messages are immutable, but if that message is not a properly constructed immutable object, without happens before rules, the system still could be subject to instruction re-orderings and visibility problems (so a possible source of concurrency errors). +* if an actor makes changes to its internal state in one 'receive' method and accesses that state while processing another message. With the actors model you don't get any guarantee that the same thread will be executing the same actor for different messages. Without a happens before relation between these actions, there could be another source of concurrency errors. + +To solve the 2 problems above, Akka adds the following 2 'happens before'-rules to the JMM: + +* **the actor send rule**: where the send of the message to an actor happens before the receive of the **same** actor. +* **the actor subsequent processing rule**: where processing of one message happens before processing of the next message by the **same** actor. + +Both rules only apply for the same actor instance and are not valid if different actors are used. + +STM and the Java Memory Model +----------------------------- + +The Akka STM also provides a happens before rule called: + +* **the transaction rule**: a commit on a transaction happens before every subsequent start of a transaction where there is at least 1 shared reference. 
+ +How these rules are realized in Akka, is an implementation detail and can change over time (the exact details could even depend on the used configuration) but they will lift on the other JMM rules like the monitor lock rule or the volatile variable rule. Essentially this means that you, the Akka user, do not need to worry about adding synchronization to provide such a happens before relation, because it is the responsibility of Akka. So you have your hands free to deal with your problems and not that of the framework. + + + diff --git a/akka-docs/general/migration-guide-1.0.x-1.1.x.rst b/akka-docs/general/migration-guide-1.0.x-1.1.x.rst deleted file mode 100644 index 3fc555abaf..0000000000 --- a/akka-docs/general/migration-guide-1.0.x-1.1.x.rst +++ /dev/null @@ -1,41 +0,0 @@ -Migration Guide 1.0.x to 1.1.x -=================================== - -**Akka has now moved to Scala 2.9.x** - - -Akka HTTP ---------- - -# akka.servlet.Initializer has been moved to ``akka-kernel`` to be able to have ``akka-http`` not depend on ``akka-remote``, if you don't want to use the class for kernel, just create your own version of ``akka.servlet.Initializer``, it's just a couple of lines of code and there is instructions here: `Akka Http Docs `_ -# akka.http.ListWriter has been removed in full, if you use it and want to keep using it, here's the code: `ListWriter `_ -# Jersey-server is now a "provided" dependency for ``akka-http``, so you'll need to add the dependency to your project, it's built against Jersey 1.3 - -Akka Actor ----------- - -# is now dependency free, with the exception of the dependency on the ``scala-library.jar`` -# does not bundle any logging anymore, but you can subscribe to events within Akka by registering an event handler on akka.event.EventHandler or by specifying the ``FQN`` of an Actor in the akka.conf under akka.event-handlers; there is an ``akka-slf4j`` module which still provides the Logging trait and a default ``SLF4J`` logger adapter. 
-Don't forget to add a SLF4J backend though, we recommend: - -.. code-block:: scala - - lazy val logback = "ch.qos.logback" % "logback-classic" % "0.9.28" % "runtime" - -# If you used HawtDispatcher and want to continue using it, you need to include akka-dispatcher-extras.jar from Akka Modules, in your akka.conf you need to specify: ``akka.dispatch.HawtDispatcherConfigurator`` instead of ``HawtDispatcher`` -# FSM: the onTransition method changed from Function1 to PartialFunction; there is an implicit conversion for the precise types in place, but it may be necessary to add an underscore if you are passing an eta-expansion (using a method as function value). - -Akka Typed Actor ----------------- - -All methods starting with 'get*' are deprecated and will be removed in post 1.1 release. - -Akka Remote ------------ - -# ``UnparsebleException`` has been renamed to ``CannotInstantiateRemoteExceptionDueToRemoteProtocolParsingErrorException(exception, classname, message)`` - -Akka Testkit ------------- - -The TestKit moved into the akka-testkit subproject and correspondingly into the :code:`akka.testkit` package. diff --git a/akka-docs/pending/slf4j.rst b/akka-docs/general/slf4j.rst similarity index 61% rename from akka-docs/pending/slf4j.rst rename to akka-docs/general/slf4j.rst index 780030a543..a49b731771 100644 --- a/akka-docs/pending/slf4j.rst +++ b/akka-docs/general/slf4j.rst @@ -1,7 +1,15 @@ +.. _slf4j: + SLF4J ===== -This module is available in the 'akka-slf4j.jar'. It has one single dependency; the slf4j-api jar. +This module is available in the 'akka-slf4j.jar'. It has one single dependency; the slf4j-api jar. In runtime you +also need a SLF4J backend, we recommend: + + .. 
code-block:: scala + + lazy val logback = "ch.qos.logback" % "logback-classic" % "0.9.28" % "runtime" + Event Handler ------------- @@ -15,5 +23,5 @@ This module includes a SLF4J Event Handler that works with Akka's standard Event event-handler-level = "DEBUG" } -Read more about how to use the event handler `here `_. +Read more about how to use the :ref:`event-handler`. diff --git a/akka-docs/images/benchmark-akka-sample-trading-throughput.png b/akka-docs/images/benchmark-akka-sample-trading-throughput.png new file mode 100644 index 0000000000..52cc7819da Binary files /dev/null and b/akka-docs/images/benchmark-akka-sample-trading-throughput.png differ diff --git a/akka-docs/intro/build-path.png b/akka-docs/images/build-path.png similarity index 100% rename from akka-docs/intro/build-path.png rename to akka-docs/images/build-path.png diff --git a/akka-docs/images/clojure-trees.png b/akka-docs/images/clojure-trees.png new file mode 100644 index 0000000000..60127d52b2 Binary files /dev/null and b/akka-docs/images/clojure-trees.png differ diff --git a/akka-docs/intro/diagnostics-window.png b/akka-docs/images/diagnostics-window.png similarity index 100% rename from akka-docs/intro/diagnostics-window.png rename to akka-docs/images/diagnostics-window.png diff --git a/akka-docs/intro/example-code.png b/akka-docs/images/example-code.png similarity index 100% rename from akka-docs/intro/example-code.png rename to akka-docs/images/example-code.png diff --git a/akka-docs/intro/import-project.png b/akka-docs/images/import-project.png similarity index 100% rename from akka-docs/intro/import-project.png rename to akka-docs/images/import-project.png diff --git a/akka-docs/intro/install-beta2-updatesite.png b/akka-docs/images/install-beta2-updatesite.png similarity index 100% rename from akka-docs/intro/install-beta2-updatesite.png rename to akka-docs/images/install-beta2-updatesite.png diff --git a/akka-docs/intro/pi-formula.png b/akka-docs/images/pi-formula.png similarity index 
100% rename from akka-docs/intro/pi-formula.png rename to akka-docs/images/pi-formula.png diff --git a/akka-docs/intro/quickfix.png b/akka-docs/images/quickfix.png similarity index 100% rename from akka-docs/intro/quickfix.png rename to akka-docs/images/quickfix.png diff --git a/akka-docs/intro/run-config.png b/akka-docs/images/run-config.png similarity index 100% rename from akka-docs/intro/run-config.png rename to akka-docs/images/run-config.png diff --git a/akka-docs/index.rst b/akka-docs/index.rst index fbb2506fab..738b8e636f 100644 --- a/akka-docs/index.rst +++ b/akka-docs/index.rst @@ -6,10 +6,29 @@ Contents intro/index general/index + common/index scala/index + java/index dev/index + project/index + additional/index Links ===== -* `Support `_ +* :ref:`migration` + +* `Downloads `_ + +* `Source Code `_ + +* :ref:`scaladoc` + +* :ref:`other-doc` + +* `Akka Modules Documentation `_ + +* :ref:`issue_tracking` + +* :ref:`support` + diff --git a/akka-docs/intro/building-akka.rst b/akka-docs/intro/building-akka.rst deleted file mode 100644 index 2f2a745eeb..0000000000 --- a/akka-docs/intro/building-akka.rst +++ /dev/null @@ -1,340 +0,0 @@ -Building Akka -============= - -This page describes how to build and run Akka from the latest source code. - -.. contents:: :local: - - -Get the source code -------------------- - -Akka uses `Git `_ and is hosted at `Github -`_. - -You first need Git installed on your machine. You can then clone the source -repositories: - -- Akka repository from ``_ -- Akka Modules repository from ``_ - -For example:: - - git clone git://github.com/jboner/akka.git - git clone git://github.com/jboner/akka-modules.git - -If you have already cloned the repositories previously then you can update the -code with ``git pull``:: - - git pull origin master - - -SBT - Simple Build Tool ------------------------ - -Akka is using the excellent `SBT `_ -build system. So the first thing you have to do is to download and install -SBT. 
You can read more about how to do that `here -`_ . - -The SBT commands that you'll need to build Akka are all included below. If you -want to find out more about SBT and using it for your own projects do read the -`SBT documentation -`_. - -The Akka SBT build file is ``project/build/AkkaProject.scala`` with some -properties defined in ``project/build.properties``. - - -Building Akka -------------- - -First make sure that you are in the akka code directory:: - - cd akka - - -Fetching dependencies -^^^^^^^^^^^^^^^^^^^^^ - -SBT does not fetch dependencies automatically. You need to manually do this with -the ``update`` command:: - - sbt update - -Once finished, all the dependencies for Akka will be in the ``lib_managed`` -directory under each module: akka-actor, akka-stm, and so on. - -*Note: you only need to run update the first time you are building the code, -or when the dependencies have changed.* - - -Building -^^^^^^^^ - -To compile all the Akka core modules use the ``compile`` command:: - - sbt compile - -You can run all tests with the ``test`` command:: - - sbt test - -If compiling and testing are successful then you have everything working for the -latest Akka development version. - - -Publish to local Ivy repository -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -If you want to deploy the artifacts to your local Ivy repository (for example, -to use from an SBT project) use the ``publish-local`` command:: - - sbt publish-local - - -Publish to local Maven repository -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -If you want to deploy the artifacts to your local Maven repository use:: - - sbt publish-local publish - - -SBT interactive mode -^^^^^^^^^^^^^^^^^^^^ - -Note that in the examples above we are calling ``sbt compile`` and ``sbt test`` -and so on. SBT also has an interactive mode. If you just run ``sbt`` you enter -the interactive SBT prompt and can enter the commands directly. 
This saves -starting up a new JVM instance for each command and can be much faster and more -convenient. - -For example, building Akka as above is more commonly done like this: - -.. code-block:: none - - % sbt - [info] Building project akka 1.1-SNAPSHOT against Scala 2.9.0.RC1 - [info] using AkkaParentProject with sbt 0.7.6.RC0 and Scala 2.7.7 - > update - [info] - [info] == akka-actor / update == - ... - [success] Successful. - [info] - [info] Total time ... - > compile - ... - > test - ... - - -SBT batch mode -^^^^^^^^^^^^^^ - -It's also possible to combine commands in a single call. For example, updating, -testing, and publishing Akka to the local Ivy repository can be done with:: - - sbt update test publish-local - - -Building Akka Modules ---------------------- - -To build Akka Modules first build and publish Akka to your local Ivy repository -as described above. Or using:: - - cd akka - sbt update publish-local - -Then you can build Akka Modules using the same steps as building Akka. First -update to get all dependencies (including the Akka core modules), then compile, -test, or publish-local as needed. For example:: - - cd akka-modules - sbt update publish-local - - -Microkernel distribution -^^^^^^^^^^^^^^^^^^^^^^^^ - -To build the Akka Modules microkernel (the same as the Akka Modules distribution -download) use the ``dist`` command:: - - sbt dist - -The distribution zip can be found in the dist directory and is called -``akka-modules-{version}.zip``. - -To run the microkernel, unzip the zip file, change into the unzipped directory, -set the ``AKKA_HOME`` environment variable, and run the main jar file. For -example: - -.. code-block:: none - - unzip dist/akka-modules-1.1-SNAPSHOT.zip - cd akka-modules-1.1-SNAPSHOT - export AKKA_HOME=`pwd` - java -jar akka-modules-1.1-SNAPSHOT.jar - -The microkernel will boot up and install the sample applications that reside in -the distribution's ``deploy`` directory. 
You can deploy your own applications -into the ``deploy`` directory as well. - - -Scripts -------- - -Linux/Unix init script -^^^^^^^^^^^^^^^^^^^^^^ - -Here is a Linux/Unix init script that can be very useful: - -http://github.com/jboner/akka/blob/master/scripts/akka-init-script.sh - -Copy and modify as needed. - - -Simple startup shell script -^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -This little script might help a bit. Just make sure you have the Akka -distribution in the '$AKKA_HOME/dist' directory and then invoke this script to -start up the kernel. The distribution is created in the './dist' dir for you if -you invoke 'sbt dist'. - -http://github.com/jboner/akka/blob/master/scripts/run_akka.sh - -Copy and modify as needed. - - -Dependencies ------------- - -If you are managing dependencies by hand you can find out what all the compile -dependencies are for each module by looking in the ``lib_managed/compile`` -directories. For example, you can run this to create a listing of dependencies -(providing you have the source code and have run ``sbt update``):: - - cd akka - ls -1 */lib_managed/compile - - -Dependencies used by the Akka core modules ------------------------------------------- - -akka-actor -^^^^^^^^^^ - -* No dependencies - -akka-stm -^^^^^^^^ - -* Depends on akka-actor -* multiverse-alpha-0.6.2.jar - -akka-typed-actor -^^^^^^^^^^^^^^^^ - -* Depends on akka-stm -* aopalliance-1.0.jar -* aspectwerkz-2.2.3.jar -* guice-all-2.0.jar - -akka-remote -^^^^^^^^^^^ - -* Depends on akka-typed-actor -* commons-codec-1.4.jar -* commons-io-2.0.1.jar -* dispatch-json_2.8.1-0.7.8.jar -* guice-all-2.0.jar -* h2-lzf-1.0.jar -* jackson-core-asl-1.7.1.jar -* jackson-mapper-asl-1.7.1.jar -* junit-4.8.1.jar -* netty-3.2.3.Final.jar -* objenesis-1.2.jar -* protobuf-java-2.3.0.jar -* sjson_2.8.1-0.9.1.jar - -akka-http -^^^^^^^^^ - -* Depends on akka-remote -* jsr250-api-1.0.jar -* jsr311-api-1.1.jar - - -Dependencies used by the Akka modules -------------------------------------- - 
-akka-amqp -^^^^^^^^^ - -* Depends on akka-remote -* commons-cli-1.1.jar -* amqp-client-1.8.1.jar - -akka-camel -^^^^^^^^^^ - -* Depends on akka-actor -* camel-core-2.7.0.jar -* commons-logging-api-1.1.jar -* commons-management-1.0.jar - -akka-camel-typed -^^^^^^^^^^^^^^^^ - -* Depends on akka-typed-actor -* camel-core-2.7.0.jar -* commons-logging-api-1.1.jar -* commons-management-1.0.jar - -akka-spring -^^^^^^^^^^^ - -* Depends on akka-camel -* akka-camel-typed -* commons-logging-1.1.1.jar -* spring-aop-3.0.4.RELEASE.jar -* spring-asm-3.0.4.RELEASE.jar -* spring-beans-3.0.4.RELEASE.jar -* spring-context-3.0.4.RELEASE.jar -* spring-core-3.0.4.RELEASE.jar -* spring-expression-3.0.4.RELEASE.jar - -akka-scalaz -^^^^^^^^^^^ - -* Depends on akka-actor -* hawtdispatch-1.1.jar -* hawtdispatch-scala-1.1.jar -* scalaz-core_2.8.1-6.0-SNAPSHOT.jar - -akka-kernel -^^^^^^^^^^^ - -* Depends on akka-http, akka-amqp, and akka-spring -* activation-1.1.jar -* asm-3.1.jar -* jaxb-api-2.1.jar -* jaxb-impl-2.1.12.jar -* jersey-core-1.3.jar -* jersey-json-1.3.jar -* jersey-scala-1.3.jar -* jersey-server-1.3.jar -* jettison-1.1.jar -* jetty-continuation-7.1.6.v20100715.jar -* jetty-http-7.1.6.v20100715.jar -* jetty-io-7.1.6.v20100715.jar -* jetty-security-7.1.6.v20100715.jar -* jetty-server-7.1.6.v20100715.jar -* jetty-servlet-7.1.6.v20100715.jar -* jetty-util-7.1.6.v20100715.jar -* jetty-xml-7.1.6.v20100715.jar -* servlet-api-2.5.jar -* stax-api-1.0.1.jar diff --git a/akka-docs/intro/configuration.rst b/akka-docs/intro/configuration.rst deleted file mode 100644 index fd19b71db4..0000000000 --- a/akka-docs/intro/configuration.rst +++ /dev/null @@ -1,31 +0,0 @@ -Configuration -============= - -Specifying the configuration file ---------------------------------- - -If you don't specify a configuration file then Akka uses default values. If -you want to override these then you should edit the ``akka.conf`` file in the -``AKKA_HOME/config`` directory. 
This config inherits from the -``akka-reference.conf`` file that you see below. Use your ``akka.conf`` to override -any property in the reference config. - -The config can be specified in various ways: - -* Define the ``-Dakka.config=...`` system property option - -* Put an ``akka.conf`` file on the classpath - -* Define the ``AKKA_HOME`` environment variable pointing to the root of the Akka - distribution. The config is taken from the ``AKKA_HOME/config`` directory. You - can also point to the AKKA_HOME by specifying the ``-Dakka.home=...`` system - property option. - - -Defining the configuration file -------------------------------- - -Here is the reference configuration file: - -.. literalinclude:: ../../config/akka-reference.conf - :language: none diff --git a/akka-docs/intro/deployment-scenarios.rst b/akka-docs/intro/deployment-scenarios.rst new file mode 100644 index 0000000000..a8343d5b33 --- /dev/null +++ b/akka-docs/intro/deployment-scenarios.rst @@ -0,0 +1,74 @@ + +.. _deployment-scenarios: + +################################### + Use-case and Deployment Scenarios +################################### + +How can I use and deploy Akka? +============================== + +Akka can be used in two different ways: + +- As a library: used as a regular JAR on the classpath and/or in a web app, to + be put into ``WEB-INF/lib`` + +- As a microkernel: stand-alone microkernel, embedding a servlet container along + with many other services + + +Using Akka as library +--------------------- + +This is most likely what you want if you are building Web applications. There +are several ways you can use Akka in Library mode by adding more and more +modules to the stack. + +Actors as services +^^^^^^^^^^^^^^^^^^ + +The simplest way you can use Akka is to use the actors as services in your Web +application. All that’s needed to do that is to put the Akka charts as well as +its dependency jars into ``WEB-INF/lib``. 
You also need to put the ``akka.conf`` +config file in the ``$AKKA_HOME/config`` directory. Now you can create your +Actors as regular services referenced from your Web application. You should also +be able to use the Remoting service, e.g. be able to make certain Actors remote +on other hosts. Please note that remoting service does not speak HTTP over port +80, but a custom protocol over the port is specified in ``akka.conf``. + + +Using Akka as a stand alone microkernel +--------------------------------------- + +Akka can also be run as a stand-alone microkernel. It implements a full +enterprise stack. See the :ref:`add-on-modules` for more information. + +Using the Akka sbt plugin to package your application +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The Akka sbt plugin can create a full Akka microkernel deployment for your sbt +project. + +To use the plugin, first add a plugin definition to your SBT project by creating +``project/plugins/Plugins.scala`` with:: + + import sbt._ + + class Plugins(info: ProjectInfo) extends PluginDefinition(info) { + val akkaRepo = "Akka Repo" at "http://akka.io/repository" + val akkaPlugin = "se.scalablesolutions.akka" % "akka-sbt-plugin" % "1.1" + } + +Then mix the ``AkkaKernelProject`` trait into your project definition. For +example:: + + class MyProject(info: ProjectInfo) extends DefaultProject(info) with AkkaKernelProject + +This will automatically add all the Akka dependencies needed for a microkernel +deployment (download them with ``sbt update``). + +Place your config files in ``src/main/config``. + +To build a microkernel deployment use the ``dist`` task:: + + sbt dist diff --git a/akka-docs/intro/getting-started-first-java.rst b/akka-docs/intro/getting-started-first-java.rst index 99db6f8c07..6b4b724216 100644 --- a/akka-docs/intro/getting-started-first-java.rst +++ b/akka-docs/intro/getting-started-first-java.rst @@ -1,10 +1,12 @@ +.. 
_getting-started-first-java: + Getting Started Tutorial (Java): First Chapter ============================================== Introduction ------------ -Welcome to the first tutorial on how to get started with Akka and Java. We assume that you already know what Akka and Java are and will now focus on the steps necessary to start your first project. +Welcome to the first tutorial on how to get started with `Akka `_ and Java. We assume that you already know what Akka and Java are and will now focus on the steps necessary to start your first project. There are two variations of this first tutorial: @@ -19,14 +21,25 @@ We will be using an algorithm that is called "embarrassingly parallel" which jus Here is the formula for the algorithm we will use: -.. image:: pi-formula.png +.. image:: ../images/pi-formula.png In this particular algorithm the master splits the series into chunks which are sent out to each worker actor to be processed. When each worker has processed its chunk it sends a result back to the master which aggregates the total result. Tutorial source code -------------------- -If you want don't want to type in the code and/or set up a Maven project then you can check out the full tutorial from the Akka GitHub repository. It is in the ``akka-tutorials/akka-tutorial-first`` module. You can also browse it online `here `_, with the actual source code `here `_. +If you want don't want to type in the code and/or set up a Maven project then you can check out the full tutorial from the Akka GitHub repository. It is in the ``akka-tutorials/akka-tutorial-first`` module. You can also browse it online `here`__, with the actual source code `here`__. 
+ +__ https://github.com/jboner/akka/tree/master/akka-tutorials/akka-tutorial-first +__ https://github.com/jboner/akka/blob/master/akka-tutorials/akka-tutorial-first/src/main/java/akka/tutorial/first/java/Pi.java + +To check out the code using Git invoke the following:: + + $ git clone git://github.com/jboner/akka.git + +Then you can navigate down to the tutorial:: + + $ cd akka/akka-tutorials/akka-tutorial-first Prerequisites ------------- @@ -45,39 +58,49 @@ You can test your installation by invoking ``java``:: Java(TM) SE Runtime Environment (build 1.6.0_24-b07-334-10M3326) Java HotSpot(TM) 64-Bit Server VM (build 19.1-b02-334, mixed mode) + Downloading and installing Akka ------------------------------- -To build and run the tutorial sample from the command line, you have to download Akka. If you prefer to use SBT to build and run the sample then you can skip this section and jump to the next one. +To build and run the tutorial sample from the command line, you have to download +Akka. If you prefer to use SBT to build and run the sample then you can skip +this section and jump to the next one. -Let's get the ``akka-1.1`` distribution of Akka core (not Akka Modules) from `http://akka.io/downloads `_. Once you have downloaded the distribution unzip it in the folder you would like to have Akka installed in, in my case I choose to install it in ``/Users/jboner/tools/``, simply by unzipping it to this directory. +Let's get the ``akka-actors-1.1.zip`` distribution of Akka from +http://akka.io/downloads/ which includes everything we need for this +tutorial. Once you have downloaded the distribution unzip it in the folder you +would like to have Akka installed in. In my case I choose to install it in +``/Users/jboner/tools/``, simply by unzipping it to this directory. -You need to do one more thing in order to install Akka properly: set the ``AKKA_HOME`` environment variable to the root of the distribution. 
In my case I'm opening up a shell, navigating down to the distribution, and setting the ``AKKA_HOME`` variable:: +You need to do one more thing in order to install Akka properly: set the +``AKKA_HOME`` environment variable to the root of the distribution. In my case +I'm opening up a shell, navigating down to the distribution, and setting the +``AKKA_HOME`` variable:: - $ cd /Users/jboner/tools/akka-1.1 + $ cd /Users/jboner/tools/akka-actors-1.1 $ export AKKA_HOME=`pwd` $ echo $AKKA_HOME - /Users/jboner/tools/akka-1.1 + /Users/jboner/tools/akka-actors-1.1 The distribution looks like this:: - $ ls -l - total 16944 - drwxr-xr-x 7 jboner staff 238 Apr 6 11:15 . - drwxr-xr-x 28 jboner staff 952 Apr 6 11:16 .. - drwxr-xr-x 17 jboner staff 578 Apr 6 11:16 deploy - drwxr-xr-x 26 jboner staff 884 Apr 6 11:16 dist - drwxr-xr-x 3 jboner staff 102 Apr 6 11:15 lib_managed - -rwxr-xr-x 1 jboner staff 8674105 Apr 6 11:15 scala-library.jar - drwxr-xr-x 4 jboner staff 136 Apr 6 11:16 scripts + $ ls -1 + config + doc + lib + src -- In the ``dist`` directory we have the Akka JARs, including sources and docs. -- In the ``lib_managed/compile`` directory we have Akka's dependency JARs. -- In the ``deploy`` directory we have the sample JARs. -- In the ``scripts`` directory we have scripts for running Akka. -- Finally ``scala-library.jar`` is the JAR for the latest Scala distribution that Akka depends on. +- In the ``config`` directory we have the Akka conf files. +- In the ``doc`` directory we have the documentation, API, doc JARs, and also + the source files for the tutorials. +- In the ``lib`` directory we have the Scala and Akka JARs. +- In the ``src`` directory we have the source JARs for Akka. -The only JAR we will need for this tutorial (apart from the ``scala-library.jar`` JAR) is the ``akka-actor-1.1.jar`` JAR in the ``dist`` directory. This is a self-contained JAR with zero dependencies and contains everything we need to write a system using Actors. 
+ +The only JAR we will need for this tutorial (apart from the +``scala-library.jar`` JAR) is the ``akka-actor-1.1.jar`` JAR in the ``lib/akka`` +directory. This is a self-contained JAR with zero dependencies and contains +everything we need to write a system using Actors. Akka is very modular and has many JARs for containing different features. The core distribution has seven modules: @@ -89,7 +112,10 @@ Akka is very modular and has many JARs for containing different features. The co - ``akka-slf4j-1.1.jar`` -- SLF4J Event Handler Listener for logging with SLF4J - ``akka-testkit-1.1.jar`` -- Toolkit for testing Actors -We also have Akka Modules containing add-on modules outside the core of Akka. You can download the Akka Modules distribution from TODO. It contains Akka core as well. We will not be needing any modules there today, but for your information the module JARs are these: +We also have Akka Modules containing add-on modules outside the core of +Akka. You can download the Akka Modules distribution from ``_. It contains Akka +core as well. We will not be needing any modules there today, but for your +information the module JARs are these: - ``akka-kernel-1.1.jar`` -- Akka microkernel for running a bare-bones mini application server (embeds Jetty etc.) - ``akka-amqp-1.1.jar`` -- AMQP integration @@ -99,6 +125,7 @@ We also have Akka Modules containing add-on modules outside the core of Akka. Yo - ``akka-spring-1.1.jar`` -- Spring framework integration - ``akka-osgi-dependencies-bundle-1.1.jar`` -- OSGi support + Downloading and installing Maven -------------------------------- @@ -138,7 +165,9 @@ Here is the layout that Maven created:: As you can see we already have a Java source file called ``App.java``, let's now rename it to ``Pi.java``. -We also need to edit the ``pom.xml`` build file. Let's add the dependency we need as well as the Maven repository it should download it from. 
It should now look something like this:: +We also need to edit the ``pom.xml`` build file. Let's add the dependency we need as well as the Maven repository it should download it from. It should now look something like this: + +.. code-block:: xml Akka Akka Maven2 Repository - http://www.scalablesolutions.se/akka/repository/ + http://akka.io/repository/ @@ -265,7 +294,7 @@ Now we can create the worker actor. This is done by extending in the ``UntypedA Work work = (Work) message; // perform the work - double result = calculatePiFor(work.getStart(), work.getNrOfElements()) + double result = calculatePiFor(work.getStart(), work.getNrOfElements()); // reply with the result getContext().replyUnsafe(new Result(result)); @@ -465,7 +494,7 @@ Now the only thing that is left to implement is the runner that should bootstrap pi.calculate(4, 10000, 10000); } - public void calculate(int nrOfWorkers, int nrOfElements, int nrOfMessages) + public void calculate(final int nrOfWorkers, final int nrOfElements, final int nrOfMessages) throws Exception { // this latch is only plumbing to know when the calculation is completed @@ -661,7 +690,7 @@ Before we package it up and run it, let's take a look at the full code now, with // ================== // ===== Run it ===== // ================== - public void calculate(int nrOfWorkers, int nrOfElements, int nrOfMessages) + public void calculate(final int nrOfWorkers, final int nrOfElements, final int nrOfMessages) throws Exception { // this latch is only plumbing to know when the calculation is completed @@ -682,35 +711,42 @@ Before we package it up and run it, let's take a look at the full code now, with } } + Run it as a command line application ------------------------------------ -To build and run the tutorial from the command line, you need to have the Scala library JAR on the classpath. +If you have not typed in (or copied) the code for the tutorial as +``$AKKA_HOME/tutorial/akka/tutorial/first/java/Pi.java`` then now is the +time. 
When that's done open up a shell and step in to the Akka distribution +(``cd $AKKA_HOME``). -Scala can be downloaded from `http://www.scala-lang.org/downloads `_. Browse there and download the Scala 2.9.0.RC1 release. If you pick the ``tgz`` or ``zip`` distribution then just unzip it where you want it installed. If you pick the IzPack Installer then double click on it and follow the instructions. +First we need to compile the source file. That is done with Java's compiler +``javac``. Our application depends on the ``akka-actor-1.1.jar`` and the +``scala-library.jar`` JAR files, so let's add them to the compiler classpath +when we compile the source:: -The ``scala-library.jar`` resides in the ``scala-2.9.0.RC1/lib`` directory. Copy that to your project directory. + $ javac -cp lib/scala-library.jar:lib/akka/akka-actor-1.1.jar tutorial/akka/tutorial/first/java/Pi.java -If you have not typed in (or copied) the code for the tutorial as ``$AKKA_HOME/tutorial/akka/tutorial/first/java/Pi.java`` then now is the time. When that's done open up a shell and step in to the Akka distribution (``cd $AKKA_HOME``). - -First we need to compile the source file. That is done with Java's compiler ``javac``. Our application depends on the ``akka-actor-1.1.jar`` and the ``scala-library.jar`` JAR files, so let's add them to the compiler classpath when we compile the source:: - - $ javac -cp dist/akka-actor-1.1.jar:scala-library.jar tutorial/Pi.scala - -When we have compiled the source file we are ready to run the application. This is done with ``java`` but yet again we need to add the ``akka-actor-1.1.jar`` and the ``scala-library.jar`` JAR files to the classpath as well as the classes we compiled ourselves:: +When we have compiled the source file we are ready to run the application. 
This +is done with ``java`` but yet again we need to add the ``akka-actor-1.1.jar`` +and the ``scala-library.jar`` JAR files to the classpath as well as the classes +we compiled ourselves:: $ java \ - -cp dist/akka-actor-1.1.jar:scala-library.jar:tutorial \ + -cp lib/scala-library.jar:lib/akka/akka-actor-1.1.jar:tutorial \ akka.tutorial.java.first.Pi - AKKA_HOME is defined as [/Users/jboner/src/akka-stuff/akka-core] - loading config from [/Users/jboner/src/akka-stuff/akka-core/config/akka.conf]. + AKKA_HOME is defined as [/Users/jboner/tools/akka-actors-1.1] + loading config from [/Users/jboner/tools/akka-actors-1.1/config/akka.conf]. Pi estimate: 3.1435501812459323 Calculation time: 822 millis Yippee! It is working. -If you have not defined the ``AKKA_HOME`` environment variable then Akka can't find the ``akka.conf`` configuration file and will print out a ``Can’t load akka.conf`` warning. This is ok since it will then just use the defaults. +If you have not defined the ``AKKA_HOME`` environment variable then Akka can't +find the ``akka.conf`` configuration file and will print out a ``Can’t load +akka.conf`` warning. This is ok since it will then just use the defaults. + Run it inside Maven ------------------- @@ -719,7 +755,7 @@ If you used Maven, then you can run the application directly inside Maven. First $ mvn compile -When this in done we can run our application directly inside SBT:: +When this in done we can run our application directly inside Maven:: $ mvn exec:java -Dexec.mainClass="akka.tutorial.first.java.Pi" ... diff --git a/akka-docs/intro/getting-started-first-scala-eclipse.rst b/akka-docs/intro/getting-started-first-scala-eclipse.rst index ebd0064620..bfe533f148 100644 --- a/akka-docs/intro/getting-started-first-scala-eclipse.rst +++ b/akka-docs/intro/getting-started-first-scala-eclipse.rst @@ -1,10 +1,12 @@ +.. 
_getting-started-first-scala-eclipse: + Getting Started Tutorial (Scala with Eclipse): First Chapter ============================================================ Introduction ------------ -Welcome to the first tutorial on how to get started with Akka and Scala. We assume that you already know what Akka and Scala are and will now focus on the steps necessary to start your first project. We will be using `Eclipse `_, and the `Scala plugin for Eclipse `_. +Welcome to the first tutorial on how to get started with `Akka `_ and `Scala `_. We assume that you already know what Akka and Scala are and will now focus on the steps necessary to start your first project. We will be using `Eclipse `_, and the `Scala plugin for Eclipse `_. The sample application that we will create is using actors to calculate the value of Pi. Calculating Pi is a CPU intensive operation and we will utilize Akka Actors to write a concurrent solution that scales out to multi-core processors. This sample will be extended in future tutorials to use Akka Remote Actors to scale out on multiple machines in a cluster. @@ -12,14 +14,17 @@ We will be using an algorithm that is called "embarrassingly parallel" which jus Here is the formula for the algorithm we will use: -.. image:: pi-formula.png +.. image:: ../images/pi-formula.png In this particular algorithm the master splits the series into chunks which are sent out to each worker actor to be processed. When each worker has processed its chunk it sends a result back to the master which aggregates the total result. Tutorial source code -------------------- -If you want don't want to type in the code and/or set up an SBT project then you can check out the full tutorial from the Akka GitHub repository. It is in the ``akka-tutorials/akka-tutorial-first`` module. You can also browse it online `here `_, with the actual source code `here `_. 
+If you don't want to type in the code and/or set up an SBT project then you can check out the full tutorial from the Akka GitHub repository. It is in the ``akka-tutorials/akka-tutorial-first`` module. You can also browse it online `here`__, with the actual source code `here`__. + +__ https://github.com/jboner/akka/tree/master/akka-tutorials/akka-tutorial-first +__ https://github.com/jboner/akka/blob/master/akka-tutorials/akka-tutorial-first/src/main/scala/Pi.scala Prerequisites ------------- @@ -38,39 +43,49 @@ You can test your installation by invoking ``java``:: Java(TM) SE Runtime Environment (build 1.6.0_24-b07-334-10M3326) Java HotSpot(TM) 64-Bit Server VM (build 19.1-b02-334, mixed mode) + Downloading and installing Akka ------------------------------- -To build and run the tutorial sample from the command line, you have to download Akka. If you prefer to use SBT to build and run the sample then you can skip this section and jump to the next one. +To build and run the tutorial sample from the command line, you have to download +Akka. If you prefer to use SBT to build and run the sample then you can skip +this section and jump to the next one. -Let's get the ``akka-1.1`` distribution of Akka core (not Akka Modules) from `http://akka.io/downloads `_. Once you have downloaded the distribution unzip it in the folder you would like to have Akka installed in, in my case I choose to install it in ``/Users/jboner/tools/``, simply by unzipping it to this directory. +Let's get the ``akka-actors-1.1.zip`` distribution of Akka from +http://akka.io/downloads/ which includes everything we need for this +tutorial. Once you have downloaded the distribution unzip it in the folder you +would like to have Akka installed in. In my case I choose to install it in +``/Users/jboner/tools/``, simply by unzipping it to this directory. -You need to do one more thing in order to install Akka properly: set the ``AKKA_HOME`` environment variable to the root of the distribution.
In my case I'm opening up a shell, navigating down to the distribution, and setting the ``AKKA_HOME`` variable:: +You need to do one more thing in order to install Akka properly: set the +``AKKA_HOME`` environment variable to the root of the distribution. In my case +I'm opening up a shell, navigating down to the distribution, and setting the +``AKKA_HOME`` variable:: - $ cd /Users/jboner/tools/akka-1.1 + $ cd /Users/jboner/tools/akka-actors-1.1 $ export AKKA_HOME=`pwd` $ echo $AKKA_HOME - /Users/jboner/tools/akka-1.1 + /Users/jboner/tools/akka-actors-1.1 The distribution looks like this:: - $ ls -l - total 16944 - drwxr-xr-x 7 jboner staff 238 Apr 6 11:15 . - drwxr-xr-x 28 jboner staff 952 Apr 6 11:16 .. - drwxr-xr-x 17 jboner staff 578 Apr 6 11:16 deploy - drwxr-xr-x 26 jboner staff 884 Apr 6 11:16 dist - drwxr-xr-x 3 jboner staff 102 Apr 6 11:15 lib_managed - -rwxr-xr-x 1 jboner staff 8674105 Apr 6 11:15 scala-library.jar - drwxr-xr-x 4 jboner staff 136 Apr 6 11:16 scripts + $ ls -1 + config + doc + lib + src -- In the ``dist`` directory we have the Akka JARs, including sources and docs. -- In the ``lib_managed/compile`` directory we have Akka's dependency JARs. -- In the ``deploy`` directory we have the sample JARs. -- In the ``scripts`` directory we have scripts for running Akka. -- Finally ``scala-library.jar`` is the JAR for the latest Scala distribution that Akka depends on. +- In the ``config`` directory we have the Akka conf files. +- In the ``doc`` directory we have the documentation, API, doc JARs, and also + the source files for the tutorials. +- In the ``lib`` directory we have the Scala and Akka JARs. +- In the ``src`` directory we have the source JARs for Akka. -The only JAR we will need for this tutorial (apart from the ``scala-library.jar`` JAR) is the ``akka-actor-1.1.jar`` JAR in the ``dist`` directory. This is a self-contained JAR with zero dependencies and contains everything we need to write a system using Actors. 
+ +The only JAR we will need for this tutorial (apart from the +``scala-library.jar`` JAR) is the ``akka-actor-1.1.jar`` JAR in the ``lib/akka`` +directory. This is a self-contained JAR with zero dependencies and contains +everything we need to write a system using Actors. Akka is very modular and has many JARs for containing different features. The core distribution has seven modules: @@ -79,10 +94,13 @@ Akka is very modular and has many JARs for containing different features. The co - ``akka-remote-1.1.jar`` -- Remote Actors - ``akka-stm-1.1.jar`` -- STM (Software Transactional Memory), transactors and transactional datastructures - ``akka-http-1.1.jar`` -- Akka Mist for continuation-based asynchronous HTTP and also Jersey integration -- ``akka-slf4j-1.1.jar`` -- SLF4J Event Handler Listener +- ``akka-slf4j-1.1.jar`` -- SLF4J Event Handler Listener for logging with SLF4J - ``akka-testkit-1.1.jar`` -- Toolkit for testing Actors -We also have Akka Modules containing add-on modules outside the core of Akka. You can download the Akka Modules distribution from TODO. It contains Akka core as well. We will not be needing any modules there today, but for your information the module JARs are these: +We also have Akka Modules containing add-on modules outside the core of +Akka. You can download the Akka Modules distribution from ``_. It contains Akka +core as well. We will not be needing any modules there today, but for your +information the module JARs are these: - ``akka-kernel-1.1.jar`` -- Akka microkernel for running a bare-bones mini application server (embeds Jetty etc.) - ``akka-amqp-1.1.jar`` -- AMQP integration @@ -92,6 +110,7 @@ We also have Akka Modules containing add-on modules outside the core of Akka. 
Yo - ``akka-spring-1.1.jar`` -- Spring framework integration - ``akka-osgi-dependencies-bundle-1.1.jar`` -- OSGi support + Downloading and installing the Scala IDE for Eclipse ---------------------------------------------------- @@ -99,19 +118,19 @@ If you want to use Eclipse for coding your Akka tutorial, you need to install th You can install this plugin using the regular update mechanism. First choose a version of the IDE from `http://download.scala-ide.org `_. We recommend you choose 2.0.x, which comes with Scala 2.9. Copy the corresponding URL and then choose ``Help/Install New Software`` and paste the URL you just copied. You should see something similar to the following image. -.. image:: install-beta2-updatesite.png +.. image:: ../images/install-beta2-updatesite.png Make sure you select both the ``JDT Weaving for Scala`` and the ``Scala IDE for Eclipse`` plugins. The other plugin is optional, and contains the source code of the plugin itself. Once the installation is finished, you need to restart Eclipse. The first time the plugin starts it will open a diagnostics window and offer to fix several settings, such as the delay for content assist (code-completion) or the shown completion proposal types. -.. image:: diagnostics-window.png +.. image:: ../images/diagnostics-window.png Accept the recommended settings, and follow the instructions if you need to increase the heap size of Eclipse. Check that the installation succeeded by creating a new Scala project (``File/New>Scala Project``), and typing some code. You should have content-assist, hyperlinking to definitions, instant error reporting, and so on. -.. image:: example-code.png +.. image:: ../images/example-code.png You are ready to code now! @@ -120,16 +139,16 @@ Downloading and installing Scala To build and run the tutorial sample from the command line, you have to install the Scala distribution. 
If you prefer to use Eclipse to build and run the sample then you can skip this section and jump to the next one. -Scala can be downloaded from `http://www.scala-lang.org/downloads `_. Browse there and download the Scala 2.9.0.RC1 release. If you pick the ``tgz`` or ``zip`` distribution then just unzip it where you want it installed. If you pick the IzPack Installer then double click on it and follow the instructions. +Scala can be downloaded from `http://www.scala-lang.org/downloads `_. Browse there and download the Scala 2.9.0 release. If you pick the ``tgz`` or ``zip`` distribution then just unzip it where you want it installed. If you pick the IzPack Installer then double click on it and follow the instructions. -You also need to make sure that the ``scala-2.9.0.RC1/bin`` (if that is the directory where you installed Scala) is on your ``PATH``:: +You also need to make sure that the ``scala-2.9.0/bin`` (if that is the directory where you installed Scala) is on your ``PATH``:: - $ export PATH=$PATH:scala-2.9.0.RC1/bin + $ export PATH=$PATH:scala-2.9.0/bin You can test your installation by invoking scala:: $ scala -version - Scala code runner version 2.9.0.RC1 -- Copyright 2002-2011, LAMP/EPFL + Scala code runner version 2.9.0.final -- Copyright 2002-2011, LAMP/EPFL Looks like we are all good. Finally let's create a source file ``Pi.scala`` for the tutorial and put it in the root of the Akka distribution in the ``tutorial`` directory (you have to create it first). @@ -140,12 +159,12 @@ Creating an Akka project in Eclipse If you have not already done so, now is the time to create an Eclipse project for our tutorial. Use the ``New Scala Project`` wizard and accept the default settings. Once the project is open, we need to add the akka libraries to the *build path*. Right click on the project and choose ``Properties``, then click on ``Java Build Path``. 
Go to ``Libraries`` and click on ``Add External Jars..``, then navigate to the location where you installed akka and choose ``akka-actor.jar``. You should see something similar to this: -.. image:: build-path.png +.. image:: ../images/build-path.png Using SBT in Eclipse ^^^^^^^^^^^^^^^^^^^^ -If you are an `SBT `_ user, you can follow the :doc:`Akka Tutorial in Scala ` and additionally install the ``sbt-eclipse`` plugin. This adds support for generating Eclipse project files from your SBT project. You need to update your SBT plugins definition in ``project/plugins``:: +If you are an `SBT `_ user, you can follow the :ref:`getting-started-first-scala-download-sbt` instruction and additionally install the ``sbt-eclipse`` plugin. This adds support for generating Eclipse project files from your SBT project. You need to update your SBT plugins definition in ``project/plugins``:: import sbt._ @@ -171,7 +190,7 @@ and then update your SBT project definition by mixing in ``Eclipsify`` in your p Then run the ``eclipse`` target to generate the Eclipse project:: dragos@dragos-imac pi $ sbt eclipse - [info] Building project AkkaPi 1.0 against Scala 2.9.0.RC1 + [info] Building project AkkaPi 1.0 against Scala 2.9.0 [info] using MySbtProject with sbt 0.7.4 and Scala 2.7.7 [info] [info] == eclipse == @@ -186,7 +205,7 @@ Then run the ``eclipse`` target to generate the Eclipse project:: Next you need to import this project in Eclipse, by choosing ``Eclipse/Import.. Existing Projects into Workspace``. Navigate to the directory where you defined your SBT project and choose import: -.. image:: import-project.png +.. image:: ../images/import-project.png Now we have the basis for an Akka Eclipse application, so we can.. @@ -234,7 +253,7 @@ Now we can create the worker actor. Create a new class called ``Worker`` as bef The ``Actor`` trait is defined in ``akka.actor`` and you can either import it explicitly, or let Eclipse do it for you when it cannot resolve the ``Actor`` trait. 
The quick fix option (``Ctrl-F1``) will offer two options: -.. image:: quickfix.png +.. image:: ../images/quickfix.png Choose the Akka Actor and move on. @@ -307,11 +326,11 @@ Here is the master actor:: def receive = { ... } - override def preStart { + override def preStart() { start = System.currentTimeMillis } - override def postStop { + override def postStop() { // tell the world that the calculation is complete println( "\n\tPi estimate: \t\t%s\n\tCalculation time: \t%s millis" @@ -393,8 +412,8 @@ Run it from Eclipse Eclipse builds your project on every save when ``Project/Build Automatically`` is set. If not, bring you project up to date by clicking ``Project/Build Project``. If there are no compilation errors, you can right-click in the editor where ``Pi`` is defined, and choose ``Run as.. /Scala application``. If everything works fine, you should see:: - AKKA_HOME is defined as [/Users/jboner/tools/akka-modules-1.1-M1/] - loading config from [/Users/jboner/tools/akka-modules-1.1-M1/config/akka.conf]. + AKKA_HOME is defined as [/Users/jboner/tools/akka-actors-1.1] + loading config from [/Users/jboner/tools/akka-actors-1.1/config/akka.conf]. Pi estimate: 3.1435501812459323 Calculation time: 858 millis @@ -403,7 +422,7 @@ If you have not defined an the ``AKKA_HOME`` environment variable then Akka can' You can also define a new Run configuration, by going to ``Run/Run Configurations``. Create a new ``Scala application`` and choose the tutorial project and the main class to be ``akkatutorial.Pi``. You can pass additional command line arguments to the JVM on the ``Arguments`` page, for instance to define where ``akka.conf`` is: -.. image:: run-config.png +.. image:: ../images/run-config.png Once you finished your run configuration, click ``Run``. You should see the same output in the ``Console`` window. You can use the same configuration for debugging the application, by choosing ``Run/Debug History`` or just ``Debug As``. 
diff --git a/akka-docs/intro/getting-started-first-scala.rst b/akka-docs/intro/getting-started-first-scala.rst index c6ea2f4fe1..f72d5812e9 100644 --- a/akka-docs/intro/getting-started-first-scala.rst +++ b/akka-docs/intro/getting-started-first-scala.rst @@ -1,3 +1,5 @@ +.. _getting-started-first-scala: + Getting Started Tutorial (Scala): First Chapter =============================================== @@ -19,19 +21,30 @@ We will be using an algorithm that is called "embarrassingly parallel" which jus Here is the formula for the algorithm we will use: -.. image:: pi-formula.png +.. image:: ../images/pi-formula.png In this particular algorithm the master splits the series into chunks which are sent out to each worker actor to be processed. When each worker has processed its chunk it sends a result back to the master which aggregates the total result. Tutorial source code -------------------- -If you want don't want to type in the code and/or set up an SBT project then you can check out the full tutorial from the Akka GitHub repository. It is in the ``akka-tutorials/akka-tutorial-first`` module. You can also browse it online `here `_, with the actual source code `here `_. +If you want don't want to type in the code and/or set up an SBT project then you can check out the full tutorial from the Akka GitHub repository. It is in the ``akka-tutorials/akka-tutorial-first`` module. You can also browse it online `here`__, with the actual source code `here`__. 
+ +__ https://github.com/jboner/akka/tree/master/akka-tutorials/akka-tutorial-first +__ https://github.com/jboner/akka/blob/master/akka-tutorials/akka-tutorial-first/src/main/scala/Pi.scala + +To check out the code using Git invoke the following:: + + $ git clone git://github.com/jboner/akka.git + +Then you can navigate down to the tutorial:: + + $ cd akka/akka-tutorials/akka-tutorial-first Prerequisites ------------- -This tutorial assumes that you have Jave 1.6 or later installed on you machine and ``java`` on your ``PATH``. You also need to know how to run commands in a shell (ZSH, Bash, DOS etc.) and a decent text editor or IDE to type in the Scala code. +This tutorial assumes that you have Java 1.6 or later installed on your machine and ``java`` on your ``PATH``. You also need to know how to run commands in a shell (ZSH, Bash, DOS etc.) and a decent text editor or IDE to type in the Scala code. You need to make sure that ``$JAVA_HOME`` environment variable is set to the root of the Java distribution. You also need to make sure that the ``$JAVA_HOME/bin`` is on your ``PATH``:: @@ -45,39 +58,49 @@ You can test your installation by invoking ``java``:: Java(TM) SE Runtime Environment (build 1.6.0_24-b07-334-10M3326) Java HotSpot(TM) 64-Bit Server VM (build 19.1-b02-334, mixed mode) + Downloading and installing Akka ------------------------------- -To build and run the tutorial sample from the command line, you have to download Akka. If you prefer to use SBT to build and run the sample then you can skip this section and jump to the next one. +To build and run the tutorial sample from the command line, you have to download +Akka. If you prefer to use SBT to build and run the sample then you can skip +this section and jump to the next one. -Let's get the ``akka-1.1`` distribution of Akka core (not Akka Modules) from `http://akka.io/downloads `_.
Once you have downloaded the distribution unzip it in the folder you would like to have Akka installed in, in my case I choose to install it in ``/Users/jboner/tools/``, simply by unzipping it to this directory. +Let's get the ``akka-actors-1.1.zip`` distribution of Akka from +http://akka.io/downloads/ which includes everything we need for this +tutorial. Once you have downloaded the distribution unzip it in the folder you +would like to have Akka installed in. In my case I choose to install it in +``/Users/jboner/tools/``, simply by unzipping it to this directory. -You need to do one more thing in order to install Akka properly: set the ``AKKA_HOME`` environment variable to the root of the distribution. In my case I'm opening up a shell, navigating down to the distribution, and setting the ``AKKA_HOME`` variable:: +You need to do one more thing in order to install Akka properly: set the +``AKKA_HOME`` environment variable to the root of the distribution. In my case +I'm opening up a shell, navigating down to the distribution, and setting the +``AKKA_HOME`` variable:: - $ cd /Users/jboner/tools/akka-1.1 + $ cd /Users/jboner/tools/akka-actors-1.1 $ export AKKA_HOME=`pwd` $ echo $AKKA_HOME - /Users/jboner/tools/akka-1.1 + /Users/jboner/tools/akka-actors-1.1 The distribution looks like this:: - $ ls -l - total 16944 - drwxr-xr-x 7 jboner staff 238 Apr 6 11:15 . - drwxr-xr-x 28 jboner staff 952 Apr 6 11:16 .. - drwxr-xr-x 17 jboner staff 578 Apr 6 11:16 deploy - drwxr-xr-x 26 jboner staff 884 Apr 6 11:16 dist - drwxr-xr-x 3 jboner staff 102 Apr 6 11:15 lib_managed - -rwxr-xr-x 1 jboner staff 8674105 Apr 6 11:15 scala-library.jar - drwxr-xr-x 4 jboner staff 136 Apr 6 11:16 scripts + $ ls -1 + config + doc + lib + src -- In the ``dist`` directory we have the Akka JARs, including sources and docs. -- In the ``lib_managed/compile`` directory we have Akka's dependency JARs. -- In the ``deploy`` directory we have the sample JARs. 
-- In the ``scripts`` directory we have scripts for running Akka. -- Finally ``scala-library.jar`` is the JAR for the latest Scala distribution that Akka depends on. +- In the ``config`` directory we have the Akka conf files. +- In the ``doc`` directory we have the documentation, API, doc JARs, and also + the source files for the tutorials. +- In the ``lib`` directory we have the Scala and Akka JARs. +- In the ``src`` directory we have the source JARs for Akka. -The only JAR we will need for this tutorial (apart from the ``scala-library.jar`` JAR) is the ``akka-actor-1.1.jar`` JAR in the ``dist`` directory. This is a self-contained JAR with zero dependencies and contains everything we need to write a system using Actors. + +The only JAR we will need for this tutorial (apart from the +``scala-library.jar`` JAR) is the ``akka-actor-1.1.jar`` JAR in the ``lib/akka`` +directory. This is a self-contained JAR with zero dependencies and contains +everything we need to write a system using Actors. Akka is very modular and has many JARs for containing different features. The core distribution has seven modules: @@ -86,10 +109,13 @@ Akka is very modular and has many JARs for containing different features. The co - ``akka-remote-1.1.jar`` -- Remote Actors - ``akka-stm-1.1.jar`` -- STM (Software Transactional Memory), transactors and transactional datastructures - ``akka-http-1.1.jar`` -- Akka Mist for continuation-based asynchronous HTTP and also Jersey integration -- ``akka-slf4j-1.1.jar`` -- SLF4J Event Handler Listener +- ``akka-slf4j-1.1.jar`` -- SLF4J Event Handler Listener for logging with SLF4J - ``akka-testkit-1.1.jar`` -- Toolkit for testing Actors -We also have Akka Modules containing add-on modules outside the core of Akka. You can download the Akka Modules distribution from TODO. It contains Akka core as well. 
We will not be needing any modules there today, but for your information the module JARs are these: +We also have Akka Modules containing add-on modules outside the core of +Akka. You can download the Akka Modules distribution from ``_. It contains Akka +core as well. We will not be needing any modules there today, but for your +information the module JARs are these: - ``akka-kernel-1.1.jar`` -- Akka microkernel for running a bare-bones mini application server (embeds Jetty etc.) - ``akka-amqp-1.1.jar`` -- AMQP integration @@ -99,26 +125,29 @@ We also have Akka Modules containing add-on modules outside the core of Akka. Yo - ``akka-spring-1.1.jar`` -- Spring framework integration - ``akka-osgi-dependencies-bundle-1.1.jar`` -- OSGi support + Downloading and installing Scala -------------------------------- To build and run the tutorial sample from the command line, you have to install the Scala distribution. If you prefer to use SBT to build and run the sample then you can skip this section and jump to the next one. -Scala can be downloaded from `http://www.scala-lang.org/downloads `_. Browse there and download the Scala 2.9.0.RC1 release. If you pick the ``tgz`` or ``zip`` distribution then just unzip it where you want it installed. If you pick the IzPack Installer then double click on it and follow the instructions. +Scala can be downloaded from `http://www.scala-lang.org/downloads `_. Browse there and download the Scala 2.9.0 release. If you pick the ``tgz`` or ``zip`` distribution then just unzip it where you want it installed. If you pick the IzPack Installer then double click on it and follow the instructions. 
-You also need to make sure that the ``scala-2.9.0.RC1/bin`` (if that is the directory where you installed Scala) is on your ``PATH``:: +You also need to make sure that the ``scala-2.9.0/bin`` (if that is the directory where you installed Scala) is on your ``PATH``:: - $ export PATH=$PATH:scala-2.9.0.RC1/bin + $ export PATH=$PATH:scala-2.9.0/bin You can test your installation by invoking scala:: $ scala -version - Scala code runner version 2.9.0.RC1 -- Copyright 2002-2011, LAMP/EPFL + Scala code runner version 2.9.0.final -- Copyright 2002-2011, LAMP/EPFL Looks like we are all good. Finally let's create a source file ``Pi.scala`` for the tutorial and put it in the root of the Akka distribution in the ``tutorial`` directory (you have to create it first). Some tools require you to set the ``SCALA_HOME`` environment variable to the root of the Scala distribution, however Akka does not require that. +.. _getting-started-first-scala-download-sbt: + Downloading and installing SBT ------------------------------ @@ -140,7 +169,7 @@ If you have not already done so, now is the time to create an SBT project for ou Name: Tutorial 1 Organization: Hakkers Inc Version [1.0]: - Scala version [2.9.0.RC1]: + Scala version [2.9.0]: sbt version [0.7.6.RC0]: Now we have the basis for an SBT project. Akka has an SBT Plugin making it very easy to use Akka is an SBT-based project so let's use that. @@ -158,9 +187,7 @@ Now we need to create a project definition using our Akka SBT plugin. We do that import sbt._ - class TutorialOneProject(info: ProjectInfo) extends DefaultProject(info) with AkkaProject { - val akkaRepo = "Akka Repo" at "http://akka.io/repository" - } + class TutorialOneProject(info: ProjectInfo) extends DefaultProject(info) with AkkaProject The magic is in mixing in the ``AkkaProject`` trait. @@ -173,8 +200,11 @@ Not needed in this tutorial, but if you would like to use additional Akka module So, now we are all set. 
Just one final thing to do; make SBT download the dependencies it needs. That is done by invoking:: + > reload > update +The first reload command is needed because we have changed the project definition since the sbt session started. + SBT itself needs a whole bunch of dependencies but our project will only need one; ``akka-actor-1.1.jar``. SBT downloads that as well. Start writing the code @@ -291,11 +321,11 @@ Here is the master actor:: def receive = { ... } - override def preStart { + override def preStart() { start = System.currentTimeMillis } - override def postStop { + override def postStop() { // tell the world that the calculation is complete println( "\n\tPi estimate: \t\t%s\n\tCalculation time: \t%s millis" @@ -451,11 +481,11 @@ But before we package it up and run it, let's take a look at the full code now, if (nrOfResults == nrOfMessages) self.stop() } - override def preStart { + override def preStart() { start = System.currentTimeMillis } - override def postStop { + override def postStop() { // tell the world that the calculation is complete println( "\n\tPi estimate: \t\t%s\n\tCalculation time: \t%s millis" @@ -491,15 +521,15 @@ If you have not typed in (or copied) the code for the tutorial as ``$AKKA_HOME/t First we need to compile the source file. That is done with Scala's compiler ``scalac``. Our application depends on the ``akka-actor-1.1.jar`` JAR file, so let's add that to the compiler classpath when we compile the source:: - $ scalac -cp dist/akka-actor-1.1.jar tutorial/Pi.scala + $ scalac -cp lib/akka/akka-actor-1.1.jar tutorial/Pi.scala When we have compiled the source file we are ready to run the application. 
This is done with ``java`` but yet again we need to add the ``akka-actor-1.1.jar`` JAR file to the classpath, and this time we also need to add the Scala runtime library ``scala-library.jar`` and the classes we compiled ourselves:: $ java \ - -cp dist/akka-actor-1.1.jar:scala-library.jar:tutorial \ + -cp lib/scala-library.jar:lib/akka/akka-actor-1.1.jar:. \ akka.tutorial.first.scala.Pi - AKKA_HOME is defined as [/Users/jboner/src/akka-stuff/akka-core] - loading config from [/Users/jboner/src/akka-stuff/akka-core/config/akka.conf]. + AKKA_HOME is defined as [/Users/jboner/tools/akka-actors-1.1] + loading config from [/Users/jboner/tools/akka-actors-1.1/config/akka.conf]. Pi estimate: 3.1435501812459323 Calculation time: 858 millis diff --git a/akka-docs/intro/getting-started.rst b/akka-docs/intro/getting-started.rst new file mode 100644 index 0000000000..049dc103e2 --- /dev/null +++ b/akka-docs/intro/getting-started.rst @@ -0,0 +1,198 @@ +Getting Started +=============== + +.. sidebar:: Contents + + .. contents:: :local: + +The best way to start learning Akka is to try the Getting Started Tutorial, which comes in several flavours +depending on you development environment preferences: + +- :ref:`getting-started-first-java` for Java development, either + + - as standalone project, running from the command line, + - or as Maven project and running it from within Maven + +- :ref:`getting-started-first-scala` for Scala development, either + + - as standalone project, running from the command line, + - or as SBT (Simple Build Tool) project and running it from within SBT + +- :ref:`getting-started-first-scala-eclipse` for Scala development with Eclipse + +The Getting Started Tutorial describes everything you need to get going, and you don't need to read the rest of +this page if you study the tutorial. For later look back reference this page describes the +essential parts for getting started with different development environments. 
+ +Prerequisites +------------- + +Akka requires that you have `Java 1.6 `_ or +later installed on your machine. + +Download +-------- + +There are several ways to download Akka. You can download the full distribution with microkernel, which includes +all modules. You can download just the core distribution. Or you can use a build tool like Maven or SBT to download +dependencies from the Akka Maven repository. + +Modules +------- + +Akka is split up into two different parts: + +* Akka - The core modules. Reflects all the sections under :ref:`scala-api` and :ref:`java-api`. +* Akka Modules - The microkernel and add-on modules, described in :ref:`add-on-modules`. + +Akka is very modular and has many JARs for containing different features. The core distribution has seven modules: + +- ``akka-actor-1.1.jar`` -- Standard Actors +- ``akka-typed-actor-1.1.jar`` -- Typed Actors +- ``akka-remote-1.1.jar`` -- Remote Actors +- ``akka-stm-1.1.jar`` -- STM (Software Transactional Memory), transactors and transactional datastructures +- ``akka-http-1.1.jar`` -- Akka Mist for continuation-based asynchronous HTTP and also Jersey integration +- ``akka-slf4j-1.1.jar`` -- SLF4J Event Handler Listener +- ``akka-testkit-1.1.jar`` -- Toolkit for testing Actors + +We also have Akka Modules containing add-on modules outside the core of Akka. + +- ``akka-kernel-1.1.jar`` -- Akka microkernel for running a bare-bones mini application server (embeds Jetty etc.)
+- ``akka-amqp-1.1.jar`` -- AMQP integration +- ``akka-camel-1.1.jar`` -- Apache Camel Actors integration (it's the best way to have your Akka application communicate with the rest of the world) +- ``akka-camel-typed-1.1.jar`` -- Apache Camel Typed Actors integration +- ``akka-scalaz-1.1.jar`` -- Support for the Scalaz library +- ``akka-spring-1.1.jar`` -- Spring framework integration +- ``akka-osgi-dependencies-bundle-1.1.jar`` -- OSGi support + + +How to see the JARs dependencies of each Akka module is described in the :ref:`dependencies` section. Worth noting +is that ``akka-actor`` has zero external dependencies (apart from the ``scala-library.jar`` JAR). + +Using a release distribution +---------------------------- + +Download the release you need, Akka core or Akka Modules, from ``_ and unzip it. + +Microkernel +^^^^^^^^^^^ + +The Akka Modules distribution includes the microkernel. To run the microkernel: + +* Set the AKKA_HOME environment variable to the root of the Akka distribution. +* To start the kernel use the scripts in the ``bin`` directory and deploy all samples applications from ``./deploy`` dir. + +More information is available in the documentation of the Microkernel in :ref:`add-on-modules`. + +Using a build tool +------------------ + +Akka can be used with build tools that support Maven repositories. The Akka Maven repository can be found at ``_. + +Using Akka with Maven +--------------------- + +Information about how to use Akka with Maven, including how to create an Akka Maven project from scratch, +can be found in the :ref:`getting-started-first-java`. + +Summary of the essential parts for using Akka with Maven: + +1) Add this repository to your ``pom.xml``: + +.. code-block:: xml + + + Akka + Akka Maven2 Repository + http://akka.io/repository/ + + +2) Add the Akka dependencies. For example, here is the dependency for Akka Actor 1.1: + +.. 
code-block:: xml + + + se.scalablesolutions.akka + akka-actor + 1.1 + + + + +Using Akka with SBT +------------------- + +Information about how to use Akka with SBT, including how to create an Akka SBT project from scratch, +can be found in the :ref:`getting-started-first-scala`. + +Summary of the essential parts for using Akka with SBT: + +1) Akka has an SBT plugin which makes it very easy to get started with Akka and SBT. + +The Scala version in your SBT project needs to match the version that Akka is built against. For Akka 1.1 this is +Scala version 2.9.0. + +To use the plugin, first add a plugin definition to your SBT project by creating project/plugins/Plugins.scala with: + +.. code-block:: scala + + import sbt._ + + class Plugins(info: ProjectInfo) extends PluginDefinition(info) { + val akkaRepo = "Akka Repo" at "http://akka.io/repository" + val akkaPlugin = "se.scalablesolutions.akka" % "akka-sbt-plugin" % "1.1" + } + +*Note: the plugin version matches the Akka version provided. The current release is 1.1.* + +2) Then mix the AkkaProject trait into your project definition. For example: + +.. code-block:: scala + + class MyProject(info: ProjectInfo) extends DefaultProject(info) with AkkaProject + +*Note: This adds akka-actor as a dependency by default.* + +If you also want to include other Akka modules there is a convenience method: ``akkaModule``. For example, you can add extra Akka modules by adding any of the following lines to your project class: + +.. 
code-block:: scala + + val akkaStm = akkaModule("stm") + val akkaTypedActor = akkaModule("typed-actor") + val akkaRemote = akkaModule("remote") + val akkaHttp = akkaModule("http") + val akkaAmqp = akkaModule("amqp") + val akkaCamel = akkaModule("camel") + val akkaCamelTyped = akkaModule("camel-typed") + val akkaSpring = akkaModule("spring") + + +Using Akka with Eclipse +----------------------- + +Information about how to use Akka with Eclipse, including how to create an Akka Eclipse project from scratch, +can be found in the :ref:`getting-started-first-scala-eclipse`. + +Using Akka with IntelliJ IDEA +----------------------------- + +Setup SBT project and then use `sbt-idea `_ to generate IntelliJ IDEA project. + +Build from sources +------------------ + +Akka uses Git and is hosted at `Github `_. + +* Akka: clone the Akka repository from ``_ +* Akka Modules: clone the Akka Modules repository from ``_ + +Continue reading the page on :ref:`building-akka` + +Need help? +---------- + +If you have questions you can get help on the `Akka Mailing List `_. + +You can also ask for `commercial support `_. + +Thanks for being a part of the Akka community. diff --git a/akka-docs/intro/index.rst b/akka-docs/intro/index.rst index 8df1a87a5d..b04b877827 100644 --- a/akka-docs/intro/index.rst +++ b/akka-docs/intro/index.rst @@ -4,9 +4,12 @@ Introduction .. 
toctree:: :maxdepth: 2 + what-is-akka why-akka + getting-started getting-started-first-scala getting-started-first-scala-eclipse getting-started-first-java - building-akka - configuration + deployment-scenarios + use-cases + diff --git a/akka-docs/intro/use-cases.rst b/akka-docs/intro/use-cases.rst new file mode 100644 index 0000000000..fd434e89cc --- /dev/null +++ b/akka-docs/intro/use-cases.rst @@ -0,0 +1,48 @@ +Examples of use-cases for Akka +============================== + +There is a great discussion on use-cases for Akka with some good write-ups by production users `here `_ + +Here are some of the areas where Akka is being deployed into production +----------------------------------------------------------------------- + +**Transaction processing (Online Gaming, Finance/Banking, Trading, Statistics, Betting, Social Media, Telecom)** +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + Scale up, scale out, fault-tolerance / HA + +**Service backend (any industry, any app)** +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + Service REST, SOAP, Cometd, WebSockets etc + Act as message hub / integration layer + Scale up, scale out, fault-tolerance / HA + +**Concurrency/parallelism (any app)** +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + Correct + Simple to work with and understand + Just add the jars to your existing JVM project (use Scala, Java, Groovy or JRuby) + +**Simulation** +^^^^^^^^^^^^^^ + Master/Worker, Compute Grid, MapReduce etc. 
+ +**Batch processing (any industry)** +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + Camel integration to hook up with batch data sources + Actors divide and conquer the batch workloads + +**Communications Hub (Telecom, Web media, Mobile media)** +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + Scale up, scale out, fault-tolerance / HA + +**Gaming and Betting (MOM, online gaming, betting)** +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + Scale up, scale out, fault-tolerance / HA + +**Business Intelligence/Data Mining/general purpose crunching** +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + Scale up, scale out, fault-tolerance / HA + +**Complex Event Stream Processing** +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + Scale up, scale out, fault-tolerance / HA diff --git a/akka-docs/intro/what-is-akka.rst b/akka-docs/intro/what-is-akka.rst new file mode 100644 index 0000000000..dbfcf63916 --- /dev/null +++ b/akka-docs/intro/what-is-akka.rst @@ -0,0 +1,62 @@ + +.. _what-is-akka: + +############### + What is Akka? +############### + + +**Simpler Scalability, Fault-Tolerance, Concurrency & Remoting through Actors** + +We believe that writing correct concurrent, fault-tolerant and scalable +applications is too hard. Most of the time it's because we are using the wrong +tools and the wrong level of abstraction. Akka is here to change that. Using the +Actor Model together with Software Transactional Memory we raise the abstraction +level and provide a better platform to build correct concurrent and scalable +applications. For fault-tolerance we adopt the Let it crash/Embrace failure +model which have been used with great success in the telecom industry to build +applications that self-heals, systems that never stop. Actors also provides the +abstraction for transparent distribution and the basis for truly scalable and +fault-tolerant applications. Akka is Open Source and available under the Apache +2 License. 
+ + +Download from http://akka.io/downloads/ + + +Akka implements a unique hybrid +=============================== + +- :ref:`untyped-actors-java`, which gives you: + + - Simple and high-level abstractions for concurrency and parallelism. + - Asynchronous, non-blocking and highly performant event-driven programming model. + - Very lightweight event-driven processes (create ~6.5 million actors on 4GB RAM). + +- :ref:`fault-tolerance-java` through supervisor hierarchies with "let-it-crash" + semantics. Excellent for writing highly fault-tolerant systems that never + stop, systems that self-heal. + +- :ref:`stm-java` (STM). (Distributed transactions coming soon). + +- :ref:`transactors-java`: combine actors and STM into transactional + actors. Allows you to compose atomic message flows with automatic retry and + rollback. + +- :ref:`remote-actors-java`: highly performant distributed actors with remote + supervision and error management. + +- :ref:`java-api` and :ref:`scala-api` + + +Akka can be used in two different ways +====================================== + +- As a library: used by a web app, to be put into ‘WEB-INF/lib’ or as a regular + JAR on your classpath. + +- As a microkernel: stand-alone kernel, embedding a servlet container and all + the other modules. + + +See the :ref:`deployment-scenarios` for details. diff --git a/akka-docs/intro/why-akka.rst b/akka-docs/intro/why-akka.rst index 512a669b2f..08c82b2bc4 100644 --- a/akka-docs/intro/why-akka.rst +++ b/akka-docs/intro/why-akka.rst @@ -53,16 +53,15 @@ And that's all in the ApacheV2-licensed open source project. On top of that we have a commercial product called Cloudy Akka which provides the following features: -#. Dynamically clustered ActorRegistry with both automatic and manual migration - of actors +#. Management through Dashboard, JMX and REST +#. Monitoring through Dashboard, JMX and SNMP +#. Dapper-style tracing of messages across components and remote nodes +#. A configurable alert system +#. 
Real-time statistics +#. Very low overhead monitoring agents (should always be on in production) +#. Consolidation of statistics and logging information to a single node +#. Data analysis through Hadoop +#. Storage of statistics data for later processing +#. Provisioning and rolling upgrades through a dashboard -#. Cluster membership and cluster event subscriptions - -#. Durable actor mailboxes of different sizes and shapes - file-backed, - Redis-backed, ZooKeeper-backed, Beanstalkd-backed and with AMQP and JMS-based - in the works - -#. Monitoring influenced by Dapper for cross-machine message tracing and - JMX-exposed statistics - -Read more `here `_. +Read more `here `_. diff --git a/akka-docs/pending/actor-registry-java.rst b/akka-docs/java/actor-registry.rst similarity index 72% rename from akka-docs/pending/actor-registry-java.rst rename to akka-docs/java/actor-registry.rst index 67be08b2a8..41d50c05c3 100644 --- a/akka-docs/pending/actor-registry-java.rst +++ b/akka-docs/java/actor-registry.rst @@ -8,12 +8,12 @@ ActorRegistry: Finding Actors Actors can be looked up using the 'akka.actor.Actors.registry()' object. 
Through this registry you can look up actors by: -* uuid com.eaio.uuid.UUID – this uses the ‘uuid’ field in the Actor class, returns the actor reference for the actor with specified uuid, if one exists, otherwise None -* id string – this uses the ‘id’ field in the Actor class, which can be set by the user (default is the class name), returns all actor references to actors with specified id -* parameterized type - returns a 'ActorRef[]' with all actors that are a subtype of this specific type -* specific actor class - returns a 'ActorRef[]' with all actors of this exact class +* uuid com.eaio.uuid.UUID – this uses the ``uuid`` field in the Actor class, returns the actor reference for the actor with specified uuid, if one exists, otherwise None +* id string – this uses the ``id`` field in the Actor class, which can be set by the user (default is the class name), returns all actor references to actors with specified id +* parameterized type - returns a ``ActorRef[]`` with all actors that are a subtype of this specific type +* specific actor class - returns a ``ActorRef[]`` with all actors of this exact class -Actors are automatically registered in the ActorRegistry when they are started and removed when they are stopped. But you can explicitly register and unregister ActorRef's if you need to using the 'register' and 'unregister' methods. +Actors are automatically registered in the ActorRegistry when they are started and removed when they are stopped. But you can explicitly register and unregister ActorRef's if you need to using the ``register`` and ``unregister`` methods. Here is a summary of the API for finding actors: @@ -31,7 +31,7 @@ You can shut down all Actors in the system by invoking: registry().shutdownAll(); -If you want to know when a new Actor is added or to or removed from the registry, you can use the subscription API. 
You can register an Actor that should be notified when an event happens in the ActorRegistry: +If you want to know when a new Actor is added to or removed from the registry, you can use the subscription API on the registry. You can register an Actor that should be notified when an event happens in the ActorRegistry: .. code-block:: java diff --git a/akka-docs/pending/dataflow-java.rst b/akka-docs/java/dataflow.rst similarity index 99% rename from akka-docs/pending/dataflow-java.rst rename to akka-docs/java/dataflow.rst index a5f1929431..52437647a5 100644 --- a/akka-docs/pending/dataflow-java.rst +++ b/akka-docs/java/dataflow.rst @@ -1,6 +1,10 @@ Dataflow Concurrency (Java) =========================== +.. sidebar:: Contents + + .. contents:: :local: + Introduction ------------ @@ -13,6 +17,7 @@ Dataflow concurrency is deterministic. This means that it will always behave the The best way to learn how to program with dataflow variables is to read the fantastic book `Concepts, Techniques, and Models of Computer Programming `_. By Peter Van Roy and Seif Haridi. The documentation is not as complete as it should be, something we will improve shortly. For now, besides above listed resources on dataflow concurrency, I recommend you to read the documentation for the GPars implementation, which is heavily influenced by the Akka implementation: + * ``_ * ``_ @@ -138,6 +143,7 @@ Shows how to shutdown dataflow variables and bind threads to values to be able t Example in Akka: .. code-block:: java + import static akka.dataflow.DataFlow.*; import akka.japi.Effect; diff --git a/akka-docs/pending/dispatchers-java.rst b/akka-docs/java/dispatchers.rst similarity index 95% rename from akka-docs/pending/dispatchers-java.rst rename to akka-docs/java/dispatchers.rst index b9d5ee9ee8..578fcd4ff5 100644 --- a/akka-docs/pending/dispatchers-java.rst +++ b/akka-docs/java/dispatchers.rst @@ -1,6 +1,10 @@ Dispatchers (Java) ================== +.. sidebar:: Contents + + .. 
contents:: :local: + Module stability: **SOLID** The Dispatcher is an important piece that allows you to configure the right semantics and parameters for optimal performance, throughput and scalability. Different Actors have different needs. @@ -125,10 +129,10 @@ Setting this to a higher number will increase throughput but lower fairness, and If you don't define a the 'throughput' option in the configuration file then the default value of '5' will be used. -Browse the `ScalaDoc `_ or look at the code for all the options available. +Browse the :ref:`scaladoc` or look at the code for all the options available. Priority event-based -^^^^^^^^^^^ +^^^^^^^^^^^^^^^^^^^^ Sometimes it's useful to be able to specify priority order of messages, that is done by using PriorityExecutorBasedEventDrivenDispatcher and supply a java.util.Comparator[MessageInvocation] or use a akka.dispatch.PriorityGenerator (recommended): @@ -137,7 +141,7 @@ Creating a PriorityExecutorBasedEventDrivenDispatcher using PriorityGenerator: .. code-block:: java - package some.package; + package some.pkg; import akka.actor.*; import akka.dispatch.*; @@ -249,13 +253,14 @@ For the 'ExecutorBasedEventDrivenDispatcher' and the 'ExecutorBasedWorkStealingD For the 'ThreadBasedDispatcher', it is non-shareable between actors, and associates a dedicated Thread with the actor. Making it bounded (by specifying a capacity) is optional, but if you do, you need to provide a pushTimeout (default is 10 seconds). When trying to send a message to the Actor it will throw a MessageQueueAppendFailedException("BlockingMessageTransferQueue transfer timed out") if the message cannot be added to the mailbox within the time specified by the pushTimeout. -``_ -class MyActor extends UntypedActor { - public MyActor() { - int mailboxCapacity = 100; - Duration pushTimeout = new FiniteDuration(10, TimeUnit.SECONDS); - getContext().setDispatcher(Dispatchers.newThreadBasedDispatcher(getContext(), mailboxCapacity, pushTimeout)); +.. 
code-block:: java + + class MyActor extends UntypedActor { + public MyActor() { + int mailboxCapacity = 100; + Duration pushTimeout = new FiniteDuration(10, TimeUnit.SECONDS); + getContext().setDispatcher(Dispatchers.newThreadBasedDispatcher(getContext(), mailboxCapacity, pushTimeout)); + } + ... } - ... -} -``_ + diff --git a/akka-docs/pending/fault-tolerance-java.rst b/akka-docs/java/fault-tolerance.rst similarity index 94% rename from akka-docs/pending/fault-tolerance-java.rst rename to akka-docs/java/fault-tolerance.rst index 96190c7b8e..b89b3978b4 100644 --- a/akka-docs/pending/fault-tolerance-java.rst +++ b/akka-docs/java/fault-tolerance.rst @@ -1,6 +1,12 @@ +.. _fault-tolerance-java: + Fault Tolerance Through Supervisor Hierarchies (Java) ===================================================== +.. sidebar:: Contents + + .. contents:: :local: + Module stability: **SOLID** The "let it crash" approach to fault/error handling, implemented by linking actors, is very different to what Java and most non-concurrency oriented languages/frameworks have adopted. It’s a way of dealing with failure that is designed for concurrent and distributed systems. @@ -10,14 +16,14 @@ Concurrency Throwing an exception in concurrent code (let’s assume we are using non-linked actors), will just simply blow up the thread that currently executes the actor. -# There is no way to find out that things went wrong (apart from inspecting the stack trace). -# There is nothing you can do about it. +- There is no way to find out that things went wrong (apart from inspecting the stack trace). +- There is nothing you can do about it. Here actors provide a clean way of getting notification of the error and do something about it. Linking actors also allow you to create sets of actors where you can be sure that either: -# All are dead -# None are dead +- All are dead +- None are dead This is very useful when you have thousands of concurrent actors. 
Some actors might have implicit dependencies and together implement a service, computation, user session etc. @@ -56,8 +62,8 @@ Restart callbacks There are two different callbacks that an UntypedActor or TypedActor can hook in to: -* Pre restart -* Post restart +- Pre restart +- Post restart These are called prior to and after the restart upon failure and can be used to clean up and reset/reinitialize state upon restart. This is important in order to reset the component failure and leave the component in a fresh and stable state before consuming further messages. @@ -66,8 +72,8 @@ Defining a supervisor's restart strategy Both the Typed Actor supervisor configuration and the Actor supervisor configuration take a ‘FaultHandlingStrategy’ instance which defines the fault management. The different strategies are: -* AllForOne -* OneForOne +- AllForOne +- OneForOne These have the semantics outlined in the section above. @@ -86,8 +92,8 @@ Defining actor life-cycle The other common configuration element is the ‘LifeCycle’ which defines the life-cycle. The supervised actor can define one of two different life-cycle configurations: -* Permanent: which means that the actor will always be restarted. -* Temporary: which means that the actor will **not** be restarted, but it will be shut down through the regular shutdown process so the 'postStop' callback function will called. +- Permanent: which means that the actor will always be restarted. +- Temporary: which means that the actor will **not** be restarted, but it will be shut down through the regular shutdown process so the 'postStop' callback function will called. Here is an example of how to define the life-cycle: @@ -126,7 +132,7 @@ The Actor’s supervision can be declaratively defined by creating a ‘Supervis Supervisors created like this are implicitly instantiated and started. 
To configure a handler function for when the actor underlying the supervisor receives a MaximumNumberOfRestartsWithinTimeRangeReached message, you can specify - a Procedure2 when creating the SupervisorConfig. This handler will be called with the ActorRef of the supervisor and the +a Procedure2 when creating the SupervisorConfig. This handler will be called with the ActorRef of the supervisor and the MaximumNumberOfRestartsWithinTimeRangeReached message. .. code-block:: java @@ -254,10 +260,13 @@ The supervising Actor also needs to define a fault handler that defines the rest The different options are: -* AllForOneStrategy(trapExit, maxNrOfRetries, withinTimeRange) - * trapExit is an Array of classes inheriting from Throwable, they signal which types of exceptions this actor will handle -* OneForOneStrategy(trapExit, maxNrOfRetries, withinTimeRange) - * trapExit is an Array of classes inheriting from Throwable, they signal which types of exceptions this actor will handle +- AllForOneStrategy(trapExit, maxNrOfRetries, withinTimeRange) + + - trapExit is an Array of classes inheriting from Throwable, they signal which types of exceptions this actor will handle + +- OneForOneStrategy(trapExit, maxNrOfRetries, withinTimeRange) + + - trapExit is an Array of classes inheriting from Throwable, they signal which types of exceptions this actor will handle Here is an example: @@ -346,8 +355,8 @@ Supervised actors have the option to reply to the initial sender within preResta } } -* A reply within preRestart or postRestart must be a safe reply via getContext().replySafe() because a getContext().replyUnsafe() will throw an exception when the actor is restarted without having failed. This can be the case in context of AllForOne restart strategies. 
-* A reply within postStop must be a safe reply via getContext().replySafe() because a getContext().replyUnsafe() will throw an exception when the actor has been stopped by the application (and not by a supervisor) after successful execution of receive (or no execution at all). +- A reply within preRestart or postRestart must be a safe reply via getContext().replySafe() because a getContext().replyUnsafe() will throw an exception when the actor is restarted without having failed. This can be the case in context of AllForOne restart strategies. +- A reply within postStop must be a safe reply via getContext().replySafe() because a getContext().replyUnsafe() will throw an exception when the actor has been stopped by the application (and not by a supervisor) after successful execution of receive (or no execution at all). Handling too many actor restarts within a specific time limit ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -366,10 +375,10 @@ Now, what happens if this limit is reached? What will happen is that the failing actor will send a system message to its supervisor called 'MaximumNumberOfRestartsWithinTimeRangeReached' with the following these properties: -* victim: ActorRef -* maxNrOfRetries: int -* withinTimeRange: int -* lastExceptionCausingRestart: Throwable +- victim: ActorRef +- maxNrOfRetries: int +- withinTimeRange: int +- lastExceptionCausingRestart: Throwable If you want to be able to take action upon this event (highly recommended) then you have to create a message handle for it in the supervisor. @@ -479,6 +488,7 @@ If the parent TypedActor (supervisor) wants to be able to do handle failing chil For convenience there is an overloaded link that takes trapExit and faultHandler for the supervisor as arguments. Here is an example: .. 
code-block:: java + import static akka.actor.TypedActor.*; import static akka.config.Supervision.*; diff --git a/akka-docs/java/futures.rst b/akka-docs/java/futures.rst new file mode 100644 index 0000000000..660dc6bd05 --- /dev/null +++ b/akka-docs/java/futures.rst @@ -0,0 +1,269 @@ +.. _futures-java: + +Futures (Java) +=============== + +.. sidebar:: Contents + + .. contents:: :local: + +Introduction +------------ + +In Akka, a `Future `_ is a data structure used to retrieve the result of some concurrent operation. This operation is usually performed by an ``Actor`` or by the ``Dispatcher`` directly. This result can be accessed synchronously (blocking) or asynchronously (non-blocking). + +Use with Actors +--------------- + +There are generally two ways of getting a reply from an ``UntypedActor``: the first is by a sent message (``actorRef.sendOneWay(msg);``), which only works if the original sender was an ``UntypedActor``) and the second is through a ``Future``. + +Using the ``ActorRef``\'s ``sendRequestReplyFuture`` method to send a message will return a Future. To wait for and retrieve the actual result the simplest method is: + +.. code-block:: java + + Future[Object] future = actorRef.sendRequestReplyFuture[Object](msg); + Object result = future.get(); //Block until result is available, usually bad practice + +This will cause the current thread to block and wait for the ``UntypedActor`` to 'complete' the ``Future`` with it's reply. Due to the dynamic nature of Akka's ``UntypedActor``\s this result can be anything. The safest way to deal with this is to specify the result to an ``Object`` as is shown in the above example. You can also use the expected result type instead of ``Any``, but if an unexpected type were to be returned you will get a ``ClassCastException``. For more elegant ways to deal with this and to use the result without blocking, refer to `Functional Futures`_. 
+ +Use Directly +------------ + +A common use case within Akka is to have some computation performed concurrently without needing the extra utility of an ``UntypedActor``. If you find yourself creating a pool of ``UntypedActor``\s for the sole reason of performing a calculation in parallel, there is an easier (and faster) way: + +.. code-block:: java + + import akka.dispatch.Future; + import static akka.dispatch.Futures.future; + import java.util.concurrent.Callable; + + Future f = future(new Callable() { + public String call() { + return "Hello" + "World!"; + } + }); + String result = f.get(); //Blocks until timeout, default timeout is set in akka.conf, otherwise 5 seconds + +In the above code the block passed to ``future`` will be executed by the default ``Dispatcher``, with the return value of the block used to complete the ``Future`` (in this case, the result would be the string: "HelloWorld"). Unlike a ``Future`` that is returned from an ``UntypedActor``, this ``Future`` is properly typed, and we also avoid the overhead of managing an ``UntypedActor``. + +Functional Futures +------------------ + +A recent addition to Akka's ``Future`` is several monadic methods that are very similar to the ones used by ``Scala``'s collections. These allow you to create 'pipelines' or 'streams' that the result will travel through. + +Future is a Monad +^^^^^^^^^^^^^^^^^ + +The first method for working with ``Future`` functionally is ``map``. This method takes a ``Function`` which performs some operation on the result of the ``Future``, and returning a new result. The return value of the ``map`` method is another ``Future`` that will contain the new result: + +.. 
code-block:: java + + import akka.dispatch.Future; + import static akka.dispatch.Futures.future; + import static akka.japi.Function; + import java.util.concurrent.Callable; + + Future f1 = future(new Callable() { + public String call() { + return "Hello" + "World"; + } + }); + + Future f2 = f1.map(new Function() { + public Integer apply(String s) { + return s.length(); + } + }); + + Integer result = f2.get(); + +In this example we are joining two strings together within a Future. Instead of waiting for f1 to complete, we apply our function that calculates the length of the string using the ``map`` method. Now we have a second Future, f2, that will eventually contain an ``Integer``. When our original ``Future``, f1, completes, it will also apply our function and complete the second Future with it's result. When we finally ``get`` the result, it will contain the number 10. Our original Future still contains the string "HelloWorld" and is unaffected by the ``map``. + +Something to note when using these methods: if the ``Future`` is still being processed when one of these methods are called, it will be the completing thread that actually does the work. If the ``Future`` is already complete though, it will be run in our current thread. For example: + +.. code-block:: java + + import akka.dispatch.Future; + import static akka.dispatch.Futures.future; + import static akka.japi.Function; + import java.util.concurrent.Callable; + + Future f1 = future(new Callable() { + public String call() { + Thread.sleep(1000); + return "Hello" + "World"; + } + }); + + Future f2 = f1.map(new Function() { + public Integer apply(String s) { + return s.length(); + } + }); + + Integer result = f2.get(); + +The original ``Future`` will take at least 1 second to execute now, which means it is still being processed at the time we call ``map``. The function we provide gets stored within the ``Future`` and later executed automatically by the dispatcher when the result is ready. 
+ +If we do the opposite: + +.. code-block:: java + + import akka.dispatch.Future; + import static akka.dispatch.Futures.future; + import static akka.japi.Function; + import java.util.concurrent.Callable; + + Future f1 = future(new Callable() { + public String call() { + return "Hello" + "World"; + } + }); + + Thread.sleep(1000); + + Future f2 = f1.map(new Function() { + public Integer apply(String s) { + return s.length(); + } + }); + + Integer result = f2.get(); + +Our little string has been processed long before our 1 second sleep has finished. Because of this, the dispatcher has moved onto other messages that need processing and can no longer calculate the length of the string for us, instead it gets calculated in the current thread just as if we weren't using a ``Future``. + +Normally this works quite well as it means there is very little overhead to running a quick function. If there is a possibility of the function taking a non-trivial amount of time to process it might be better to have this done concurrently, and for that we use ``flatMap``: + +.. code-block:: java + + import akka.dispatch.Future; + import static akka.dispatch.Futures.future; + import static akka.japi.Function; + import java.util.concurrent.Callable; + + Future f1 = future(new Callable() { + public String call() { + return "Hello" + "World"; + } + }); + + Future f2 = f1.flatMap(new Function>() { + public Future apply(final String s) { + return future( + new Callable() { + public Integer call() { + return s.length(); + } + }); + } + }); + + Integer result = f2.get(); + +Now our second Future is executed concurrently as well. This technique can also be used to combine the results of several Futures into a single calculation, which will be better explained in the following sections. + +Composing Futures +^^^^^^^^^^^^^^^^^ + +It is very often desirable to be able to combine different Futures with each other, below are some examples on how that can be done in a non-blocking fashion. + +.. 
code-block:: java + + import akka.dispatch.Future; + import static akka.dispatch.Futures.sequence; + import akka.japi.Function; + import java.lang.Iterable; + + Iterable> listOfFutureInts = ... //Some source generating a sequence of Future:s + + // now we have a Future[Iterable[Int]] + Future> futureListOfInts = sequence(listOfFutureInts); + + // Find the sum of the odd numbers + Long totalSum = futureListOfInts.map( + new Function, Long>() { + public Long apply(LinkedList ints) { + long sum = 0; + for(Integer i : ints) + sum += i; + return sum; + } + }).get(); + +To better explain what happened in the example, ``Future.sequence`` is taking the ``Iterable>`` and turning it into a ``Future>``. We can then use ``map`` to work with the ``Iterable`` directly, and we aggregate the sum of the ``Iterable``. + +The ``traverse`` method is similar to ``sequence``, but it takes a sequence of ``A``s and applies a function from ``A`` to ``Future`` and returns a ``Future>``, enabling parallel ``map`` over the sequence, if you use ``Futures.future`` to create the ``Future``. + +.. code-block:: java + + import akka.dispatch.Future; + import static akka.dispatch.Futures.traverse; + import static akka.dispatch.Futures.future; + import java.lang.Iterable; + import akka.japi.Function; + + Iterable listStrings = ... //Just a sequence of Strings + + Future> result = traverse(listStrings, new Function>(){ + public Future apply(final String r) { + return future(new Callable() { + public String call() { + return r.toUpperCase(); + } + }); + } + }); + + result.get(); //Returns the sequence of strings as upper case + +It's as simple as that! 
+ +Then there's a method that's called ``fold`` that takes a start-value, a sequence of ``Future``:s and a function from the type of the start-value, a timeout, and the type of the futures and returns something with the same type as the start-value, and then applies the function to all elements in the sequence of futures, non-blockingly, the execution will run on the Thread of the last completing Future in the sequence. + +.. code-block:: java + + import akka.dispatch.Future; + import static akka.dispatch.Futures.fold; + import java.lang.Iterable; + import akka.japi.Function2; + + Iterable> futures = ... //A sequence of Futures, in this case Strings + + Future result = fold("", 15000, futures, new Function2(){ //Start value is the empty string, timeout is 15 seconds + public String apply(String r, String t) { + return r + t; //Just concatenate + } + }); + + result.get(); // Will produce a String that says "testtesttesttest"(... and so on). + +That's all it takes! + + +If the sequence passed to ``fold`` is empty, it will return the start-value, in the case above, that will be the empty string. In some cases you don't have a start-value and you're able to use the value of the first completing Future in the sequence as the start-value, you can use ``reduce``, it works like this: + +.. code-block:: java + + import akka.dispatch.Future; + import static akka.dispatch.Futures.reduce; + import java.util.Iterable; + import akka.japi.Function2; + + Iterable> futures = ... //A sequence of Futures, in this case Strings + + Future result = reduce(futures, 15000, new Function2(){ //Timeout is 15 seconds + public String apply(String r, String t) { + return r + t; //Just concatenate + } + }); + + result.get(); // Will produce a String that says "testtesttesttest"(... and so on). 
+ +Same as with ``fold``, the execution will be done by the Thread that completes the last of the Futures. You can also parallelize it by chunking your futures into sub-sequences, reducing them, and then reducing the reduced results again. + +This is just a sample of what can be done. + +Exceptions +---------- + +Since the result of a ``Future`` is created concurrently to the rest of the program, exceptions must be handled differently. It doesn't matter if an ``UntypedActor`` or the dispatcher is completing the ``Future``, if an ``Exception`` is caught the ``Future`` will contain it instead of a valid result. If a ``Future`` does contain an ``Exception``, calling ``get`` will cause it to be thrown again so it can be handled properly. \ No newline at end of file diff --git a/akka-docs/pending/guice-integration.rst b/akka-docs/java/guice-integration.rst similarity index 93% rename from akka-docs/pending/guice-integration.rst rename to akka-docs/java/guice-integration.rst index 6392bddfd3..de00b701cb 100644 --- a/akka-docs/pending/guice-integration.rst +++ b/akka-docs/java/guice-integration.rst @@ -45,6 +45,8 @@ Retrieve the external Guice dependency -------------------------------------- The external dependency can be retrieved like this: -``_ -Ext ext = manager.getExternalDependency(Ext.class); -``_ + +.. code-block:: java + + Ext ext = manager.getExternalDependency(Ext.class); + diff --git a/akka-docs/java/index.rst b/akka-docs/java/index.rst new file mode 100644 index 0000000000..d9e900fd22 --- /dev/null +++ b/akka-docs/java/index.rst @@ -0,0 +1,21 @@ +.. _java-api: + +Java API +========= + +.. 
toctree:: + :maxdepth: 2 + + untyped-actors + typed-actors + actor-registry + futures + dataflow + stm + transactors + remote-actors + serialization + fault-tolerance + dispatchers + routing + guice-integration diff --git a/akka-docs/pending/remote-actors-java.rst b/akka-docs/java/remote-actors.rst similarity index 73% rename from akka-docs/pending/remote-actors-java.rst rename to akka-docs/java/remote-actors.rst index 47f27d6cef..8eb5573e16 100644 --- a/akka-docs/pending/remote-actors-java.rst +++ b/akka-docs/java/remote-actors.rst @@ -1,11 +1,17 @@ +.. _remote-actors-java: + Remote Actors (Java) ==================== +.. sidebar:: Contents + + .. contents:: :local: + Module stability: **SOLID** -Akka supports starting UntypedActors and TypedActors on remote nodes using a very efficient and scalable NIO implementation built upon `JBoss Netty `_ and `Google Protocol Buffers `_ . +Akka supports starting and interacting with UntypedActors and TypedActors on remote nodes using a very efficient and scalable NIO implementation built upon `JBoss Netty `_ and `Google Protocol Buffers `_ . -The usage is completely transparent both in regards to sending messages and error handling and propagation as well as supervision, linking and restarts. You can send references to other Actors as part of the message. +The usage is completely transparent with local actors, both in regards to sending messages and error handling and propagation as well as supervision, linking and restarts. You can send references to other Actors as part of the message. **WARNING**: For security reasons, do not run an Akka node with a Remote Actor port reachable by untrusted connections unless you have supplied a classloader that restricts access to the JVM. 
@@ -142,12 +148,6 @@ The default behavior is that the remote client will maintain a transaction log o If you choose a capacity higher than 0, then a bounded queue will be used and if the limit of the queue is reached then a 'RemoteClientMessageBufferException' will be thrown. -You can also get an Array with all the messages that the remote client has failed to send. Since the remote client events passes you an instance of the RemoteClient you have an easy way to act upon failure and do something with these messages (while waiting for them to be retried). - -.. code-block:: java - - Object[] pending = Actors.remote().pendingMessages(); - Running Remote Server in untrusted mode --------------------------------------- @@ -253,21 +253,13 @@ You can also generate the secure cookie by using the 'Crypt' object and its 'gen The secure cookie is a cryptographically secure randomly generated byte array turned into a SHA-1 hash. -Remote Actors -------------- - -Akka has two types of remote actors: - -* Client-initiated and managed. Here it is the client that creates the remote actor and "moves it" to the server. -* Server-initiated and managed. Here it is the server that creates the remote actor and the client can ask for a handle to this actor. - -They are good for different use-cases. The client-initiated are great when you want to monitor an actor on another node since it allows you to link to it and supervise it using the regular supervision semantics. They also make RPC completely transparent. The server-initiated, on the other hand, are great when you have a service running on the server that you want clients to connect to, and you want full control over the actor on the server side for security reasons etc. - Client-managed Remote UntypedActor -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +---------------------------------- DEPRECATED AS OF 1.1 +The client creates the remote actor and "moves it" to the server. 
+ When you define an actors as being remote it is instantiated as on the remote host and your local actor becomes a proxy, it works as a handle to the remote actor. The real execution is always happening on the remote node. Here is an example: @@ -291,26 +283,31 @@ An UntypedActor can also start remote child Actors through one of the “spawn/l .. code-block:: java ... - getContext().spawnRemote(MyActor.class, hostname, port); + getContext().spawnRemote(MyActor.class, hostname, port, timeoutInMsForFutures); getContext().spawnLinkRemote(MyActor.class, hostname, port, timeoutInMsForFutures); ... Server-managed Remote UntypedActor -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +---------------------------------- + +Here it is the server that creates the remote actor and the client can ask for a handle to this actor. Server side setup -***************** +^^^^^^^^^^^^^^^^^ The API for server managed remote actors is really simple. 2 methods only: .. code-block:: java + import akka.actor.Actors; + import akka.actor.UntypedActor; + class MyActor extends UntypedActor { public void onReceive(Object message) throws Exception { ... } } - Actors.remote().start("localhost", 2552).register("hello-service", Actors.actorOf(HelloWorldActor.class); + Actors.remote().start("localhost", 2552).register("hello-service", Actors.actorOf(HelloWorldActor.class)); Actors created like this are automatically started. @@ -322,88 +319,6 @@ You can also register an actor by its UUID rather than ID or handle. This is don server.unregister("uuid:" + actor.uuid); -Client side usage -***************** - -.. code-block:: java - - ActorRef actor = Actors.remote().actorFor("hello-service", "localhost", 2552); - actor.sendOneWay("Hello"); - -There are many variations on the 'remote()#actorFor' method. Here are some of them: - -.. code-block:: java - - ... = actorFor(className, hostname, port); - ... = actorFor(className, timeout, hostname, port); - ... = actorFor(uuid, className, hostname, port); - ... 
= actorFor(uuid, className, timeout, hostname, port); - ... // etc - -All of these also have variations where you can pass in an explicit 'ClassLoader' which can be used when deserializing messages sent from the remote actor. - -Client-managed Remote TypedActor -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -DEPRECATED AS OF 1.1 - -Remote Typed Actors are created through the 'TypedActor.newRemoteInstance' factory method. - -.. code-block:: java - - MyPOJO remoteActor = (MyPOJO)TypedActor.newRemoteInstance(MyPOJO.class, MyPOJOImpl.class, , "localhost", 2552); - -And if you want to specify the timeout: - -.. code-block:: java - - MyPOJO remoteActor = (MyPOJO)TypedActor.newRemoteInstance(MyPOJO.class, MyPOJOImpl.class, timeout, "localhost", 2552); - -You can also define the Typed Actor to be a client-managed-remote service by adding the ‘RemoteAddress’ configuration element in the declarative supervisor configuration: - -.. code-block:: java - - new Component( - Foo.class, - FooImpl.class, - new LifeCycle(new Permanent(), 1000), - 1000, - new RemoteAddress("localhost", 2552)) - -Server-managed Remote TypedActor -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -WARNING: Remote TypedActors do not work with overloaded methods on your TypedActor, refrain from using overloading. - -Server side setup -***************** - -The API for server managed remote typed actors is nearly the same as for untyped actor: - -.. code-block:: java - - import static akka.actor.Actors.*; - remote().start("localhost", 2552); - - RegistrationService typedActor = TypedActor.newInstance(RegistrationService.class, RegistrationServiceImpl.class, 2000); - remote().registerTypedActor("user-service", typedActor); - -Client side usage - -.. code-block:: java - - import static akka.actor.Actors.*; - RegistrationService actor = remote().typedActorFor(RegistrationService.class, "user-service", 5000L, "localhost", 2552); - actor.registerUser(...); - -There are variations on the 'remote()#typedActorFor' method. 
Here are some of them: - -.. code-block:: java - - ... = typedActorFor(interfaceClazz, serviceIdOrClassName, hostname, port); - ... = typedActorFor(interfaceClazz, serviceIdOrClassName, timeout, hostname, port); - ... = typedActorFor(interfaceClazz, serviceIdOrClassName, timeout, hostname, port, classLoader); - Session bound server side setup ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -414,17 +329,19 @@ Session bound actors are useful if you need to keep state per session, e.g. user .. code-block:: java import static akka.actor.Actors.*; + import akka.japi.Creator; + class HelloWorldActor extends Actor { ... } remote().start("localhost", 2552); - remote().registerPerSession("hello-service", new Creator[ActorRef]() { + remote().registerPerSession("hello-service", new Creator() { public ActorRef create() { return actorOf(HelloWorldActor.class); } - }) + }); Note that the second argument in registerPerSession is a Creator, it means that the create method will create a new ActorRef each invocation. It will be called to create an actor every time a session is established. @@ -443,19 +360,22 @@ There are many variations on the 'remote()#actorFor' method. Here are some of th .. code-block:: java - ... = actorFor(className, hostname, port); - ... = actorFor(className, timeout, hostname, port); - ... = actorFor(uuid, className, hostname, port); - ... = actorFor(uuid, className, timeout, hostname, port); + ... = remote().actorFor(className, hostname, port); + ... = remote().actorFor(className, timeout, hostname, port); + ... = remote().actorFor(uuid, className, hostname, port); + ... = remote().actorFor(uuid, className, timeout, hostname, port); ... // etc -Automatic remote 'sender' reference management -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +All of these also have variations where you can pass in an explicit 'ClassLoader' which can be used when deserializing messages sent from the remote actor. 
-Akka is automatically remote-enabling the sender Actor reference for you in order to allow the receiver to respond to the message using 'getContext().getSender().sendOneWay(msg);' or 'getContext().reply(msg);'. By default it is registering the sender reference in the remote server with the 'hostname' and 'port' from the akka.conf configuration file. The default is "localhost" and 2552 and if there is no remote server with this hostname and port then it creates and starts it. +Automatic remote 'sender' reference management +---------------------------------------------- + +The sender of a remote message will be reachable with a reply through the remote server on the node that the actor is residing, automatically. +Please note that firewalled clients won't work right now. [2011-01-05] Identifying remote actors -^^^^^^^^^^^^^^^^^^^^^^^^^ +------------------------- The 'id' field in the 'Actor' class is of importance since it is used as identifier for the remote actor. If you want to create a brand new actor every time you instantiate a remote actor then you have to set the 'id' field to a unique 'String' for each instance. If you want to reuse the same remote actor instance for each new remote actor (of the same class) you create then you don't have to do anything since the 'id' field by default is equal to the name of the actor class. @@ -463,18 +383,83 @@ Here is an example of overriding the 'id' field: .. code-block:: java - import akka.util.UUID; + import akka.actor.UntypedActor; + import com.eaio.uuid.UUID; class MyActor extends UntypedActor { public MyActor() { - getContext().setId(UUID.newUuid().toString()); + getContext().setId(new UUID().toString()); } public void onReceive(Object message) throws Exception { - ... + // ... } } +Client-managed Remote Typed Actors +---------------------------------- + +DEPRECATED AS OF 1.1 + +Remote Typed Actors are created through the 'TypedActor.newRemoteInstance' factory method. + +.. 
code-block:: java + + MyPOJO remoteActor = (MyPOJO) TypedActor.newRemoteInstance(MyPOJO.class, MyPOJOImpl.class, "localhost", 2552); + +And if you want to specify the timeout: + +.. code-block:: java + + MyPOJO remoteActor = (MyPOJO)TypedActor.newRemoteInstance(MyPOJO.class, MyPOJOImpl.class, timeout, "localhost", 2552); + +You can also define the Typed Actor to be a client-managed-remote service by adding the ‘RemoteAddress’ configuration element in the declarative supervisor configuration: + +.. code-block:: java + + new Component( + Foo.class, + FooImpl.class, + new LifeCycle(new Permanent(), 1000), + 1000, + new RemoteAddress("localhost", 2552)) + +Server-managed Remote Typed Actors +---------------------------------- + +WARNING: Remote TypedActors do not work with overloaded methods on your TypedActor, refrain from using overloading. + +Server side setup +^^^^^^^^^^^^^^^^^ + +The API for server managed remote typed actors is nearly the same as for untyped actor: + +.. code-block:: java + + import static akka.actor.Actors.*; + remote().start("localhost", 2552); + + RegistrationService typedActor = TypedActor.newInstance(RegistrationService.class, RegistrationServiceImpl.class, 2000); + remote().registerTypedActor("user-service", typedActor); + + +Client side usage +^^^^^^^^^^^^^^^^^ + +.. code-block:: java + + import static akka.actor.Actors.*; + RegistrationService actor = remote().typedActorFor(RegistrationService.class, "user-service", 5000L, "localhost", 2552); + actor.registerUser(...); + +There are variations on the 'remote()#typedActorFor' method. Here are some of them: + +.. code-block:: java + + ... = remote().typedActorFor(interfaceClazz, serviceIdOrClassName, hostname, port); + ... = remote().typedActorFor(interfaceClazz, serviceIdOrClassName, timeout, hostname, port); + ... 
= remote().typedActorFor(interfaceClazz, serviceIdOrClassName, timeout, hostname, port, classLoader); + Data Compression Configuration ------------------------------ @@ -493,44 +478,55 @@ You can configure it like this: } } +Code provisioning +----------------- + +Akka does currently not support automatic code provisioning but requires you to have the remote actor class files available on both the "client" the "server" nodes. +This is something that will be addressed soon. Until then, sorry for the inconvenience. + Subscribe to Remote Client events --------------------------------- Akka has a subscription API for remote client events. You can register an Actor as a listener and this actor will have to be able to process these events: -RemoteClientError { Throwable cause; RemoteClientModule client; InetSocketAddress remoteAddress; } -RemoteClientDisconnected { RemoteClientModule client; InetSocketAddress remoteAddress; } -RemoteClientConnected { RemoteClientModule client; InetSocketAddress remoteAddress; } -RemoteClientStarted { RemoteClientModule client; InetSocketAddress remoteAddress; } -RemoteClientShutdown { RemoteClientModule client; InetSocketAddress remoteAddress; } -RemoteClientWriteFailed { Object message; Throwable cause; RemoteClientModule client; InetSocketAddress remoteAddress; } +.. code-block:: java + + class RemoteClientError { Throwable cause; RemoteClientModule client; InetSocketAddress remoteAddress; } + class RemoteClientDisconnected { RemoteClientModule client; InetSocketAddress remoteAddress; } + class RemoteClientConnected { RemoteClientModule client; InetSocketAddress remoteAddress; } + class RemoteClientStarted { RemoteClientModule client; InetSocketAddress remoteAddress; } + class RemoteClientShutdown { RemoteClientModule client; InetSocketAddress remoteAddress; } + class RemoteClientWriteFailed { Object message; Throwable cause; RemoteClientModule client; InetSocketAddress remoteAddress; } So a simple listener actor can look like this: .. 
code-block:: java + import akka.actor.UntypedActor; + import akka.remoteinterface.*; + class Listener extends UntypedActor { public void onReceive(Object message) throws Exception { if (message instanceof RemoteClientError) { - RemoteClientError event = (RemoteClientError)message; - Exception cause = event.getCause(); - ... + RemoteClientError event = (RemoteClientError) message; + Throwable cause = event.getCause(); + // ... } else if (message instanceof RemoteClientConnected) { - RemoteClientConnected event = (RemoteClientConnected)message; - ... + RemoteClientConnected event = (RemoteClientConnected) message; + // ... } else if (message instanceof RemoteClientDisconnected) { - RemoteClientDisconnected event = (RemoteClientDisconnected)message; - ... + RemoteClientDisconnected event = (RemoteClientDisconnected) message; + // ... } else if (message instanceof RemoteClientStarted) { - RemoteClientStarted event = (RemoteClientStarted)message; - ... + RemoteClientStarted event = (RemoteClientStarted) message; + // ... } else if (message instanceof RemoteClientShutdown) { - RemoteClientShutdown event = (RemoteClientShutdown)message; - ... + RemoteClientShutdown event = (RemoteClientShutdown) message; + // ... } else if (message instanceof RemoteClientWriteFailed) { - RemoteClientWriteFailed event = (RemoteClientWriteFailed)message; - ... + RemoteClientWriteFailed event = (RemoteClientWriteFailed) message; + // ... } } } @@ -550,43 +546,45 @@ Subscribe to Remote Server events Akka has a subscription API for the server events. 
You can register an Actor as a listener and this actor will have to be able to process these events: -RemoteServerStarted { RemoteServerModule server; } -RemoteServerShutdown { RemoteServerModule server; } -RemoteServerError { Throwable cause; RemoteServerModule server; } -RemoteServerClientConnected { RemoteServerModule server; Option clientAddress; } -RemoteServerClientDisconnected { RemoteServerModule server; Option clientAddress; } -RemoteServerClientClosed { RemoteServerModule server; Option clientAddress; } -RemoteServerWriteFailed { Object request; Throwable cause; RemoteServerModule server; Option clientAddress; } +.. code-block:: java + + class RemoteServerStarted { RemoteServerModule server; } + class RemoteServerShutdown { RemoteServerModule server; } + class RemoteServerError { Throwable cause; RemoteServerModule server; } + class RemoteServerClientConnected { RemoteServerModule server; Option clientAddress; } + class RemoteServerClientDisconnected { RemoteServerModule server; Option clientAddress; } + class RemoteServerClientClosed { RemoteServerModule server; Option clientAddress; } + class RemoteServerWriteFailed { Object request; Throwable cause; RemoteServerModule server; Option clientAddress; } So a simple listener actor can look like this: .. code-block:: java + import akka.actor.UntypedActor; + import akka.remoteinterface.*; + class Listener extends UntypedActor { public void onReceive(Object message) throws Exception { - if (message instanceof RemoteServerError) { - RemoteServerError event = (RemoteServerError)message; - Exception cause = event.getCause(); - ... - } else if (message instanceof RemoteServerStarted) { - RemoteServerStarted event = (RemoteServerStarted)message; - ... - } else if (message instanceof RemoteServerShutdown) { - RemoteServerShutdown event = (RemoteServerShutdown)message; - ... - } else if (message instanceof RemoteServerClientConnected) { - RemoteServerClientConnected event = (RemoteServerClientConnected)message; - ... 
- } else if (message instanceof RemoteServerClientDisconnected) { - RemoteServerClientDisconnected event = (RemoteServerClientDisconnected)message; - ... - } else if (message instanceof RemoteServerClientClosed) { - RemoteServerClientClosed event = (RemoteServerClientClosed)message; - ... - } else if (message instanceof RemoteServerWriteFailed) { - RemoteServerWriteFailed event = (RemoteServerWriteFailed)message; - ... + if (message instanceof RemoteServerError) { + RemoteServerError event = (RemoteServerError) message; + Throwable cause = event.getCause(); + // ... + } else if (message instanceof RemoteServerClientConnected) { + RemoteServerClientConnected event = (RemoteServerClientConnected) message; + // ... + } else if (message instanceof RemoteServerClientDisconnected) { + RemoteServerClientDisconnected event = (RemoteServerClientDisconnected) message; + // ... + } else if (message instanceof RemoteServerStarted) { + RemoteServerStarted event = (RemoteServerStarted) message; + // ... + } else if (message instanceof RemoteServerShutdown) { + RemoteServerShutdown event = (RemoteServerShutdown) message; + // ... + } else if (message instanceof RemoteServerWriteFailed) { + RemoteServerWriteFailed event = (RemoteServerWriteFailed) message; + // ... } } } @@ -608,10 +606,27 @@ Message Serialization All messages that are sent to remote actors needs to be serialized to binary format to be able to travel over the wire to the remote node. This is done by letting your messages extend one of the traits in the 'akka.serialization.Serializable' object. If the messages don't implement any specific serialization trait then the runtime will try to use standard Java serialization. -Read more about that in the `Serialization section `_. +Here is one example, but full documentation can be found in the :ref:`serialization-java`. 
-Code provisioning ------------------ +Protobuf +^^^^^^^^ -Akka does currently not support automatic code provisioning but requires you to have the remote actor class files available on both the "client" the "server" nodes. -This is something that will be addressed soon. Until then, sorry for the inconvenience. +Protobuf message specification needs to be compiled with 'protoc' compiler. + +:: + + message ProtobufPOJO { + required uint64 id = 1; + required string name = 2; + required bool status = 3; + } + +Using the generated message builder to send the message to a remote actor: + +.. code-block:: java + + actor.sendOneWay(ProtobufPOJO.newBuilder() + .setId(11) + .setStatus(true) + .setName("Coltrane") + .build()); diff --git a/akka-docs/pending/routing-java.rst b/akka-docs/java/routing.rst similarity index 93% rename from akka-docs/pending/routing-java.rst rename to akka-docs/java/routing.rst index 2c818af896..4caae2a7d0 100644 --- a/akka-docs/pending/routing-java.rst +++ b/akka-docs/java/routing.rst @@ -1,8 +1,8 @@ Routing (Java) ============== -**UntypedDispatcher** ---------------------- +UntypedDispatcher +----------------- An UntypedDispatcher is an actor that routes incoming messages to outbound actors. @@ -42,8 +42,8 @@ An UntypedDispatcher is an actor that routes incoming messages to outbound actor dispatcher.sendOneWay("Ping"); //Prints "Pinger: Ping" dispatcher.sendOneWay("Pong"); //Prints "Ponger: Pong" -**UntypedLoadBalancer** ------------------------ +UntypedLoadBalancer +------------------- An UntypedLoadBalancer is an actor that forwards messages it receives to a boundless sequence of destination actors. @@ -88,6 +88,7 @@ An UntypedLoadBalancer is an actor that forwards messages it receives to a bound You can also send a 'new Routing.Broadcast(msg)' message to the router to have it be broadcasted out to all the actors it represents. -``_ -router.sendOneWay(new Routing.Broadcast(new PoisonPill())); -``_ +.. 
code-block:: java + + router.sendOneWay(new Routing.Broadcast(new PoisonPill())); + diff --git a/akka-docs/pending/serialization-java.rst b/akka-docs/java/serialization.rst similarity index 64% rename from akka-docs/pending/serialization-java.rst rename to akka-docs/java/serialization.rst index 1206211b8d..0a41941ba5 100644 --- a/akka-docs/pending/serialization-java.rst +++ b/akka-docs/java/serialization.rst @@ -1,38 +1,23 @@ +.. _serialization-java: + Serialization (Java) ==================== -Akka serialization module has been documented extensively under the Scala API section. In this section we will point out the different APIs that are available in Akka for Java based serialization of ActorRefs. The Scala APIs of ActorSerialization has implicit Format objects that set up the type class based serialization. In the Java API, the Format objects need to be specified explicitly. +.. sidebar:: Contents -Serialization of ActorRef -========================= + .. contents:: :local: -The following are the Java APIs for serialization of local ActorRefs: - -.. code-block:: scala - - /** - * Module for local actor serialization. - */ - object ActorSerialization { - // wrapper for implicits to be used by Java - def fromBinaryJ[T <: Actor](bytes: Array[Byte], format: Format[T]): ActorRef = - fromBinary(bytes)(format) - - // wrapper for implicits to be used by Java - def toBinaryJ[T <: Actor](a: ActorRef, format: Format[T], srlMailBox: Boolean = true): Array[Byte] = - toBinary(a, srlMailBox)(format) - } - -The following steps describe the procedure for serializing an Actor and ActorRef. +Akka serialization module has been documented extensively under the :ref:`serialization-scala` section. In this section we will point out the different APIs that are available in Akka for Java based serialization of ActorRefs. The Scala APIs of ActorSerialization has implicit Format objects that set up the type class based serialization. 
In the Java API, the Format objects need to be specified explicitly. Serialization of a Stateless Actor -================================== +---------------------------------- Step 1: Define the Actor ------------------------- .. code-block:: scala + import akka.actor.UntypedActor; + public class SerializationTestActor extends UntypedActor { public void onReceive(Object msg) { getContext().replySafe("got it!"); @@ -40,12 +25,13 @@ Step 1: Define the Actor } Step 2: Define the typeclass instance for the actor ---------------------------------------------------- Note how the generated Java classes are accessed using the $class based naming convention of the Scala compiler. .. code-block:: scala + import akka.serialization.StatelessActorFormat; + class SerializationTestActorFormat implements StatelessActorFormat { @Override public SerializationTestActor fromBinary(byte[] bytes, SerializationTestActor act) { @@ -58,12 +44,20 @@ Note how the generated Java classes are accessed using the $class based naming c } } -**Step 3: Serialize and de-serialize** +Step 3: Serialize and de-serialize The following JUnit snippet first creates an actor using the default constructor. The actor is, as we saw above a stateless one. Then it is serialized and de-serialized to get back the original actor. Being stateless, the de-serialized version behaves in the same way on a message as the original actor. .. 
code-block:: java + import akka.actor.ActorRef; + import akka.actor.ActorTimeoutException; + import akka.actor.Actors; + import akka.actor.UntypedActor; + import akka.serialization.Format; + import akka.serialization.StatelessActorFormat; + import static akka.serialization.ActorSerialization.*; + @Test public void mustBeAbleToSerializeAfterCreateActorRefFromClass() { ActorRef ref = Actors.actorOf(SerializationTestActor.class); assertNotNull(ref); @@ -91,60 +85,69 @@ The following JUnit snippet first creates an actor using the default constructor } Serialization of a Stateful Actor -================================= +--------------------------------- Let's now have a look at how to serialize an actor that carries a state with it. Here the expectation is that the serialization of the actor will also persist the state information. And after de-serialization we will get back the state with which it was serialized. Step 1: Define the Actor ------------------------- - -Here we consider an actor defined in Scala. We will however serialize using the Java APIs. .. code-block:: scala - class MyUntypedActor extends UntypedActor { - var count = 0 - def onReceive(message: Any): Unit = message match { - case m: String if m == "hello" => - count = count + 1 - getContext.replyUnsafe("world " + count) - case m: String => - count = count + 1 - getContext.replyUnsafe("hello " + m + " " + count) - case _ => - throw new Exception("invalid message type") + import akka.actor.UntypedActor; + + public class MyUntypedActor extends UntypedActor { + int count = 0; + + public void onReceive(Object msg) { + if (msg.equals("hello")) { + count = count + 1; + getContext().replyUnsafe("world " + count); + } else if (msg instanceof String) { + count = count + 1; + getContext().replyUnsafe("hello " + msg + " " + count); + } else { + throw new IllegalArgumentException("invalid message type"); + } } } Note the actor has a state in the form of an Integer. 
And every message that the actor receives, it replies with an addition to the integer member. Step 2: Define the instance of the typeclass --------------------------------------------- .. code-block:: java + import akka.actor.UntypedActor; + import akka.serialization.Format; + import akka.serialization.SerializerFactory; + class MyUntypedActorFormat implements Format { - @Override - public MyUntypedActor fromBinary(byte[] bytes, MyUntypedActor act) { - ProtobufProtocol.Counter p = - (ProtobufProtocol.Counter) new SerializerFactory().getProtobuf().fromBinary(bytes, ProtobufProtocol.Counter.class); - act.count_$eq(p.getCount()); - return act; - } - - @Override - public byte[] toBinary(MyUntypedActor ac) { - return ProtobufProtocol.Counter.newBuilder().setCount(ac.count()).build().toByteArray(); - } + @Override + public MyUntypedActor fromBinary(byte[] bytes, MyUntypedActor act) { + ProtobufProtocol.Counter p = + (ProtobufProtocol.Counter) new SerializerFactory().getProtobuf().fromBinary(bytes, ProtobufProtocol.Counter.class); + act.count = p.getCount(); + return act; } -Note the usage of Protocol Buffers to serialize the state of the actor. + @Override + public byte[] toBinary(MyUntypedActor ac) { + return ProtobufProtocol.Counter.newBuilder().setCount(ac.count()).build().toByteArray(); + } + } + +Note the usage of Protocol Buffers to serialize the state of the actor. ProtobufProtocol.Counter is something +you need to define yourself Step 3: Serialize and de-serialize ----------------------------------- .. 
code-block:: java + import akka.actor.ActorRef; + import akka.actor.ActorTimeoutException; + import akka.actor.Actors; + import static akka.serialization.ActorSerialization.*; + @Test public void mustBeAbleToSerializeAStatefulActor() { ActorRef ref = Actors.actorOf(MyUntypedActor.class); assertNotNull(ref); diff --git a/akka-docs/pending/stm-java.rst b/akka-docs/java/stm.rst similarity index 71% rename from akka-docs/pending/stm-java.rst rename to akka-docs/java/stm.rst index 7873e38a7a..ebdd553465 100644 --- a/akka-docs/pending/stm-java.rst +++ b/akka-docs/java/stm.rst @@ -1,10 +1,16 @@ +.. _stm-java: + Software Transactional Memory (Java) ==================================== +.. sidebar:: Contents + + .. contents:: :local: + Module stability: **SOLID** Overview of STM -=============== +--------------- An `STM `_ turns the Java heap into a transactional data set with begin/commit/rollback semantics. Very much like a regular database. It implements the first three letters in ACID; ACI: * (failure) Atomicity: all changes during the execution of a transaction make it, or none make it. This only counts for transactional datastructures. @@ -12,9 +18,10 @@ An `STM `_ turns the * Isolated: changes made by concurrent execution transactions are not visible to each other. Generally, the STM is not needed that often when working with Akka. Some use-cases (that we can think of) are: -# When you really need composable message flows across many actors updating their **internal local** state but need them to do that atomically in one big transaction. Might not often, but when you do need this then you are screwed without it. -# When you want to share a datastructure across actors. -# When you need to use the persistence modules. + +- When you really need composable message flows across many actors updating their **internal local** state but need them to do that atomically in one big transaction. Might not often, but when you do need this then you are screwed without it. 
+- When you want to share a datastructure across actors. +- When you need to use the persistence modules. Akka’s STM implements the concept in `Clojure’s `_ STM view on state in general. Please take the time to read `this excellent document `_ and view `this presentation `_ by Rich Hickey (the genius behind Clojure), since it forms the basis of Akka’s view on STM and state in general. @@ -23,7 +30,7 @@ The STM is based on Transactional References (referred to as Refs). Refs are mem Working with immutable collections can sometimes give bad performance due to extensive copying. Scala provides so-called persistent datastructures which makes working with immutable collections fast. They are immutable but with constant time access and modification. The use of structural sharing and an insert or update does not ruin the old structure, hence “persistent”. Makes working with immutable composite types fast. The persistent datastructures currently consist of a Map and Vector. Simple example -============== +-------------- Here is a simple example of an incremental counter using STM. This shows creating a ``Ref``, a transactional reference, and then modifying it within a transaction, which is delimited by an ``Atomic`` anonymous inner class. @@ -49,15 +56,14 @@ Here is a simple example of an incremental counter using STM. This shows creatin counter(); // -> 2 ----- Ref -=== +--- Refs (transactional references) are mutable references to values and through the STM allow the safe sharing of mutable data. To ensure safety the value stored in a Ref should be immutable. The value referenced by a Ref can only be accessed or swapped within a transaction. Refs separate identity from value. Creating a Ref --------------- +^^^^^^^^^^^^^^ You can create a Ref with or without an initial value. @@ -72,7 +78,7 @@ You can create a Ref with or without an initial value. 
final Ref ref = new Ref(); Accessing the value of a Ref ----------------------------- +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Use ``get`` to access the value of a Ref. Note that if no initial value has been given then the value is initially ``null``. @@ -90,7 +96,7 @@ Use ``get`` to access the value of a Ref. Note that if no initial value has been // -> value = 0 Changing the value of a Ref ---------------------------- +^^^^^^^^^^^^^^^^^^^^^^^^^^^ To set a new value for a Ref you can use ``set`` (or equivalently ``swap``), which sets the new value and returns the old value. @@ -106,10 +112,9 @@ To set a new value for a Ref you can use ``set`` (or equivalently ``swap``), whi } }.execute(); ----- Transactions -============ +------------ A transaction is delimited using an ``Atomic`` anonymous inner class. @@ -124,24 +129,24 @@ A transaction is delimited using an ``Atomic`` anonymous inner class. All changes made to transactional objects are isolated from other changes, all make it or non make it (so failure atomicity) and are consistent. With the AkkaSTM you automatically have the Oracle version of the SERIALIZED isolation level, lower isolation is not possible. To make it fully serialized, set the writeskew property that checks if a writeskew problem is allowed to happen. Retries -------- +^^^^^^^ A transaction is automatically retried when it runs into some read or write conflict, until the operation completes, an exception (throwable) is thrown or when there are too many retries. When a read or writeconflict is encountered, the transaction uses a bounded exponential backoff to prevent cause more contention and give other transactions some room to complete. If you are using non transactional resources in an atomic block, there could be problems because a transaction can be retried. If you are using print statements or logging, it could be that they are called more than once. So you need to be prepared to deal with this. 
One of the possible solutions is to work with a deferred or compensating task that is executed after the transaction aborts or commits. Unexpected retries ------------------- +^^^^^^^^^^^^^^^^^^ It can happen for the first few executions that you get a few failures of execution that lead to unexpected retries, even though there is not any read or writeconflict. The cause of this is that speculative transaction configuration/selection is used. There are transactions optimized for a single transactional object, for 1..n and for n to unlimited. So based on the execution of the transaction, the system learns; it begins with a cheap one and upgrades to more expensive ones. Once it has learned, it will reuse this knowledge. It can be activated/deactivated using the speculative property on the TransactionFactoryBuilder. In most cases it is best use the default value (enabled) so you get more out of performance. Coordinated transactions and Transactors ----------------------------------------- +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -If you need coordinated transactions across actors or threads then see `Transactors `_. +If you need coordinated transactions across actors or threads then see :ref:`transactors-java`. Configuring transactions ------------------------- +^^^^^^^^^^^^^^^^^^^^^^^^ It's possible to configure transactions. The ``Atomic`` class can take a ``TransactionFactory``, which can determine properties of the transaction. A default transaction factory is used if none is specified. You can create a ``TransactionFactory`` with a ``TransactionFactoryBuilder``. @@ -163,37 +168,40 @@ Configuring transactions with a ``TransactionFactory``: }.execute(); The following settings are possible on a TransactionFactory: -* familyName - Family name for transactions. Useful for debugging because the familyName is shown in exceptions, logging and in the future also will be used for profiling. -* readonly - Sets transaction as readonly. 
Readonly transactions are cheaper and can be used to prevent modification to transactional objects. -* maxRetries - The maximum number of times a transaction will retry. -* timeout - The maximum time a transaction will block for. -* trackReads - Whether all reads should be tracked. Needed for blocking operations. Readtracking makes a transaction more expensive, but makes subsequent reads cheaper and also lowers the chance of a readconflict. -* writeSkew - Whether writeskew is allowed. Disable with care. -* blockingAllowed - Whether explicit retries are allowed. -* interruptible - Whether a blocking transaction can be interrupted if it is blocked. -* speculative - Whether speculative configuration should be enabled. -* quickRelease - Whether locks should be released as quickly as possible (before whole commit). -* propagation - For controlling how nested transactions behave. -* traceLevel - Transaction trace level. + +- familyName - Family name for transactions. Useful for debugging because the familyName is shown in exceptions, logging and in the future also will be used for profiling. +- readonly - Sets transaction as readonly. Readonly transactions are cheaper and can be used to prevent modification to transactional objects. +- maxRetries - The maximum number of times a transaction will retry. +- timeout - The maximum time a transaction will block for. +- trackReads - Whether all reads should be tracked. Needed for blocking operations. Readtracking makes a transaction more expensive, but makes subsequent reads cheaper and also lowers the chance of a readconflict. +- writeSkew - Whether writeskew is allowed. Disable with care. +- blockingAllowed - Whether explicit retries are allowed. +- interruptible - Whether a blocking transaction can be interrupted if it is blocked. +- speculative - Whether speculative configuration should be enabled. +- quickRelease - Whether locks should be released as quickly as possible (before whole commit). 
+- propagation - For controlling how nested transactions behave. +- traceLevel - Transaction trace level. You can also specify the default values for some of these options in akka.conf. Here they are with their default values: :: stm { - max-retries = 1000 - timeout = 10 - write-skew = true + fair = on # Should global transactions be fair or non-fair (non fair yield better performance) + max-retries = 1000 + timeout = 5 # Default timeout for blocking transactions and transaction set (in unit defined by + # the time-unit property) + write-skew = true blocking-allowed = false - interruptible = false - speculative = true - quick-release = true - propagation = requires - trace-level = none + interruptible = false + speculative = true + quick-release = true + propagation = "requires" + trace-level = "none" } Transaction lifecycle listeners -------------------------------- +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ It's possible to have code that will only run on the successful commit of a transaction, or when a transaction aborts. You can do this by adding ``deferred`` or ``compensating`` blocks to a transaction. @@ -221,7 +229,7 @@ It's possible to have code that will only run on the successful commit of a tran }.execute(); Blocking transactions ---------------------- +^^^^^^^^^^^^^^^^^^^^^ You can block in a transaction until a condition is met by using an explicit ``retry``. To use ``retry`` you also need to configure the transaction to allow explicit retries. 
@@ -232,15 +240,19 @@ Here is an example of using ``retry`` to block until an account has enough money import akka.stm.*; public class Transfer { - public Ref from; - public Ref to; - public double amount; + private final Ref from; + private final Ref to; + private final double amount; - public Transfer(Ref from, Ref to, double amount) { - this.from = from; - this.to = to; - this.amount = amount; - } + public Transfer(Ref from, Ref to, double amount) { + this.from = from; + this.to = to; + this.amount = amount; + } + + public Ref getFrom() { return from; } + public Ref getTo() { return to; } + public double getAmount() { return amount; } } .. code-block:: java @@ -250,6 +262,7 @@ Here is an example of using ``retry`` to block until an account has enough money import akka.actor.*; import akka.util.FiniteDuration; import java.util.concurrent.TimeUnit; + import akka.event.EventHandler; public class Transferer extends UntypedActor { TransactionFactory txFactory = new TransactionFactoryBuilder() @@ -261,16 +274,16 @@ Here is an example of using ``retry`` to block until an account has enough money public void onReceive(Object message) throws Exception { if (message instanceof Transfer) { Transfer transfer = (Transfer) message; - final Ref from = transfer.from; - final Ref to = transfer.to; - final double amount = transfer.amount; + final Ref from = transfer.getFrom(); + final Ref to = transfer.getTo(); + final double amount = transfer.getAmount(); new Atomic(txFactory) { public Object atomically() { if (from.get() < amount) { - System.out.println("Transferer: not enough money - retrying"); + EventHandler.info(this, "not enough money - retrying"); retry(); } - System.out.println("Transferer: transferring"); + EventHandler.info(this, "transferring"); from.set(from.get() - amount); to.set(to.get() + amount); return null; @@ -285,43 +298,51 @@ Here is an example of using ``retry`` to block until an account has enough money import akka.stm.*; import akka.actor.*; - final Ref 
account1 = new Ref(100.0); - final Ref account2 = new Ref(100.0); + public class Main { + public static void main(String...args) throws Exception { + final Ref account1 = new Ref(100.0); + final Ref account2 = new Ref(100.0); - ActorRef transferer = Actors.actorOf(Transferer.class).start(); + ActorRef transferer = Actors.actorOf(Transferer.class).start(); - transferer.sendOneWay(new Transfer(account1, account2, 500.0)); - // Transferer: not enough money - retrying + transferer.sendOneWay(new Transfer(account1, account2, 500.0)); + // Transferer: not enough money - retrying - new Atomic() { - public Object atomically() { - return account1.set(account1.get() + 2000); - } - }.execute(); - // Transferer: transferring + new Atomic() { + public Object atomically() { + return account1.set(account1.get() + 2000); + } + }.execute(); + // Transferer: transferring - Double acc1 = new Atomic() { - public Double atomically() { - return account1.get(); - } - }.execute(); + Thread.sleep(1000); - Double acc2 = new Atomic() { - public Double atomically() { - return account2.get(); - } - }.execute(); + Double acc1 = new Atomic() { + public Double atomically() { + return account1.get(); + } + }.execute(); - System.out.println("Account 1: " + acc1); - // Account 1: 1600.0 + Double acc2 = new Atomic() { + public Double atomically() { + return account2.get(); + } + }.execute(); - System.out.println("Account 2: " + acc2); - // Account 2: 600.0 - transferer.stop(); + + System.out.println("Account 1: " + acc1); + // Account 1: 1600.0 + + System.out.println("Account 2: " + acc2); + // Account 2: 600.0 + + transferer.stop(); + } + } Alternative blocking transactions ---------------------------------- +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ You can also have two alternative blocking transactions, one of which can succeed first, with ``EitherOrElse``. 
@@ -330,24 +351,31 @@ You can also have two alternative blocking transactions, one of which can succee import akka.stm.*; public class Branch { - public Ref left; - public Ref right; - public int amount; + private final Ref left; + private final Ref right; + private final double amount; - public Branch(Ref left, Ref right, int amount) { - this.left = left; - this.right = right; - this.amount = amount; - } + public Branch(Ref left, Ref right, int amount) { + this.left = left; + this.right = right; + this.amount = amount; + } + + public Ref getLeft() { return left; } + + public Ref getRight() { return right; } + + public double getAmount() { return amount; } } .. code-block:: java + import akka.actor.*; import akka.stm.*; import static akka.stm.StmUtils.retry; - import akka.actor.*; import akka.util.FiniteDuration; import java.util.concurrent.TimeUnit; + import akka.event.EventHandler; public class Brancher extends UntypedActor { TransactionFactory txFactory = new TransactionFactoryBuilder() @@ -359,26 +387,26 @@ You can also have two alternative blocking transactions, one of which can succee public void onReceive(Object message) throws Exception { if (message instanceof Branch) { Branch branch = (Branch) message; - final Ref left = branch.left; - final Ref right = branch.right; - final double amount = branch.amount; + final Ref left = branch.getLeft(); + final Ref right = branch.getRight(); + final double amount = branch.getAmount(); new Atomic(txFactory) { public Integer atomically() { return new EitherOrElse() { public Integer either() { if (left.get() < amount) { - System.out.println("not enough on left - retrying"); + EventHandler.info(this, "not enough on left - retrying"); retry(); } - System.out.println("going left"); + EventHandler.info(this, "going left"); return left.get(); } public Integer orElse() { if (right.get() < amount) { - System.out.println("not enough on right - retrying"); + EventHandler.info(this, "not enough on right - retrying"); retry(); } - 
System.out.println("going right"); + EventHandler.info(this, "going right"); return right.get(); } }.execute(); @@ -393,32 +421,40 @@ You can also have two alternative blocking transactions, one of which can succee import akka.stm.*; import akka.actor.*; - final Ref left = new Ref(100); - final Ref right = new Ref(100); + public class Main2 { + public static void main(String...args) throws Exception { + final Ref left = new Ref(100); + final Ref right = new Ref(100); - ActorRef brancher = Actors.actorOf(Brancher.class).start(); + ActorRef brancher = Actors.actorOf(Brancher.class).start(); - brancher.sendOneWay(new Branch(left, right, 500)); - // not enough on left - retrying - // not enough on right - retrying + brancher.sendOneWay(new Branch(left, right, 500)); + // not enough on left - retrying + // not enough on right - retrying - new Atomic() { - public Object atomically() { - return right.set(right.get() + 1000); - } - }.execute(); - // going right + Thread.sleep(1000); - brancher.stop(); + new Atomic() { + public Object atomically() { + return right.set(right.get() + 1000); + } + }.execute(); + // going right + + + + brancher.stop(); + } + } ----- Transactional datastructures -============================ +---------------------------- Akka provides two datastructures that are managed by the STM. -* TransactionalMap -* TransactionalVector + +- TransactionalMap +- TransactionalVector TransactionalMap and TransactionalVector look like regular mutable datastructures, they even implement the standard Scala 'Map' and 'RandomAccessSeq' interfaces, but they are implemented using persistent datastructures and managed references under the hood. Therefore they are safe to use in a concurrent environment. Underlying TransactionalMap is HashMap, an immutable Map but with near constant time access and modification operations. Similarly TransactionalVector uses a persistent Vector. See the Persistent Datastructures section below for more details. 
@@ -477,46 +513,19 @@ Here is an example of creating and accessing a TransactionalVector: } }.execute(); ----- Persistent datastructures -========================= +------------------------- Akka's STM should only be used with immutable data. This can be costly if you have large datastructures and are using a naive copy-on-write. In order to make working with immutable datastructures fast enough Scala provides what are called Persistent Datastructures. There are currently two different ones: -* HashMap (`scaladoc `_) -* Vector (`scaladoc `_) + +- HashMap (`scaladoc `__) +- Vector (`scaladoc `__) They are immutable and each update creates a completely new version but they are using clever structural sharing in order to make them almost as fast, for both read and update, as regular mutable datastructures. This illustration is taken from Rich Hickey's presentation. Copyright Rich Hickey 2009. -``_ +.. image:: ../images/clojure-trees.png ----- -JTA integration -=============== - -The STM has JTA (Java Transaction API) integration. This means that it will, if enabled, hook in to JTA and start a JTA transaction when the STM transaction is started. It will also rollback the STM transaction if the JTA transaction has failed and vice versa. This does not mean that the STM is made durable, if you need that you should use one of the `persistence modules `_. It simply means that the STM will participate and interact with and external JTA provider, for example send a message using JMS atomically within an STM transaction, or use Hibernate to persist STM managed data etc. - -Akka also has an API for using JTA explicitly. Read the `section on JTA `_ for details. - -You can enable JTA support in the 'stm' section in the config: - -:: - - stm { - jta-aware = off # 'on' means that if there JTA Transaction Manager available then the STM will - # begin (or join), commit or rollback the JTA transaction. Default is 'off'. 
- } - -You also have to configure which JTA provider to use etc in the 'jta' config section: - -``_ - jta { - provider = "from-jndi" # Options: "from-jndi" (means that Akka will try to detect a TransactionManager in the JNDI) - # "atomikos" (means that Akka will use the Atomikos based JTA impl in 'akka-jta', - # e.g. you need the akka-jta JARs on classpath). - timeout = 60 - } -``_ diff --git a/akka-docs/pending/transactors-java.rst b/akka-docs/java/transactors.rst similarity index 84% rename from akka-docs/pending/transactors-java.rst rename to akka-docs/java/transactors.rst index 9cc4d522f4..b724ef89b6 100644 --- a/akka-docs/pending/transactors-java.rst +++ b/akka-docs/java/transactors.rst @@ -1,10 +1,16 @@ -**Transactors (Java)** -============================================================ +.. _transactors-java: + +Transactors (Java) +================== + +.. sidebar:: Contents + + .. contents:: :local: Module stability: **SOLID** Why Transactors? -================ +---------------- Actors are excellent for solving problems where you have many independent processes that can work in isolation and only interact with other Actors through message passing. This model fits many problems. But the actor model is unfortunately a terrible model for implementing truly shared state. E.g. when you need to have consensus and a stable view of state across many components. The classic example is the bank account where clients can deposit and withdraw, in which each operation needs to be atomic. For detailed discussion on the topic see `this JavaOne presentation `_. @@ -15,21 +21,21 @@ Akka's Transactors combine Actors and STM to provide the best of the Actor model If you need Durability then you should not use one of the in-memory data structures but one of the persistent ones. Generally, the STM is not needed very often when working with Akka. 
Some use-cases (that we can think of) are: -# When you really need composable message flows across many actors updating their **internal local** state but need them to do that atomically in one big transaction. Might not often, but when you do need this then you are screwed without it. -# When you want to share a datastructure across actors. -# When you need to use the persistence modules. + +- When you really need composable message flows across many actors updating their **internal local** state but need them to do that atomically in one big transaction. Might not often, but when you do need this then you are screwed without it. +- When you want to share a datastructure across actors. +- When you need to use the persistence modules. Actors and STM --------------- +^^^^^^^^^^^^^^ You can combine Actors and STM in several ways. An Actor may use STM internally so that particular changes are guaranteed to be atomic. Actors may also share transactional datastructures as the STM provides safe shared state across threads. It's also possible to coordinate transactions across Actors or threads so that either the transactions in a set all commit successfully or they all fail. This is the focus of Transactors and the explicit support for coordinated transactions in this section. ----- Coordinated transactions -======================== +------------------------ Akka provides an explicit mechanism for coordinating transactions across actors. Under the hood it uses a ``CountDownCommitBarrier``, similar to a CountDownLatch. @@ -40,9 +46,11 @@ Here is an example of coordinating two simple counter UntypedActors so that they import akka.actor.ActorRef; public class Increment { - private ActorRef friend = null; + private final ActorRef friend; - public Increment() {} + public Increment() { + this.friend = null; + } public Increment(ActorRef friend) { this.friend = friend; @@ -59,9 +67,7 @@ Here is an example of coordinating two simple counter UntypedActors so that they .. 
code-block:: java - import akka.actor.ActorRef; import akka.actor.UntypedActor; - import static akka.actor.Actors.*; import akka.stm.Ref; import akka.transactor.Atomically; import akka.transactor.Coordinated; @@ -88,11 +94,8 @@ Here is an example of coordinating two simple counter UntypedActors so that they } }); } - } else if (incoming instanceof String) { - String message = (String) incoming; - if (message.equals("GetCount")) { - getContext().replyUnsafe(count.get()); - } + } else if (incoming.equals("GetCount")) { + getContext().replyUnsafe(count.get()); } } } @@ -104,7 +107,7 @@ Here is an example of coordinating two simple counter UntypedActors so that they counter1.sendOneWay(new Coordinated(new Increment(counter2))); -To start a new coordinated transaction set that you will also participate in, just create a ``Coordinated`` object: +To start a new coordinated transaction that you will also participate in, just create a ``Coordinated`` object: .. code-block:: java @@ -116,7 +119,7 @@ To start a coordinated transaction that you won't participate in yourself you ca actor.sendOneWay(new Coordinated(new Message())); -To include another actor in the same coordinated transaction set that you've created or received, use the ``coordinate`` method on that object. This will increment the number of parties involved by one and create a new ``Coordinated`` object to be sent. +To include another actor in the same coordinated transaction that you've created or received, use the ``coordinate`` method on that object. This will increment the number of parties involved by one and create a new ``Coordinated`` object to be sent. .. code-block:: java @@ -134,10 +137,9 @@ To enter the coordinated transaction use the atomic method of the coordinated ob The coordinated transaction will wait for the other transactions before committing. If any of the coordinated transactions fail then they all fail. 
----- UntypedTransactor -================= +----------------- UntypedTransactors are untyped actors that provide a general pattern for coordinating transactions, using the explicit coordination described above. @@ -146,10 +148,12 @@ Here's an example of a simple untyped transactor that will join a coordinated tr .. code-block:: java import akka.transactor.UntypedTransactor; + import akka.stm.Ref; public class Counter extends UntypedTransactor { Ref count = new Ref(0); + @Override public void atomically(Object message) { if (message instanceof Increment) { count.set(count.get() + 1); @@ -174,7 +178,8 @@ Example of coordinating an increment, similar to the explicitly coordinated exam public class Counter extends UntypedTransactor { Ref count = new Ref(0); - @Override public Set coordinate(Object message) { + @Override + public Set coordinate(Object message) { if (message instanceof Increment) { Increment increment = (Increment) message; if (increment.hasFriend()) @@ -183,6 +188,7 @@ Example of coordinating an increment, similar to the explicitly coordinated exam return nobody(); } + @Override public void atomically(Object message) { if (message instanceof Increment) { count.set(count.get() + 1); @@ -190,14 +196,13 @@ Example of coordinating an increment, similar to the explicitly coordinated exam } } -To execute directly before or after the coordinated transaction, override the ``before`` and ``after`` methods. These methods also expect partial functions like the receive method. They do not execute within the transaction. +To execute directly before or after the coordinated transaction, override the ``before`` and ``after`` methods. They do not execute within the transaction. To completely bypass coordinated transactions override the ``normally`` method. Any message matched by ``normally`` will not be matched by the other methods, and will not be involved in coordinated transactions. 
In this method you can implement normal actor behavior, or use the normal STM atomic for local transactions. ----- Coordinating Typed Actors -========================= +------------------------- It's also possible to use coordinated transactions with typed actors. You can explicitly pass around ``Coordinated`` objects, or use built-in support with the ``@Coordinated`` annotation and the ``Coordination.coordinate`` method. @@ -249,17 +254,18 @@ Here's an example of using ``@Coordinated`` with a TypedActor to coordinate incr } } -``_ -Counter counter1 = (Counter) TypedActor.newInstance(Counter.class, CounterImpl.class); -Counter counter2 = (Counter) TypedActor.newInstance(Counter.class, CounterImpl.class); +.. code-block:: java -Coordination.coordinate(true, new Atomically() { + Counter counter1 = (Counter) TypedActor.newInstance(Counter.class, CounterImpl.class); + Counter counter2 = (Counter) TypedActor.newInstance(Counter.class, CounterImpl.class); + + Coordination.coordinate(true, new Atomically() { public void atomically() { - counter1.increment(); - counter2.increment(); + counter1.increment(); + counter2.increment(); } -}); + }); + + TypedActor.stop(counter1); + TypedActor.stop(counter2); -TypedActor.stop(counter1); -TypedActor.stop(counter2); -``_ diff --git a/akka-docs/pending/typed-actors-java.rst b/akka-docs/java/typed-actors.rst similarity index 93% rename from akka-docs/pending/typed-actors-java.rst rename to akka-docs/java/typed-actors.rst index 0f6c9563b5..8de961e515 100644 --- a/akka-docs/pending/typed-actors-java.rst +++ b/akka-docs/java/typed-actors.rst @@ -1,9 +1,13 @@ Typed Actors (Java) =================== +.. sidebar:: Contents + + .. contents:: :local: + Module stability: **SOLID** -The Typed Actors are implemented through `Typed Actors `_. It uses AOP through `AspectWerkz `_ to turn regular POJOs into asynchronous non-blocking Actors with semantics of the Actor Model. E.g. 
each message dispatch is turned into a message that is put on a queue to be processed by the Typed Actor sequentially one by one. +The Typed Actors are implemented through `Typed Actors `_. It uses AOP through `AspectWerkz `_ to turn regular POJOs into asynchronous non-blocking Actors with semantics of the Actor Model. Each method dispatch is turned into a message that is put on a queue to be processed by the Typed Actor sequentially one by one. If you are using the `Spring Framework `_ then take a look at Akka's `Spring integration `_. @@ -50,8 +54,8 @@ Then you can create an Typed Actor out of it by creating it through the 'TypedAc (RegistrationService) TypedActor.newInstance(RegistrationService.class, RegistrationServiceImpl.class, 1000); // The last parameter defines the timeout for Future calls -**Creating Typed Actors with non-default constructor** -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Creating Typed Actors with non-default constructor +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ To create a typed actor that takes constructor arguments use a variant of 'newInstance' or 'newRemoteInstance' that takes an instance of a 'TypedActorFactory' in which you can create the TypedActor in any way you like. If you use this method then make sure that no one can get a reference to the actor instance. Touching actor state directly is bypassing the whole actor dispatching mechanism and create race conditions which can lead to corrupt data. @@ -80,7 +84,7 @@ Using a configuration object: RegistrationService service = (RegistrationService) TypedActor.newInstance(RegistrationService.class, config); -However, often you will not use these factory methods but declaratively define the Typed Actors as part of a supervisor hierarchy. More on that in the `Fault Tolerance `_ section. +However, often you will not use these factory methods but declaratively define the Typed Actors as part of a supervisor hierarchy. 
More on that in the :ref:`fault-tolerance-java` section. Sending messages ---------------- @@ -140,7 +144,7 @@ Here is an example: Stopping Typed Actors --------------------- -Once Typed Actors have been created with one of the TypedActor.newInstance methods they need to be stopped with TypedActor.stop to free resources allocated by the created Typed Actor (this is not needed when the Typed Actor is `supervised `_). +Once Typed Actors have been created with one of the TypedActor.newInstance methods they need to be stopped with TypedActor.stop to free resources allocated by the created Typed Actor (this is not needed when the Typed Actor is supervised). .. code-block:: java @@ -152,7 +156,7 @@ Once Typed Actors have been created with one of the TypedActor.newInstance metho // Free Typed Actor resources TypedActor.stop(service); -When the Typed Actor defines a `shutdown callback `_ method it will be invoked on TypedActor.stop. +When the Typed Actor defines a shutdown callback method (:ref:`fault-tolerance-java`) it will be invoked on TypedActor.stop. How to use the TypedActorContext for runtime information access --------------------------------------------------------------- @@ -171,7 +175,7 @@ Here is an example how you can use it to in a 'void' (e.g. fire-forget) method t } } -If the sender, sender future etc. is not available, then these methods will return 'null' so you should have a way of dealing with scenario. +If the sender, sender future etc. is not available, then these methods will return 'null' so you should have a way of dealing with that scenario. Messages and immutability ------------------------- @@ -189,3 +193,4 @@ Akka can help you in this regard. It allows you to turn on an option for seriali } This will make a deep clone (using Java serialization) of all parameters. 
+ diff --git a/akka-docs/pending/untyped-actors-java.rst b/akka-docs/java/untyped-actors.rst similarity index 97% rename from akka-docs/pending/untyped-actors-java.rst rename to akka-docs/java/untyped-actors.rst index 35e97011af..c03dd8e50c 100644 --- a/akka-docs/pending/untyped-actors-java.rst +++ b/akka-docs/java/untyped-actors.rst @@ -1,7 +1,11 @@ +.. _untyped-actors-java: + Actors (Java) ============= -= +.. sidebar:: Contents + + .. contents:: :local: Module stability: **SOLID** @@ -16,11 +20,15 @@ Here is an example: .. code-block:: java + import akka.actor.UntypedActor; + import akka.event.EventHandler; + public class SampleUntypedActor extends UntypedActor { public void onReceive(Object message) throws Exception { if (message instanceof String) - EventHandler.info(this, String.format("Received String message: %s", message)); + EventHandler.info(this, String.format("Received String message: %s", + message)); else throw new IllegalArgumentException("Unknown message: " + message); } @@ -219,6 +227,27 @@ Here is an example: Reply to messages ----------------- +Reply using the channel +^^^^^^^^^^^^^^^^^^^^^^^ + +If you want to have a handle to an object to whom you can reply to the message, you can use the Channel abstraction. +Simply call getContext().channel() and then you can forward that to others, store it away or otherwise until you want to reply, +which you do by Channel.sendOneWay(msg) + +.. code-block:: java + + public void onReceive(Object message) throws Exception { + if (message instanceof String) { + String msg = (String)message; + if (msg.equals("Hello") && getContext().getSenderFuture().isDefined()) { + // Reply to original sender of message using the channel + getContext().channel().sendOneWay(msg + " from " + getContext().getUuid()); + } + } + } + +We recommend that you as first choice use the channel abstraction instead of the other ways described in the following sections. 
+ Reply using the 'replySafe' and 'replyUnsafe' methods ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -299,24 +328,6 @@ Here is an example of how it can be used: } } -Reply using the channel -^^^^^^^^^^^^^^^^^^^^^^^ - -If you want to have a handle to an object to whom you can reply to the message, you can use the Channel abstraction. -Simply call getContext().channel() and then you can forward that to others, store it away or otherwise until you want to reply, -which you do by Channel.sendOneWay(msg) - -.. code-block:: java - - public void onReceive(Object message) throws Exception { - if (message instanceof String) { - String msg = (String)message; - if (msg.equals("Hello") && getContext().getSenderFuture().isDefined()) { - // Reply to original sender of message using the channel - getContext().channel().sendOneWay(msg + " from " + getContext().getUuid()); - } - } - } Summary of reply semantics and options ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -409,8 +420,9 @@ Actor life-cycle The actor has a well-defined non-circular life-cycle. -``_ -NEW (newly created actor) - can't receive messages (yet) - => STARTED (when 'start' is invoked) - can receive messages - => SHUT DOWN (when 'exit' or 'stop' is invoked) - can't do anything -``_ +:: + + NEW (newly created actor) - can't receive messages (yet) + => STARTED (when 'start' is invoked) - can receive messages + => SHUT DOWN (when 'exit' or 'stop' is invoked) - can't do anything + diff --git a/akka-docs/pending/Feature Stability Matrix.rst b/akka-docs/pending/Feature Stability Matrix.rst deleted file mode 100644 index cdbd6b3ad9..0000000000 --- a/akka-docs/pending/Feature Stability Matrix.rst +++ /dev/null @@ -1,31 +0,0 @@ -Feature Stability Matrix -======================== - -Akka is comprised of a number if modules, with different levels of maturity and in different parts of their lifecycle, the matrix below gives you get current stability level of the modules. 
- -Explanation of the different levels of stability ------------------------------------------------- - -* **Solid** - Proven solid in heavy production usage -* **Stable** - Ready for use in production environment -* **In progress** - Not enough feedback/use to claim it's ready for production use - -||~ Feature ||~ Solid ||~ Stable ||~ In progress || -||= ====`Actors (Scala) `_ ==== ||= Solid ||= ||= || -||= ====`Actors (Java) `_ ==== ||= Solid ||= ||= || -||= ====` Typed Actors (Scala) `_ ==== ||= Solid ||= ||= || -||= ====` Typed Actors (Java) `_ ==== ||= Solid ||= ||= || -||= ====`STM (Scala) `_ ==== ||= Solid ||= ||= || -||= ====`STM (Java) `_ ==== ||= Solid ||= ||= || -||= ====`Transactors (Scala) `_ ==== ||= Solid ||= ||= || -||= ====`Transactors (Java) `_ ==== ||= Solid ||= ||= || -||= ====`Remote Actors (Scala) `_ ==== ||= Solid ||= ||= || -||= ====`Remote Actors (Java) `_ ==== ||= Solid ||= ||= || -||= ====`Camel `_ ==== ||= Solid ||= ||= || -||= ====`AMQP `_ ==== ||= Solid ||= ||= || -||= ====`HTTP `_ ==== ||= Solid ||= ||= || -||= ====`Integration Guice `_ ==== ||= ||= Stable ||= || -||= ====`Integration Spring `_ ==== ||= ||= Stable ||= || -||= ====`JTA `_ ==== ||= ||= Stable ||= || -||= ====`Scheduler `_ ==== ||= Solid ||= ||= || -||= ====`Redis Pub Sub `_ ==== ||= ||= ||= In progress || diff --git a/akka-docs/pending/Home.rst b/akka-docs/pending/Home.rst deleted file mode 100644 index 73c9f31172..0000000000 --- a/akka-docs/pending/Home.rst +++ /dev/null @@ -1,60 +0,0 @@ -Akka -==== - -**Simpler Scalability, Fault-Tolerance, Concurrency & Remoting through Actors** - -We believe that writing correct concurrent, fault-tolerant and scalable applications is too hard. Most of the time it's because we are using the wrong tools and the wrong level of abstraction. Akka is here to change that. 
Using the Actor Model together with Software Transactional Memory we raise the abstraction level and provide a better platform to build correct concurrent and scalable applications. For fault-tolerance we adopt the "Let it crash" / "Embrace failure" model which have been used with great success in the telecom industry to build applications that self-heals, systems that never stop. Actors also provides the abstraction for transparent distribution and the basis for truly scalable and fault-tolerant applications. Akka is Open Source and available under the Apache 2 License. - -Akka is split up into two different parts: -* Akka - Reflects all the sections under 'Scala API' and 'Java API' in the navigation bar. -* Akka Modules - Reflects all the sections under 'Add-on modules' in the navigation bar. - -Download from ``_ - -News: Akka 1.0 final is released --------------------------------- - -1.0 documentation ------------------ - -This documentation covers the latest release ready code in 'master' branch in the repository. -If you want the documentation for the 1.0 release you can find it `here `_. - -You can watch the recording of the `Akka talk at JFokus in Feb 2011 `_. - -``_ - -**Akka implements a unique hybrid of:** -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -* `Actors `_, which gives you: - * Simple and high-level abstractions for concurrency and parallelism. - * Asynchronous, non-blocking and highly performant event-driven programming model. - * Very lightweight event-driven processes (create ~6.5 million actors on 4 G RAM). -* `Failure management `_ through supervisor hierarchies with `let-it-crash `_ semantics. Excellent for writing highly fault-tolerant systems that never stop, systems that self-heal. -* `Software Transactional Memory `_ (STM). (Distributed transactions coming soon). -* `Transactors `_: combine actors and STM into transactional actors. Allows you to compose atomic message flows with automatic retry and rollback. 
-* `Remote actors `_: highly performant distributed actors with remote supervision and error management. -* Java and Scala API. - -**Akka also has a set of add-on modules:** -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -* `Camel `_: Expose actors as Apache Camel endpoints. -* `Spring `_: Wire up typed actors in the Spring config using Akka's namespace. -* `REST `_ (JAX-RS): Expose actors as REST services. -* `OSGi `_: Akka and all its dependency is OSGi enabled. -* `Mist `_: Expose actors as asynchronous HTTP services. -* `Security `_: Basic, Digest and Kerberos based security. -* `Microkernel `_: Run Akka as a stand-alone self-hosted kernel. -* `FSM `_: Finite State Machine support. -* `JTA `_: Let the STM interoperate with other transactional resources. -* `Pub/Sub `_: Publish-Subscribe across remote nodes. - -**Akka can be used in two different ways:** -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -* As a library: used by a web app, to be put into ‘WEB-INF/lib’ or as a regular JAR on your classpath. -* As a microkernel: stand-alone kernel, embedding a servlet container and all the other modules. - -See the `Use-case and Deployment Scenarios `_ for details. diff --git a/akka-docs/pending/benchmarks.rst b/akka-docs/pending/benchmarks.rst deleted file mode 100644 index 6352040d32..0000000000 --- a/akka-docs/pending/benchmarks.rst +++ /dev/null @@ -1,31 +0,0 @@ -Benchmarks -========== - -Scalability, Throughput and Latency benchmark ---------------------------------------------- - -``_ - -Simple Trading system. 
-* `Here is the result with some graphs `_ -* `Here is the article `_ -* `Here is the code `_ - -Compares: -* Synchronous Scala solution -* Scala library Actors -** Fire-forget -** Request-reply -* Akka -** Request-reply -** Fire-forget with default dispatcher -** Fire-forget with Hawt dispatcher - -Performance benchmark ---------------------- - -Benchmarking Akka against: -* Scala Library Actors -* Raw Java concurrency -* Jetlang (Java actors lib) -``_ diff --git a/akka-docs/pending/buildr.rst b/akka-docs/pending/buildr.rst deleted file mode 100644 index a684463270..0000000000 --- a/akka-docs/pending/buildr.rst +++ /dev/null @@ -1,55 +0,0 @@ -Using Akka in a Buildr project -============================== - -This is an example on how to use Akka in a project based on Buildr - -.. code-block:: ruby - - require 'buildr/scala' - - VERSION_NUMBER = "0.6" - GROUP = "se.scalablesolutions.akka" - - repositories.remote << "http://www.ibiblio.org/maven2/" - repositories.remote << "http://www.lag.net/repo" - repositories.remote << "http://multiverse.googlecode.com/svn/maven-repository/releases" - - AKKA = group('akka-core', 'akka-comet', 'akka-util','akka-kernel', 'akka-rest', 'akka-util-java', - 'akka-security','akka-persistence-common', 'akka-persistence-redis', - 'akka-amqp', - :under=> 'se.scalablesolutions.akka', - :version => '0.6') - ASPECTJ = "org.codehaus.aspectwerkz:aspectwerkz-nodeps-jdk5:jar:2.1" - SBINARY = "sbinary:sbinary:jar:0.3" - COMMONS_IO = "commons-io:commons-io:jar:1.4" - CONFIGGY = "net.lag:configgy:jar:1.4.7" - JACKSON = group('jackson-core-asl', 'jackson-mapper-asl', - :under=> 'org.codehaus.jackson', - :version => '1.2.1') - MULTIVERSE = "org.multiverse:multiverse-alpha:jar:jar-with-dependencies:0.3" - NETTY = "org.jboss.netty:netty:jar:3.2.0.ALPHA2" - PROTOBUF = "com.google.protobuf:protobuf-java:jar:2.2.0" - REDIS = "com.redis:redisclient:jar:1.0.1" - SJSON = "sjson.json:sjson:jar:0.3" - - Project.local_task "run" - - desc "Akka Chat Sample 
Module" - define "akka-sample-chat" do - project.version = VERSION_NUMBER - project.group = GROUP - - compile.with AKKA, CONFIGGY - - p artifact(MULTIVERSE).to_s - - package(:jar) - - task "run" do - Java.java "scala.tools.nsc.MainGenericRunner", - :classpath => [ compile.dependencies, compile.target, - ASPECTJ, COMMONS_IO, JACKSON, NETTY, MULTIVERSE, PROTOBUF, REDIS, - SBINARY, SJSON], - :java_args => ["-server"] - end - end diff --git a/akka-docs/pending/deployment-scenarios.rst b/akka-docs/pending/deployment-scenarios.rst deleted file mode 100644 index 9c67cda10d..0000000000 --- a/akka-docs/pending/deployment-scenarios.rst +++ /dev/null @@ -1,100 +0,0 @@ - - -Use-case and Deployment Scenarios -================================= - -= - -How and in which use-case and deployment scenarios can I use Akka? -================================================================== - -Akka can be used in two different ways: -* As a library: used as a regular JAR on the classpath and/or in a web app, to be put into ‘WEB-INF/lib’ -* As a microkernel: stand-alone microkernel, embedding a servlet container along with many other services. - -Using Akka as library ---------------------- - -This is most likely what you want if you are building Web applications. -There are several ways you can use Akka in Library mode by adding more and more modules to the stack. - -Actors as services -^^^^^^^^^^^^^^^^^^ - -The simplest way you can use Akka is to use the actors as services in your Web application. All that’s needed to do that is to put the Akka charts as well as its dependency jars into ‘WEB-INF/lib’. You also need to put the ‘akka.conf’ config file in the ‘$AKKA_HOME/config’ directory. -Now you can create your Actors as regular services referenced from your Web application. You should also be able to use the Remoting service, e.g. be able to make certain Actors remote on other hosts. 
Please note that remoting service does not speak HTTP over port 80, but a custom protocol over the port is specified in ‘akka.conf’. -``_ - -^ - -Actors as services with Software Transactional Memory (STM) -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -As in the above, but with the addition of using the STM module to allow transactional memory across many Actors (no persistence, just in-memory). -``_ - -^ - -Actors as services with Persistence module as cache -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -As in the above, but with the addition of using the Persistence module to allow transactional persistent cache. This use case scenario you would still use a regular relational database (RDBMS) but use Akka’s transactional persistent storage as a performant scalable cache alongside the RDBMS. -``_ - -^ - -Actors as services with Persistence module as primary storage/Service of Record (SoR) -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -As in the above, but with the addition of using the Persistence module as the primary storage/SoR. In this use case you wouldn’t use a RDBMS at all but rely on one of the Akka backends (Cassandra, Terracotta, Redis, MongoDB etc.) as transactional persistent storage. This is great if have either high performance, scalability or high-availability requirements where a RDBMS would be either single point of failure or single point of bottleneck or just be too slow. -If the storage API (Maps, Vectors or Refs) is too constrained for some use cases we can bypass it and use the storage directly. However, please note that then we will lose the transactional semantics. -``_ - -^ - -Actors as REST/Comet (push) services -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -You can also expose your library Actors directly as REST (`JAX `_`-RS `_) or Comet (`Atmosphere `_) services by deploying the ‘AkkaServlet’ in your servlet container. 
In order for this to work in each define a so-called “boot” class which bootstraps the Actor configuration, wiring and startup. This is done in the ‘akka.conf’ file. -``_ - -- - -Using Akka as a stand alone microkernel ---------------------------------------- - -Akka can also be run as a stand-alone microkernel. It implements a full enterprise stack: - -^ - -Web/REST/Comet layer -^^^^^^^^^^^^^^^^^^^^ - -Akka currently embeds the `Grizzly/GlassFish `_ servlet container (but will soon be pluggable with Jetty as well) which allows to build REST-based using `JAX `_`-RS `_ and Comet-based services using `Atmosphere `_ as well as regular Web applications using JAX-RS’s `implicit views `_ (see also `James Strachan’s article `_). - -^ - -Service layer -^^^^^^^^^^^^^ - -The service layer is implemented using fault tolerant, asynchronous, throttled message passing; like `SEDA-in-a-box `_ using Actors. - -Persistence layer -^^^^^^^^^^^^^^^^^ - - Implemented using pluggable storage engines for both partitioned distributed massively scalable storage (like Cassandra) as well as single node storage (like MongoDB). A different storage and gives also provides different consistency/availability trade-offs implementing either Eventually Consistency (BASE) or Atomicity (ACID). - -Monitoring and Management layer -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - - Providing both JMX management and monitoring as well as w3c logging. - ``_ - -Use BivySack for packaging your application -------------------------------------------- - -"BivySack" For Akka - SBT plugin which creates a full akka microkernel deployment for your project. - -Quick and dirty SBT Plugin for creating Akka Microkernel deployments of your SBT Project. This creates a proper "akka deploy" setup with all of your dependencies and configuration files loaded, with a bootable version of your project that you can run cleanly. - -Read more about it here ``_. 
diff --git a/akka-docs/pending/futures-scala.rst b/akka-docs/pending/futures-scala.rst deleted file mode 100644 index f03990a1cf..0000000000 --- a/akka-docs/pending/futures-scala.rst +++ /dev/null @@ -1,197 +0,0 @@ -Futures (Scala) -=============== - -Introduction ------------- - -In Akka, a `Future `_ is a data structure used to retrieve the result of some concurrent operation. This operation is usually performed by an ``Actor`` or by the ``Dispatcher`` directly. This result can be accessed synchronously (blocking) or asynchronously (non-blocking). - -Use with Actors ---------------- - -There are generally two ways of getting a reply from an ``Actor``: the first is by a sent message (``actor ! msg``, which only works if the original sender was an ``Actor``) and the second is through a ``Future``. - -Using an ``Actor``\'s ``!!!`` method to send a message will return a Future. To wait for and retrieve the actual result the simplest method is: - -.. code-block:: scala - - val future = actor !!! msg - val result: Any = future.apply - // or more simply - val result: Any = future() - -This will cause the current thread to block and wait for the ``Actor`` to 'complete' the ``Future`` with its reply. Due to the dynamic nature of Akka's ``Actor``\s this result will be untyped and will default to ``Nothing``. The safest way to deal with this is to cast the result to an ``Any`` as is shown in the above example. You can also use the expected result type instead of ``Any``, but if an unexpected type were to be returned you will get a ``ClassCastException``. For more elegant ways to deal with this and to use the result without blocking refer to `Functional Futures`_. - -Use Directly ------------- - -A common use case within Akka is to have some computation performed concurrently without needing the extra utility of an ``Actor``.
If you find yourself creating a pool of ``Actor``\s for the sole reason of performing a calculation in parallel, there is an easier (and faster) way: - -.. code-block:: scala - - import akka.dispatch.Future - - val future = Future { - "Hello" + "World" - } - val result = future() - -In the above code the block passed to ``Future`` will be executed by the default ``Dispatcher``, with the return value of the block used to complete the ``Future`` (in this case, the result would be the string: "HelloWorld"). Unlike a ``Future`` that is returned from an ``Actor``, this ``Future`` is properly typed, and we also avoid the overhead of managing an ``Actor``. - -Functional Futures ------------------- - -A recent addition to Akka's ``Future`` is several monadic methods that are very similar to the ones used by Scala's collections. These allow you to create 'pipelines' or 'streams' that the result will travel through. - -Future is a Monad -^^^^^^^^^^^^^^^^^ - -The first method for working with ``Future`` functionally is ``map``. This method takes a ``Function`` which performs some operation on the result of the ``Future``, and returning a new result. The return value of the ``map`` method is another ``Future`` that will contain the new result: - -.. code-block:: scala - - val f1 = Future { - "Hello" + "World" - } - - val f2 = f1 map { x => - x.length - } - - val result = f2() - -In this example we are joining two strings together within a Future. Instead of waiting for this to complete, we apply our Function that calculates the length of the string using the 'map' method. Now we have a second Future that will contain an Int. When our original Future completes, it will also apply our Function and complete the second Future with that result. When we finally await the result, it will contain the number 10. Our original Future still contains the string "HelloWorld" and is unaffected by the 'map'. 
- -Something to note when using these methods: if the Future is still being processed when one of these methods is called, it will be the completing thread that actually does the work. If the Future is already complete though, it will be run in our current thread. For example: - -.. code-block:: scala - - val f1 = Future { - Thread.sleep(1000) - "Hello" + "World" - } - - val f2 = f1 map { x => - x.length - } - - val result = f2() - -The original Future will take at least 1 second to execute due to sleep, which means it is still being processed at the time we call 'map'. The Function we provide gets stored within the Future and later executed by the dispatcher when the result is ready. - -If we do the opposite: - -.. code-block:: scala - - val f1 = Future { - "Hello" + "World" - } - - Thread.sleep(1000) - - val f2 = f1 map { x => - x.length - } - - val result = f2() - -Our little string has been processed long before our 1 second sleep has finished. Because of this, the dispatcher has moved onto other messages that need processing and can no longer calculate the length of the string for us, instead it gets calculated in the current thread just as if we weren't using a Future. - -Normally this works quite well for us as it means there is very little overhead to running a quick Function. If there is a possibility of the Function taking a non-trivial amount of time to process it might be better to have this done concurrently, and for that we use 'flatMap': - -.. code-block:: scala - - val f1 = Future { - "Hello" + "World" - } - - val f2 = f1 flatMap {x => - Future(x.length) - } - - val result = f2() - -Now our second Future is executed concurrently as well. This technique can also be used to combine the results of several Futures into a single calculation, which will be better explained in the following sections. - -For Comprehensions -^^^^^^^^^^^^^^^^^^ - -Since Future has a 'map' and 'flatMap' method it can be easily used in a for comprehension: - -..
code-block:: scala - - val f = for { - a <- Future(10 / 2) // 10 / 2 = 5 - b <- Future(a + 1) // 5 + 1 = 6 - c <- Future(a - 1) // 5 - 1 = 4 - } yield b * c // 6 * 4 = 24 - - val result = f() - -Something to keep in mind when doing this is even though it looks like parts of the above example can run in parallel, each step of the for comprehension is run sequentially. This will happen on separate threads for each step but there isn't much benefit over running the calculations all within a single Future. The real benefit comes when the Futures are created first, and then combining them together. - -Composing Futures -^^^^^^^^^^^^^^^^^ - -The example for comprehension above is an example of composing Futures. A common use case for this is combining the replies of several Actors into a single calculation without resorting to calling 'await' to block for each result. For example: - -.. code-block:: scala - - val f1 = actor1 !!! msg1 - val f2 = actor2 !!! msg2 - - val f3 = for { - a: Int <- f1 - b: Int <- f2 - c: String <- actor3 !!! (a + b) - } yield c - - val result = f3() - -Here we have 2 actors processing a single message each. In the for comprehension we need to add the expected types in order to work with the results. Once the 2 results are available, they are being added together and sent to a third actor, which replies with a String, which we assign to 'result'. - -This is fine when dealing with a known amount of Actors, but can grow unwieldy if we have more then a handful. The 'sequence' and 'traverse' helper methods can make it easier to handle more complex use cases. Both of these methods are ways of turning a Traversable[Future[A]] into a Future[Traversable[A]]. For example: - -.. code-block:: scala - - // oddActor returns odd numbers sequentially from 1 - val listOfFutures: List[Future[Int]] = List.fill(100)(oddActor !!! 
GetNext) - - // now we have a Future[List[Int]] - val futureList = Future.sequence(listOfFutures) - - // Find the sum of the odd numbers - val oddSum = futureList.map(_.sum).apply - -To better explain what happened in the example, Future.sequence is taking the List[Future[Int]] and turning it into a Future[List[Int]]. We can then use 'map' to work with the List[Int] directly, and we find the sum of the List. - -The 'traverse' method is similar to 'sequence', but it takes a Traversable[A] and a Function T => Future[B] to return a Future[Traversable[B]]. For example, to use 'traverse' to sum the first 100 odd numbers: - -.. code-block:: scala - - val oddSum = Future.traverse((1 to 100).toList)(x => Future(x * 2 - 1)).map(_.sum).apply - -This is the same result as this example: - -.. code-block:: scala - - val oddSum = Future.sequence((1 to 100).toList.map(x => Future(x * 2 - 1))).map(_.sum).apply - -But it may be faster to use 'traverse' as it doesn't have to create an intermediate List[Future[Int]]. - -This is just a sample of what can be done, but to use more advanced techniques it is easier to take advantage of Scalaz, which Akka has support for in it's akka-scalaz module. - -Scalaz -^^^^^^ - -Akka also has a `Scalaz module `_ for a more complete support of programming in a functional style. - -Exceptions (TODO) ------------------ - -Handling exceptions. - -Fine Tuning (TODO) ------------------- - -Dispatchers and timeouts diff --git a/akka-docs/pending/getting-started.rst b/akka-docs/pending/getting-started.rst deleted file mode 100644 index d76db9b299..0000000000 --- a/akka-docs/pending/getting-started.rst +++ /dev/null @@ -1,126 +0,0 @@ -Getting Started -=============== - -There are several ways to download Akka. You can download the full distribution with microkernel, which includes all modules. You can download just the core distribution. Or you can use a build tool like Maven or SBT to download dependencies from the Akka Maven repository. 
- -A list of each of the Akka module JARs dependencies can be found `here `_. - -Using a release distribution ----------------------------- - -Akka is split up into two different parts: - -* Akka - The core modules. Reflects all the sections under 'Scala API' and 'Java API' in the navigation bar. -* Akka Modules - The microkernel and add-on modules. Reflects all the sections under 'Add-on modules' in the navigation bar. - -Download the release you need (Akka core or Akka Modules) from ``_ and unzip it. - -Microkernel -^^^^^^^^^^^ - -The Akka Modules distribution includes the microkernel. To run the microkernel: - -* Set the AKKA_HOME environment variable to the root of the Akka distribution. -* Run ``java -jar akka-modules-1.0.jar``. This will boot up the microkernel and deploy all samples applications from './deploy' dir. - -For example (bash shell): - -:: - - cd akka-modules-1.0 - export AKKA_HOME=`pwd` - java -jar akka-modules-1.0.jar - -Now you can continue with reading the `tutorial `_ and try to build the tutorial sample project step by step. This can be a good starting point before diving into the reference documentation which can be navigated in the left sidebar. - -Using a build tool ------------------- - -Akka can be used with build tools that support Maven repositories. The Akka Maven repository can be found at ``_. - -Using Akka with Maven -^^^^^^^^^^^^^^^^^^^^^ - -If you want to use Akka with Maven then you need to add this repository to your ``pom.xml``: - -.. code-block:: xml - - - Akka - Akka Maven2 Repository - http://akka.io/repository/ - - -Then you can add the Akka dependencies. For example, here is the dependency for Akka Actor 1.0: - -.. code-block:: xml - - - se.scalablesolutions.akka - akka-actor - 1.0 - - -Using Akka with SBT -^^^^^^^^^^^^^^^^^^^ - -Akka has an SBT plugin which makes it very easy to get started with Akka and SBT. - -The Scala version in your SBT project needs to match the version that Akka is built against. 
For 1.0 this is 2.8.1. - -To use the plugin, first add a plugin definition to your SBT project by creating project/plugins/Plugins.scala with: - -.. code-block:: scala - - import sbt._ - - class Plugins(info: ProjectInfo) extends PluginDefinition(info) { - val akkaRepo = "Akka Repo" at "http://akka.io/repository" - val akkaPlugin = "se.scalablesolutions.akka" % "akka-sbt-plugin" % "1.0" - } - -*Note: the plugin version matches the Akka version provided. The current release is 1.0.* - -Then mix the AkkaProject trait into your project definition. For example: - -.. code-block:: scala - - class MyProject(info: ProjectInfo) extends DefaultProject(info) with AkkaProject - -*Note: This adds akka-actor as a dependency by default.* - -If you also want to include other Akka modules there is a convenience method: ``akkaModule``. For example, you can add extra Akka modules by adding any of the following lines to your project class: - -.. code-block:: scala - - val akkaStm = akkaModule("stm") - val akkaTypedActor = akkaModule("typed-actor") - val akkaRemote = akkaModule("remote") - val akkaHttp = akkaModule("http") - val akkaAmqp = akkaModule("amqp") - val akkaCamel = akkaModule("camel") - val akkaCamelTyped = akkaModule("camel-typed") - val akkaSpring = akkaModule("spring") - val akkaJta = akkaModule("jta") - val akkaCassandra = akkaModule("persistence-cassandra") - val akkaMongo = akkaModule("persistence-mongo") - val akkaRedis = akkaModule("persistence-redis") - -Build from sources ------------------- - -Akka uses Git and is hosted at `Github `_. - -* Akka: clone the Akka repository from ``_ -* Akka Modules: clone the Akka Modules repository from ``_ - -Continue reading the page on `how to build and run Akka `_ - -Need help? ----------- - -If you have questions you can get help on the `Akka Mailing List `_. - -You can also ask for `commercial support `_. - -Thanks for being a part of the Akka community. 
diff --git a/akka-docs/pending/issue-tracking.rst b/akka-docs/pending/issue-tracking.rst deleted file mode 100644 index fcff2e2c94..0000000000 --- a/akka-docs/pending/issue-tracking.rst +++ /dev/null @@ -1,41 +0,0 @@ -Issue Tracking -============== - -Akka is using Assembla as issue tracking system. - -Browsing --------- - -You can find the Akka tickets here: ``_ -The roadmap for each milestone is here: ``_ - -Creating tickets ----------------- - -In order to create tickets you need to do the following: - -# Register here: ``_ -# Log in -# Create the ticket: ``_ - -Thanks a lot for reporting bugs and suggesting features. - -Failing test ------------- - -Please submit a failing test on the following format: - -``_ - -import org.scalatest.WordSpec -import org.scalatest.matchers.MustMatchers - -class Ticket001Spec extends WordSpec with MustMatchers { - - "An XXX" should { - "do YYY" in { - 1 must be (1) - } - } -} -``_ diff --git a/akka-docs/pending/logging.rst b/akka-docs/pending/logging.rst deleted file mode 100644 index 833f2f419b..0000000000 --- a/akka-docs/pending/logging.rst +++ /dev/null @@ -1,4 +0,0 @@ -Logging -======= - -Logging has been removed. See the `Event Handler `_. diff --git a/akka-docs/pending/migration-guide-0.10.x-1.0.x.rst b/akka-docs/pending/migration-guide-0.10.x-1.0.x.rst deleted file mode 100644 index 300100941f..0000000000 --- a/akka-docs/pending/migration-guide-0.10.x-1.0.x.rst +++ /dev/null @@ -1,432 +0,0 @@ -Migration guide from 0.10.x to 1.0.x -==================================== - ----- - -Akka & Akka Modules separated into two different repositories and distributions -------------------------------------------------------------------------------- - -Akka is split up into two different parts: -* Akka - Reflects all the sections under 'Scala API' and 'Java API' in the navigation bar. -* Akka Modules - Reflects all the sections under 'Add-on modules' in the navigation bar. 
- -Download the release you need (Akka core or Akka Modules) from ``_ and unzip it. - ----- - -Changed Akka URI ----------------- - -http:*akkasource.org changed to http:*akka.io - -Reflects XSDs, Maven repositories, ScalaDoc etc. - ----- - -Removed 'se.scalablesolutions' prefix -------------------------------------- - -We have removed some boilerplate by shortening the Akka package from -**se.scalablesolutions.akka** to just **akka** so just do a search-replace in your project, -we apologize for the inconvenience, but we did it for our users. - ----- - -Akka-core is no more --------------------- - -Akka-core has been split into akka-actor, akka-stm, akka-typed-actor & akka-remote this means that you need to update any deps you have on akka-core. - ----- - -Config ------- - -Turning on/off modules -^^^^^^^^^^^^^^^^^^^^^^ - -All the 'service = on' elements for turning modules on and off have been replaced by a top-level list of the enabled services. - -Services available for turning on/off are: -* "remote" -* "http" -* "camel" - -**All** services are **OFF** by default. Enable the ones you are using. - -.. code-block:: ruby - - akka { - enabled-modules = [] # Comma separated list of the enabled modules. Options: ["remote", "camel", "http"] - } - -Renames -^^^^^^^ - -* 'rest' section - has been renamed to 'http' to align with the module name 'akka-http'. -* 'storage' section - has been renamed to 'persistence' to align with the module name 'akka-persistence'. - -.. code-block:: ruby - - akka { - http { - .. - } - - persistence { - .. 
- } - } - ----- - -Important changes from RC2-RC3 ------------------------------- - -**akka.config.SupervisionSupervise** -def apply(actorRef: ActorRef, lifeCycle: LifeCycle, registerAsRemoteService: Boolean = false) -- boolean instead of remoteAddress, registers that actor with it's id as service name on the local server - -**akka.actor.Actors now is the API for Java to interact with Actors, Remoting and ActorRegistry:** - -import static akka.actor.Actors.*; -*actorOf()..* -remote().actorOf()... -*registry().actorsFor("foo")...* - -***akka.actor.Actor now is the API for Scala to interact with Actors, Remoting and ActorRegistry:*** - -*import akka.actor.Actor._* -actorOf()... -*remote.actorOf()...* -registry.actorsFor("foo") - -**object UntypedActor has been deleted and replaced with akka.actor.Actors/akka.actor.Actor (Java/Scala)** -UntypedActor.actorOf -> Actors.actorOf (Java) or Actor.actorOf (Scala) - -**object ActorRegistry has been deleted and replaced with akka.actor.Actors.registry()/akka.actor.Actor.registry (Java/Scala)** -ActorRegistry. -> Actors.registry(). (Java) or Actor.registry. 
(Scala) - -**object RemoteClient has been deleted and replaced with akka.actor.Actors.remote()/akka.actor.Actor.remote (Java/Scala)** -RemoteClient -> Actors.remote() (Java) or Actor.remote (Scala) - -**object RemoteServer has been deleted and replaced with akka.actor.Actors.remote()/akka.actor.Actor.remote (Java/Scala)** -RemoteServer - deleted -> Actors.remote() (Java) or Actor.remote (Scala) - -**classes RemoteActor, RemoteUntypedActor and RemoteUntypedConsumerActors has been deleted and replaced** -**with akka.actor.Actors.remote().actorOf(x, host port)/akka.actor.Actor.remote.actorOf(x, host, port)** -RemoteActor, RemoteUntypedActor - deleted, use: remote().actorOf(YourActor.class, host, port) (Java) or remote.actorOf[YourActor](host, port) - -**Remoted spring-actors now default to spring id as service-name, use "service-name" attribute on "remote"-tag to override** - -**Listeners for RemoteServer and RemoteClient** are now registered on Actors.remote().addListener (Java) or Actor.remote.addListener (Scala), -this means that all listeners get all remote events, both remote server evens and remote client events, **so adjust your code accordingly.** - -**ActorRef.startLinkRemote has been removed since one specified on creation wether the actor is client-managed or not.** - -Important change from RC3 to RC4 --------------------------------- - -The Akka-Spring namespace has changed from akkasource.org and scalablesolutions.se to http:*akka.io/schema and http:*akka.io/akka-.xsd - ----- - -Module akka-actor ------------------ - -The Actor.init callback has been renamed to "preStart" to align with the general callback naming and is more clear about when it's called. - -The Actor.shutdown callback has been renamed to "postStop" to align with the general callback naming and is more clear about when it's called. 
- -The Actor.initTransactionalState callback has been removed, logic should be moved to preStart and be wrapped in an atomic block - -**se.scalablesolutions.akka.config.ScalaConfig** and **se.scalablesolutions.akka.config.JavaConfig** have been merged into **akka.config.Supervision** - -**RemoteAddress** has moved from **se.scalablesolutions.akka.config.ScalaConfig** to **akka.config** - -The ActorRef.lifeCycle has changed signature from Option[LifeCycle] to LifeCycle, this means you need to change code that looks like this: -**self.lifeCycle = Some(LifeCycle(Permanent))** to **self.lifeCycle = Permanent** - -The equivalent to **self.lifeCycle = None** is **self.lifeCycle = UndefinedLifeCycle** -**LifeCycle(Permanent)** becomes **Permanent** -**new LifeCycle(permanent())** becomes **permanent()** (need to do: import static se.scalablesolutions.akka.config.Supervision.*; first) - -**JavaConfig.Component** and **ScalaConfig.Component** have been consolidated and renamed as **Supervision.SuperviseTypedActor** - -**self.trapExit** has been moved into the FaultHandlingStrategy, and **ActorRef.faultHandler** has switched type from Option[FaultHandlingStrategy] -to FaultHandlingStrategy: - -|| **Scala** || -|| -``_ -import akka.config.Supervision._ - -self.faultHandler = OneForOneStrategy(List(classOf[Exception]), 3, 5000) - -``_ || -|| **Java** || -|| -``_ -import static akka.Supervision.*; - -getContext().setFaultHandler(new OneForOneStrategy(new Class[] { Exception.class },50,1000)) - -``_ || - -**RestartStrategy, AllForOne, OneForOne** have been replaced with **AllForOneStrategy** and **OneForOneStrategy** in **se.scalablesolutions.akka.config.Supervision** - -|| **Scala** || -|| -``_ -import akka.config.Supervision._ -SupervisorConfig( - OneForOneStrategy(List(classOf[Exception]), 3, 5000), - Supervise(pingpong1,Permanent) :: Nil -) - -``_ || -|| **Java** || -|| -``_ -import static akka.Supervision.*; - -new SupervisorConfig( - new OneForOneStrategy(new Class[] { 
Exception.class },50,1000), - new Server[] { new Supervise(pingpong1, permanent()) } -) - -``_ || - -We have removed the following factory methods: - -**Actor.actor { case foo => bar }** -**Actor.transactor { case foo => bar }** -**Actor.temporaryActor { case foo => bar }** -**Actor.init {} receive { case foo => bar }** - -They started the actor and no config was possible, it was inconsistent and irreparable. - -replace with your own factories, or: - -**actorOf( new Actor { def receive = { case foo => bar } } ).start** -**actorOf( new Actor { self.lifeCycle = Temporary; def receive = { case foo => bar } } ).start** - -ReceiveTimeout is now rescheduled after every message, before there was only an initial timeout. -To stop rescheduling of ReceiveTimeout, set **receiveTimeout = None** - -HotSwap -------- - -HotSwap does no longer use behavior stacking by default, but that is an option to both "become" and HotSwap. - -HotSwap now takes for Scala a Function from ActorRef to a Receive, the ActorRef passed in is the reference to self, so you can do self.reply() etc. - ----- - -Module akka-stm ---------------- - -The STM stuff is now in its own module. This means that there is no support for transactions or transactors in akka-actor. - -Local and global -^^^^^^^^^^^^^^^^ - -The **local/global** distinction has been dropped. This means that if the following general import was being used: - -.. code-block:: scala - - import akka.stm.local._ - -this is now just: - -.. code-block:: scala - - import akka.stm._ - -Coordinated is the new global -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -There is a new explicit mechanism for coordinated transactions. See the `Scala Transactors `_ and `Java Transactors `_ documentation for more information. Coordinated transactions and transactors are found in the ``akka.transactor`` package now. The usage of transactors has changed. - -Agents -^^^^^^ - -Agent is now in the akka-stm module and has moved to the ``akka.agent`` package. 
The implementation has been reworked and is now closer to Clojure agents. There is not much difference in general usage, the main changes involve interaction with the STM. - -While updates to Agents are asynchronous, the state of an Agent is always immediately available for reading by any thread. Agents are integrated with the STM - any dispatches made in a transaction are held until that transaction commits, and are discarded if it is retried or aborted. There is a new ``sendOff`` method for long-running or blocking update functions. - ----- - -Module akka-camel ------------------ - -Access to the CamelService managed by CamelServiceManager has changed: - -* Method service renamed to mandatoryService (Scala) -* Method service now returns Option[CamelService] (Scala) -* Introduced method getMandatoryService() (Java) -* Introduced method getService() (Java) - -|| **Scala** || -|| -``_ -import se.scalablesolutions.akka.camel.CamelServiceManager._ -import se.scalablesolutions.akka.camel.CamelService - -val o: Option[CamelService] = service -val s: CamelService = mandatoryService - -``_ || -|| **Java** || -|| -``_ -import se.scalablesolutions.akka.camel.CamelService; -import se.scalablesolutions.akka.japi.Option; -import static se.scalablesolutions.akka.camel.CamelServiceManager.*; - -Option o = getService(); -CamelService s = getMandatoryService(); - -``_ || - -Access to the CamelContext and ProducerTemplate managed by CamelContextManager has changed: - -* Method context renamed to mandatoryContext (Scala) -* Method template renamed to mandatoryTemplate (Scala) -* Method service now returns Option[CamelContext] (Scala) -* Method template now returns Option[ProducerTemplate] (Scala) -* Introduced method getMandatoryContext() (Java) -* Introduced method getContext() (Java) -* Introduced method getMandatoryTemplate() (Java) -* Introduced method getTemplate() (Java) - -|| **Scala** || -|| -``_ -import org.apache.camel.CamelContext -import org.apache.camel.ProducerTemplate 
- -import se.scalablesolutions.akka.camel.CamelContextManager._ - -val co: Option[CamelContext] = context -val to: Option[ProducerTemplate] = template - -val c: CamelContext = mandatoryContext -val t: ProducerTemplate = mandatoryTemplate - -``_ || -|| **Java** || -|| -``_ -import org.apache.camel.CamelContext; -import org.apache.camel.ProducerTemplate; - -import se.scalablesolutions.akka.japi.Option; -import static se.scalablesolutions.akka.camel.CamelContextManager.*; - -Option co = getContext(); -Option to = getTemplate(); - -CamelContext c = getMandatoryContext(); -ProducerTemplate t = getMandatoryTemplate(); - -``_ || - -The following methods have been renamed on class se.scalablesolutions.akka.camel.Message: - -* bodyAs(Class) has been renamed to getBodyAs(Class) -* headerAs(String, Class) has been renamed to getHeaderAs(String, Class) - -The API for waiting for consumer endpoint activation and de-activation has been changed - -* CamelService.expectEndpointActivationCount has been removed and replaced by CamelService.awaitEndpointActivation -* CamelService.expectEndpointDeactivationCount has been removed and replaced by CamelService.awaitEndpointDeactivation - -|| **Scala** || -|| -``_ -import se.scalablesolutions.akka.actor.Actor -import se.scalablesolutions.akka.camel.CamelServiceManager._ - -val s = startCamelService -val actor = Actor.actorOf[SampleConsumer] - -// wait for 1 consumer being activated -s.awaitEndpointActivation(1) { - actor.start -} - -// wait for 1 consumer being de-activated -s.awaitEndpointDeactivation(1) { - actor.stop -} - -s.stop - -``_ || -|| **Java** || -|| -``_ -import java.util.concurrent.TimeUnit; -import se.scalablesolutions.akka.actor.ActorRef; -import se.scalablesolutions.akka.actor.Actors; -import se.scalablesolutions.akka.camel.CamelService; -import se.scalablesolutions.akka.japi.SideEffect; -import static se.scalablesolutions.akka.camel.CamelServiceManager.*; - -CamelService s = startCamelService(); -final ActorRef actor = 
Actors.actorOf(SampleUntypedConsumer.class); - -// wait for 1 consumer being activated -s.awaitEndpointActivation(1, new SideEffect() { - public void apply() { - actor.start(); - } -}); - -// wait for 1 consumer being de-activated -s.awaitEndpointDeactivation(1, new SideEffect() { - public void apply() { - actor.stop(); - } -}); - -s.stop(); - -``_ || - -- - -Module Akka-Http ----------------- - -Atmosphere support has been removed. If you were using akka.comet.AkkaServlet for Jersey support only, -you can switch that to: akka.http.AkkaRestServlet and it should work just like before. - -Atmosphere has been removed because we have a new async http support in the form of Akka Mist, a very thin bridge -between Servlet3.0/JettyContinuations and Actors, enabling Http-as-messages, read more about it here: -http://doc.akka.io/http#Mist%20-%20Lightweight%20Asynchronous%20HTTP - -If you really need Atmosphere support, you can add it yourself by following the steps listed at the start of: -http://doc.akka.io/comet - -Module akka-spring ------------------- - -The Akka XML schema URI has changed to http://akka.io/schema/akka - -``_ - - - - - - -``_ diff --git a/akka-docs/pending/release-notes.rst b/akka-docs/pending/release-notes.rst deleted file mode 100644 index 692e7ce244..0000000000 --- a/akka-docs/pending/release-notes.rst +++ /dev/null @@ -1,656 +0,0 @@ -Release Notes -============== - -Changes listed in no particular order. 
- -Current Development 1.1-SNAPSHOT -================================ - -||~ =Type= ||~ =Changes= ||~ =By= || -|| **UPD** || improve FSM DSL: make onTransition syntax nicer || Roland Kuhn || - -Release 1.1-M1 -============== - -||~ =**Type** - ||~ -===== - -**Changes** - ||~ -===== - -**By**= || -|| **ADD** || #647 Extract an akka-camel-typed module out of akka-camel for optional typed actor support || Martin Krasser || -|| **ADD** || #654 Allow consumer actors to acknowledge in-only message exchanges || Martin Krasser || -|| **ADD** || #669 Support self.reply in preRestart and postStop after exception in receive || Martin Krasser || -|| **ADD** || #682 Support for fault-tolerant Producer actors || Martin Krasser || -|| **ADD** || Move TestKit to akka-testkit and add CallingThreadDispatcher || Roland Kuhn || -|| **ADD** || Remote Client message buffering transaction log for buffering messages failed to send due to network problems. Flushes the buffer on reconnect. || Jonas Bonér || -|| **ADD** || Added trait simulate network problems/errors to be used for remote actor testing || Jonas Bonér || -|| **ADD** || Add future and await methods to Agent || Peter Vlugter || -|| **ADD** || #586 Allow explicit reconnect for RemoteClient || Viktor Klang || -|| **ADD** || #587 Dead letter sink queue for messages sent through RemoteClient that didn't get sent due to connection failure || Viktor Klang || -|| **ADD** || #598 actor.id when using akka-spring should be the id of the spring bean || Viktor Klang || -|| **ADD** || #652 Reap expired futures from ActiveRemoteClientHandler || Viktor Klang || -|| **ADD** || #656 Squeeze more out of EBEDD? 
|| Viktor Klang || -|| **ADD** || #715 EventHandler.error should be usable without Throwable || Viktor Klang || -|| **ADD** || #717 Add ExecutionHandler to NettyRemoteServer for more performance and scalability || Viktor Klang || -|| **ADD** || #497 Optimize remote sends done in local scope || Viktor Klang || -|| **ADD** || #633 Add support for Scalaz in akka-modules || Derek Williams || -|| **ADD** || #677 Add map, flatMap, foreach, and filter to Future || Derek Williams || -|| **ADD** || #661 Optimized Future's internals || Derek Williams || -|| **ADD** || #685 Optimize execution of Futures || Derek Williams || -|| **ADD** || #711 Make Future.completeWith work with an uncompleted Future || Derek Williams || -|| **UPD** || #667 Upgrade to Camel 2.7.0 || Martin Krasser || -|| **UPD** || Updated HawtDispatch to 1.1 || Hiram Chirino || -|| **UPD** || #688 Update Akka 1.1-SNAPSHOT to Scala 2.9.0-RC1 || Viktor Klang || -|| **UPD** || #718 Add HawtDispatcher to akka-modules || Viktor Klang || -|| **UPD** || #698 Deprecate client-managed actors || Viktor Klang || -|| **UPD** || #730 Update Akka and Akka Modules to SBT 0.7.6-RC0 || Viktor Klang || -|| **UPD** || #663 Update to latest scalatest || Derek Williams || -|| **FIX** || Misc cleanup, API changes and refactorings || Jonas Bonér || -|| **FIX** || #675 preStart() is called twice when creating new instance of TypedActor || Debasish Ghosh || -|| **FIX** || #704 Write docs for Java Serialization || Debasish Ghosh || -|| **FIX** || #645 Change Futures.awaitAll to not throw FutureTimeoutException but return a List[Option[Any]] || Viktor Klang || -|| **FIX** || #681 Clean exit using server-managed remote actor via client || Viktor Klang || -|| **FIX** || #720 Connection loss when sending to a dead remote actor || Viktor Klang || -|| **FIX** || #593 Move Jetty specific stuff (with deps) from akka-http to akka-kernel || Viktor Klang || -|| **FIX** || #638 ActiveRemoteClientHandler - Unexpected exception from downstream in 
remote client || Viktor Klang || -|| **FIX** || #655 Remote actors with non-uuid names doesnt work for req./reply-pattern || Viktor Klang || -|| **FIX** || #588 RemoteClient.shutdown does not remove client from Map with clients || Viktor Klang || -|| **FIX** || #672 Remoting breaks if mutual DNS lookup isn't possible || Viktor Klang || -|| **FIX** || #699 Remote typed actor per-session server won't start if called method has no result || Viktor Klang || -|| **FIX** || #702 Handle ReadTimeoutException in akka-remote || Viktor Klang || -|| **FIX** || #708 Fall back to Akka classloader if event-handler class cannot be found. || Viktor Klang || -|| **FIX** || #716 Split akka-http and clean-up dependencies || Viktor Klang || -|| **FIX** || #721 Inability to parse/load the Config should do a System.exit(-1) || Viktor Klang || -|| **FIX** || #722 Race condition in Actor hotswapping || Viktor Klang || -|| **FIX** || #723 MessageSerializer CNFE regression || Viktor Klang || -|| **FIX** || #680 Remote TypedActor behavior differs from local one when sending to generic interfaces || Viktor Klang || -|| **FIX** || #659 Calling await on a Future that is expired and uncompleted should throw an exception || Derek Williams || -|| **REM** || #626 Update and clean up dependencies || Viktor Klang || -|| **REM** || #623 Remove embedded-repo (Akka + Akka Modules) || Viktor Klang || -|| **REM** || #686 Remove SBinary || Viktor Klang || - -Release 1.0-RC6 -=============== - -||~ =**Type** - ||~ -===== - -**Changes** - ||~ -===== - -**By**= || -|| **FIX** || #628 Supervied TypedActors fails to restart || Viktor Klang || -|| **FIX** || #629 Stuck upon actor invocation || Viktor Klang || - -Release 1.0-RC5 -=============== - -||~ =**Type** - ||~ -===== - -**Changes** - ||~ -===== - -**By**= || -|| **FIX** || Source JARs published to 'src' instead of 'source' || Odd Moller || -|| **FIX** || #612 Conflict between Spring autostart=true for Consumer actors and || Martin Krasser || -|| **FIX** || 
#613 Change Akka XML schema URI to http://akka.io/schema/akka || Martin Krasser || -|| **FIX** || Spring XSD namespace changed from 'akkasource.org' to 'akka.io' || Viktor Klang || -|| **FIX** || Checking for remote secure cookie is disabled by default if no akka.conf is loaded || Viktor Klang || -|| **FIX** || Changed Casbah to ScalaToolsRepo for akka-sbt-plugin || Viktor Klang || -|| **FIX** || ActorRef.forward now doesn't require the sender to be set on the message || Viktor Klang || - -Release 1.0-RC3 -=============== - -||~ =**Type** - ||~ -===== - -**Changes** - ||~ -===== - -**By**= || -|| **ADD** || #568 Add autostart attribute to Spring actor configuration || Viktor Klang || -|| **ADD** || #586 Allow explicit reconnect for remote clients || Viktor Klang || -|| **ADD** || #587 Add possibility for dead letter queues for failed remote sends || Viktor Klang || -|| **ADD** || #497 Optimize remote send in local scope || Viktor Klang || -|| **ADD** || Improved Java Actor API: akka.actor.Actors || Viktor Klang || -|| **ADD** || Improved Scala Actor API: akka.actor.Actor || Viktor Klang || -|| **ADD** || #148 Create a testing framework for testing Actors || Roland Kuhn || -|| **ADD** || Support Replica Set/Replica Pair connection modes with MongoDB Persistence || Brendan McAdams || -|| **ADD** || User configurable Write Concern settings for MongoDB Persistence || Brendan McAdams || -|| **ADD** || Support for configuring MongoDB Persistence with MongoDB's URI Connection String || Brendan McAdams || -|| **ADD** || Support for Authentication with MongoDB Persistence || Brendan McAdams || -|| **FIX** || Misc bug fixes || Team || -|| **FIX** || #603 Race condition in Remote send || Viktor Klang || -|| **FIX** || #594 Log statement in RemoteClientHandler was wrongly formatted || Viktor Klang || -|| **FIX** || #580 Message uuids must be generated || Viktor Klang || -|| **FIX** || #583 Serialization classloader has a visibility issue || Viktor Klang || -|| **FIX** || #598 
By default the bean ID should become the actor id for Spring actor configuration || Viktor Klang || -|| **FIX** || #577 RemoteClientHandler swallows certain exceptions || Viktor Klang || -|| **FIX** || #581 Fix edgecase where an exception could not be deserialized || Viktor Klang || -|| **FIX** || MongoDB write success wasn't being properly checked; fixed (integrated w/ new write concern features) || Brendan McAdams || -|| **UPD** || Improvements to FSM module akka.actor.FSM || Manie & Kuhn || -|| **UPD** || Changed Akka URI to http://akka.io. Reflects both XSDs, Maven repositories etc. || Jonas Bonér || -|| **REM** || #574 Remote RemoteClient, RemoteServer and RemoteNode || Viktor Klang || -|| **REM** || object UntypedActor, object ActorRegistry, class RemoteActor, class RemoteUntypedActor, class RemoteUntypedConsumerActor || Viktor Klang || - -Release 1.0-RC1 -=============== - -||~ =**Type** - ||~ -===== - -**Changes** - ||~ -===== - -**By**= || -|| **ADD** || #477 Added support for Remote Agents || Viktor Klang || -|| **ADD** || #460 Hotswap for Java API (UntypedActor) || Viktor Klang || -|| **ADD** || #471 Added support for TypedActors to return Java Option || Viktor Klang || -|| **ADD** || New design and API for more fluent and intuitive FSM module || Roland Kuhn || -|| **ADD** || Added secure cookie based remote node authentication || Jonas Bonér || -|| **ADD** || Untrusted safe mode for remote server || Jonas Bonér || -|| **ADD** || Refactored config file format - added list of enabled modules etc. || Jonas Bonér || -|| **ADD** || Docs for Dataflow Concurrency || Jonas Bonér || -|| **ADD** || Made remote message frame size configurable || Jonas Bonér || -|| **ADD** || #496 Detect when Remote Client disconnects || Jonas Bonér || -|| **ADD** || #472 Improve API to wait for endpoint activation/deactivation (`more `_ ...) || Martin Krasser || -|| **ADD** || #473 Allow consumer actors to customize their own routes (`more `_ ...) 
|| Martin Krasser || -|| **ADD** || #504 Add session bound server managed remote actors || Paul Pach || -|| **ADD** || DSL for FSM || Irmo Manie || -|| **ADD** || Shared unit test for all dispatchers to enforce Actor Model || Viktor Klang || -|| **ADD** || #522 Make stacking optional for become and HotSwap || Viktor Klang || -|| **ADD** || #524 Make frame size configurable for client&server || Bonér & Klang || -|| **ADD** || #526 Add onComplete callback to Future || Viktor Klang || -|| **ADD** || #536 Document Channel-abstraction for later replies || Viktor Klang || -|| **ADD** || #540 Include self-reference as parameter to HotSwap || Viktor Klang || -|| **ADD** || #546 Include Garrick Evans' Akka-mist into master || Viktor Klang || -|| **ADD** || #438 Support remove operation in PersistentVector || Scott Clasen || -|| **ADD** || #229 Memcached protocol support for Persistence module || Scott Clasen || -|| **ADD** || Amazon SimpleDb support for Persistence module || Scott Clasen || -|| **FIX** || #518 refactor common storage bakend to use bulk puts/gets where possible || Scott Clasen || -|| **FIX** || #532 Prevent persistent datatypes with same uuid from corrupting a TX || Scott Clasen || -|| **FIX** || #464 ThreadPoolBuilder should be rewritten to be an immutable builder || Viktor Klang || -|| **FIX** || #449 Futures.awaitOne now uses onComplete listeners || Viktor Klang || -|| **FIX** || #486 Fixed memory leak caused by Configgy that prevented full unload || Viktor Klang || -|| **FIX** || #488 Fixed race condition in EBEDD restart || Viktor Klang || -|| **FIX** || #492 Fixed race condition in Scheduler || Viktor Klang || -|| **FIX** || #493 Switched to non-https repository for JBoss artifacts || Viktor Klang || -|| **FIX** || #481 Exception when creating an actor now behaves properly when supervised || Viktor Klang || -|| **FIX** || #498 Fixed no-op in supervision DSL || Viktor Klang || -|| **FIX** || #491 reply and reply_? 
now sets a sender reference || Viktor Klang || -|| **FIX** || #519 NotSerializableError when using Remote Typed Actors || Viktor Klang || -|| **FIX** || #523 Message.toString is called all the time for incomign messages, expensive || Viktor Klang || -|| **FIX** || #537 Make sure top folder is included in sources jar || Viktor Klang || -|| **FIX** || #529 Remove Scala version number from Akka artifact ids || Viktor Klang || -|| **FIX** || #533 Can't set LifeCycle from the Java API || Viktor Klang || -|| **FIX** || #542 Make Future-returning Remote Typed Actor methods use onComplete || Viktor Klang || -|| **FIX** || #479 Do not register listeners when CamelService is turned off by configuration || Martin Krasser || -|| **FIX** || Fixed bug with finding TypedActor by type in ActorRegistry || Jonas Bonér || -|| **FIX** || #515 race condition in FSM StateTimeout Handling || Irmo Manie || -|| **UPD** || Akka package from "se.scalablesolutions.akka" to "akka" || Viktor Klang || -|| **UPD** || Update Netty to 3.2.3.Final || Viktor Klang || -|| **UPD** || #458 Camel to 2.5.0 || Martin Krasser || -|| **UPD** || #458 Spring to 3.0.4.RELEASE || Martin Krasser || -|| **UPD** || #458 Jetty to 7.1.6.v20100715 || Martin Krasser || -|| **UPD** || Update to Scala 2.8.1 || Jonas Bonér || -|| **UPD** || Changed remote server default port to 2552 (AKKA) || Jonas Bonér || -|| **UPD** || Cleaned up and made remote protocol more effifient || Jonas Bonér || -|| **UPD** || #528 RedisPersistentRef should not throw in case of missing key || Debasish Ghosh || -|| **UPD** || #531 Fix RedisStorage add() method in Java API || Debasish Ghosh || -|| **UPD** || #513 Implement snapshot based persistence control in SortedSet || Debasish Ghosh || -|| **UPD** || #547 Update FSM docs || Irmo Manie || -|| **UPD** || #548 Update AMQP docs || Irmo Manie || -|| **REM** || Atmosphere integration, replace with Mist || Klang @ Evans || -|| **REM** || JGroups integration, doesn't play with cloud services :/ || 
Viktor Klang || - -Release 1.0-MILESTONE1 -====================== - -||~ =**Type** - ||~ -===== - -**Changes** - ||~ -===== - -**By**= || -|| **ADD** || Splitted akka-core up in akka-actor, akka-typed-actor & akka-remote || Jonas Bonér || -|| **ADD** || Added meta-data to network protocol || Jonas Bonér || -|| **ADD** || HotSwap and actor.become now uses a stack of PartialFunctions with API for pushing and popping the stack || Jonas Bonér || -|| **ADD** || #440 Create typed actors with constructor args || Michael Kober || -|| **ADD** || #322 Abstraction for unification of sender and senderFuture for later reply || Michael Kober || -|| **ADD** || #364 Serialization for TypedActor proxy reference || Michael Kober || -|| **ADD** || #423 Support configuration of Akka via Spring || Michael Kober || -|| **FIX** || #426 UUID wrong for remote proxy for server managed actor || Michael Kober || -|| **ADD** || #378 Support for server initiated remote TypedActor and UntypedActor in Spring config || Michael Kober || -||< **ADD** ||< #194 Support for server-managed typed actor ||< Michael Kober || -|| **ADD** || #447 Allow Camel service to be turned off by configuration || Martin Krasser || -|| **ADD** || #457 JavaAPI improvements for akka-camel (please read the `migration guide `_) || Martin Krasser || -|| **ADD** || #465 Dynamic message routing to actors (`more `_ ...) 
|| Martin Krasser || -|| **FIX** || #410 Use log configuration from config directory || Martin Krasser || -|| **FIX** || #343 Some problems with persistent structures || Debasish Ghosh || -|| **FIX** || #430 Refactor / re-implement MongoDB adapter so that it conforms to the guidelines followed in Redis and Cassandra modules || Debasish Ghosh || -|| **FIX** || #436 ScalaJSON serialization does not map Int data types properly when used within a Map || Debasish Ghosh || -|| **ADD** || #230 Update redisclient to be Redis 2.0 compliant || Debasish Ghosh || -|| **FIX** || #435 Mailbox serialization does not retain messages || Debasish Ghosh || -|| **ADD** || #445 Integrate type class based serialization of sjson into Akka || Debasish Ghosh || -|| **FIX** || #480: Regression multibulk replies redis client || Debasish Ghosh || -|| **FIX** || #415 Publish now generate source and doc jars || Viktor Klang || -|| **FIX** || #420 REST endpoints should be able to be processed in parallel || Viktor Klang || -|| **FIX** || #422 Dispatcher config should work for ThreadPoolBuilder-based dispatchers || Viktor Klang || -|| **FIX** || #401 ActorRegistry should not leak memory || Viktor Klang || -|| **FIX** || #250 Performance optimization for ExecutorBasedEventDrivenDispatcher || Viktor Klang || -|| **FIX** || #419 Rename init and shutdown callbacks to preStart and postStop, and remove initTransactionalState || Viktor Klang || -|| **FIX** || #346 Make max no of restarts (and within) are now both optional || Viktor Klang || -|| **FIX** || #424 Actors self.supervisor not set by the time init() is called when started by startLink() || Viktor Klang || -|| **FIX** || #427 spawnLink and startLink now has the same dispatcher semantics || Viktor Klang || -|| **FIX** || #413 Actor shouldn't process more messages when waiting to be restarted (HawtDispatcher still does) || Viktor Klang || -|| **FIX** || !! and !!! 
now do now not block the actor when used in remote actor || Viktor Klang || -|| **FIX** || RemoteClient now reconnects properly || Viktor Klang || -|| **FIX** || Logger.warn now properly works with varargs || Viktor Klang || -|| **FIX** || #450 Removed ActorRef lifeCycle boilerplate: Some(LifeCycle(Permanent)) => Permanent || Viktor Klang || -|| **FIX** || Moved ActorRef.trapExit into ActorRef.faultHandler and removed Option-boilerplate from faultHandler || Viktor Klang || -|| **FIX** || ThreadBasedDispatcher cheaper for idling actors, also benefits from all that is ExecutorBasedEventDrivenDispatcher || Viktor Klang || -|| **FIX** || Fixing Futures.future, uses Actor.spawn under the hood, specify dispatcher to control where block is executed || Viktor Klang || -|| **FIX** || #469 Akka "dist" now uses a root folder to avoid loitering if unzipped in a folder || Viktor Klang || -|| **FIX** || Removed ScalaConfig, JavaConfig and rewrote Supervision configuration || Viktor Klang || -|| **UPD** || Jersey to 1.3 || Viktor Klang || -|| **UPD** || Atmosphere to 0.6.2 || Viktor Klang || -|| **UPD** || Netty to 3.2.2.Final || Viktor Klang || -|| **ADD** || Changed config file priority loading and added config modes. 
|| Viktor Klang || -|| **ADD** || #411 Bumped Jetty to v 7 and migrated to its Eclipse packages || Viktor Klang || -|| **ADD** || #414 Migrate from Grizzly to Jetty for Akka Microkernel || Viktor Klang || -|| **ADD** || #261 Add Java API for 'routing' module || Viktor Klang || -|| **ADD** || #262 Add Java API for Agent || Viktor Klang || -|| **ADD** || #264 Add Java API for Dataflow || Viktor Klang || -|| **ADD** || Using JerseySimpleBroadcaster instead of JerseyBroadcaster in AkkaBroadcaster || Viktor Klang || -|| **ADD** || #433 Throughput deadline added for ExecutorBasedEventDrivenDispatcher || Viktor Klang || -|| **ADD** || Add possibility to set default cometSupport in akka.conf || Viktor Klang || -|| **ADD** || #451 Added possibility to use akka-http as a standalone REST server || Viktor Klang || -|| **ADD** || #446 Added support for Erlang-style receiveTimeout || Viktor Klang || -|| **ADD** || #462 Added support for suspend/resume of processing individual actors mailbox, should give clearer restart semantics || Viktor Klang || -|| **ADD** || #466 Actor.spawn now takes an implicit dispatcher to specify who should run the block || Viktor Klang || -|| **ADD** || #456 Added map to Future and Futures.awaitMap || Viktor Klang || -|| **REM** || #418 Remove Lift sample module and docs || Viktor Klang || -|| **REM** || Removed all Reactor-based dispatchers || Viktor Klang || -|| **REM** || Removed anonymous actor factories || Viktor Klang || -|| **ADD** || Voldemort support for akka-persistence || Scott Clasen || -|| **ADD** || HBase support for akka-persistence || David Greco || -|| **ADD** || CouchDB support for akka-persistence || Yung-Luen Lan & Kahlen || -|| **ADD** || #265 Java API for AMQP module || Irmo Manie || - -Release 0.10 - Aug 21 2010 -========================== - -``_ - -||~ =**Type** - ||~ -===== - -**Changes** - ||~ -===== - -**By**= || -||< **ADD** ||< Added new Actor type: UntypedActor for Java API ||< Jonas Bonér || -||< **ADD** ||< #26 Deep
serialization of Actor including its mailbox ||< Jonas Bonér || -||< **ADD** ||< Rewritten network protocol. More efficient and cleaner. ||< Jonas Bonér || -||< **ADD** ||< Rewritten Java Active Object tests into Scala to be able to run them in SBT. ||< Jonas Bonér || -||< **ADD** ||< Added isDefinedAt method to Actor for checking if it can receive a certain message ||< Jonas Bonér || -||< **ADD** ||< Added caching of Active Object generated class bytes, huge perf improvement ||< Jonas Bonér || -||< **ADD** ||< Added RemoteClient Listener API ||< Jonas Bonér || -||< **ADD** ||< Added methods to retrieve children from a Supervisor ||< Jonas Bonér || -||< **ADD** ||< Rewritten Supervisor to become more clear and "correct" ||< Jonas Bonér || -||< **ADD** ||< Added options to configure a blocking mailbox with custom capacity ||< Jonas Bonér || -||< **ADD** ||< Added RemoteClient reconnection time window configuration option ||< Jonas Bonér || -||< **ADD** ||< Added ActiveObjectContext with sender reference etc ||< Jonas Bonér || -||< **ADD** ||< #293 Changed config format to JSON-style ||< Jonas Bonér || -||< **ADD** ||< #302: Incorporate new ReceiveTimeout in Actor serialization ||< Jonas Bonér || -||< **ADD** ||< Added Java API docs and made it comparable with Scala API docs. 1-1 mirroring ||< Jonas Bonér || -||< **ADD** ||< Renamed Active Object to Typed Actor ||< Jonas Bonér || -||< **ADD** ||< Enhanced Typed Actor: remoting, "real" restart upon failure etc. ||< Jonas Bonér || -||< **ADD** ||< Typed Actor now inherits Actor and is a full citizen in the Actor world. ||< Jonas Bonér || -||< **ADD** ||< Added support for remotely shutting down a remote actor ||< Jonas Bonér || -||< **ADD** ||< #224 Add support for Camel in typed actors (`more `_ ...) ||< Martin Krasser || -||< **ADD** || #282 Producer trait should implement Actor.receive (`more `_ ...)
|| Martin Krasser || -||< **ADD** || #271 Support for bean scope prototype in akka-spring || Johan Rask || -||< **ADD** || Support for DI of values and bean references on target instance in akka-spring || Johan Rask || -||< **ADD** || #287 Method annotated with @postrestart in ActiveObject is not called during restart || Johan Rask || -|| **ADD** || Support for ApplicationContextAware in akka-spring || Johan Rask || -|| **ADD** || #199 Support shutdown hook in TypedActor || Martin Krasser || -|| **ADD** || #266 Access to typed actors from user-defined Camel routes (`more `_ ...) || Martin Krasser || -|| **ADD** || #268 Revise akka-camel documentation (`more `_ ...) || Martin Krasser || -|| **ADD** || #289 Support for Spring configuration element (`more `_ ...) || Martin Krasser || -|| **ADD** || #296 TypedActor lifecycle management || Martin Krasser || -|| **ADD** || #297 Shutdown routes to typed actors (`more `_ ...) || Martin Krasser || -|| **ADD** || #314 akka-spring to support typed actor lifecycle management (`more `_ ...) || Martin Krasser || -|| **ADD** || #315 akka-spring to support configuration of shutdown callback method (`more `_ ...) || Martin Krasser || -|| **ADD** || Fault-tolerant consumer actors and typed consumer actors (`more `_ ...) || Martin Krasser || -|| **ADD** || #320 Leverage Camel's non-blocking routing engine (`more `_ ...) || Martin Krasser || -|| **ADD** || #335 Producer trait should allow forwarding of results || Martin Krasser || -|| **ADD** || #339 Redesign of Producer trait (pre/post processing hooks, async in-out) (`more `_ ...) || Martin Krasser || -|| **ADD** || Non-blocking, asynchronous routing example for akka-camel (`more `_ ...) || Martin Krasser || -|| **ADD** || #333 Allow applications to wait for endpoints being activated (`more `_ ...) 
|| Martin Krasser || -|| **ADD** || #356 Support @consume annotations on typed actor implementation class || Martin Krasser || -|| **ADD** || #357 Support untyped Java actors as endpoint consumer || Martin Krasser || -|| **ADD** || #366 CamelService should be a singleton || Martin Krasser || -|| **ADD** || #392 Support untyped Java actors as endpoint producer || Martin Krasser || -|| **ADD** || #393 Redesign CamelService singleton to be a CamelServiceManager (`more `_ ...) || Martin Krasser || -|| **ADD** || #295 Refactoring Actor serialization to type classes || Debasish Ghosh || -|| **ADD** || #317 Change documentation for Actor Serialization || Debasish Ghosh || -|| **ADD** || #388 Typeclass serialization of ActorRef/UntypedActor isn't Java friendly || Debasish Ghosh || -|| **ADD** || #292 Add scheduleOnce to Scheduler || Irmo Manie || -|| **ADD** || #308 Initial receive timeout on actor || Irmo Manie || -|| **ADD** || Redesign of AMQP module (`more `_ ...) || Irmo Manie || -|| **ADD** || Added "become(behavior: Option[Receive])" to Actor || Viktor Klang || -|| **ADD** || Added "find[T](f: PartialFunction[ActorRef,T]) : Option[T]" to ActorRegistry || Viktor Klang || -|| **ADD** || #369 Possibility to configure dispatchers in akka.conf || Viktor Klang || -|| **ADD** || #395 Create ability to add listeners to RemoteServer || Viktor Klang || -|| **ADD** || #225 Add possibility to use Scheduler from TypedActor || Viktor Klang || -|| **ADD** || #61 Integrate new persistent datastructures in Scala 2.8 || Peter Vlugter || -|| **ADD** || Expose more of what Multiverse can do || Peter Vlugter || -|| **ADD** || #205 STM transaction settings || Peter Vlugter || -|| **ADD** || #206 STM transaction deferred and compensating || Peter Vlugter || -|| **ADD** || #232 Expose blocking transactions || Peter Vlugter || -|| **ADD** || #249 Expose Multiverse Refs for primitives || Peter Vlugter || -|| **ADD** || #390 Expose transaction propagation level in multiverse || Peter Vlugter 
|| -|| **ADD** || Package objects for importing local/global STM || Peter Vlugter || -|| **ADD** || Java API for the STM || Peter Vlugter || -|| **ADD** || #379 Create STM Atomic templates for Java API || Peter Vlugter || -|| **ADD** || #270 SBT plugin for Akka || Peter Vlugter || -|| **ADD** || #198 support for ThreadBasedDispatcher in Spring config || Michael Kober || -|| **ADD** || #377 support HawtDispatcher in Spring config || Michael Kober || -|| **ADD** || #376 support Spring config for untyped actors || Michael Kober || -|| **ADD** || #200 support WorkStealingDispatcher in Spring config || Michael Kober || -|| **UPD** || #336 RabbitMQ 1.8.1 || Irmo Manie || -|| **UPD** || #288 Netty to 3.2.1.Final || Viktor Klang || -|| **UPD** || Atmosphere to 0.6.1 || Viktor Klang || -|| **UPD** || Lift to 2.8.0-2.1-M1 || Viktor Klang || -|| **UPD** || Camel to 2.4.0 || Martin Krasser || -|| **UPD** || Spring to 3.0.3.RELEASE || Martin Krasser || -|| **UPD** || Multiverse to 0.6 || Peter Vlugter || -|| **FIX** || Fixed bug with stm not being enabled by default when no AKKA_HOME is set || Jonas Bonér || -|| **FIX** || Fixed bug in network manifest serialization || Jonas Bonér || -|| **FIX** || Fixed bug Remote Actors || Jonas Bonér || -|| **FIX** || Fixed memory leak in Active Objects || Jonas Bonér || -|| **FIX** || Fixed indeterministic deadlock in Transactor restart || Jonas Bonér || -|| **FIX** || #325 Fixed bug in STM with dead hanging CountDownCommitBarrier || Jonas Bonér || -|| **FIX** || #316: NoSuchElementException during ActiveObject restart || Jonas Bonér || -|| **FIX** || #256: Tests for ActiveObjectContext || Jonas Bonér || -|| **FIX** || Fixed bug in restart of Actors with 'Temporary' life-cycle || Jonas Bonér || -|| **FIX** || #280 Tests fail if there is no akka.conf set || Jonas Bonér || -|| **FIX** || #286 unwanted transitive dependencies from Geronimo project || Viktor Klang || -|| **FIX** || Atmosphere comet comment to use stream instead of writer || 
Viktor Klang || -|| **FIX** || #285 akka.conf is now used as defaults for Akka REST servlet init parameters || Viktor Klang || -|| **FIX** || #321 fixed performance regression in ActorRegistry || Viktor Klang || -|| **FIX** || #286 geronimo servlet 2.4 dep is no longer transitively loaded || Viktor Klang || -|| **FIX** || #334 partial lift sample rewrite to fix breakage || Viktor Klang || -|| **FIX** || Fixed a memory leak in ActorRegistry || Viktor Klang || -|| **FIX** || Fixed a race-condition in Cluster || Viktor Klang || -|| **FIX** || #355 Switched to Array instead of List on ActorRegistry return types || Viktor Klang || -|| **FIX** || #352 ActorRegistry.actorsFor(class) now checks isAssignableFrom || Viktor Klang || -|| **FIX** || Fixed a race condition in ActorRegistry.register || Viktor Klang || -|| **FIX** || #337 Switched from Configgy logging to SLF4J, better for OSGi || Viktor Klang || -|| **FIX** || #372 Scheduler now returns Futures to cancel tasks || Viktor Klang || -|| **FIX** || #306 JSON serialization between remote actors is not transparent || Debasish Ghosh || -|| **FIX** || #204 Reduce object creation in STM || Peter Vlugter || -|| **FIX** || #253 Extend Multiverse BasicRef rather than wrap ProgrammaticRef || Peter Vlugter || -|| **REM** || Removed pure POJO-style Typed Actor (old Active Object) || Jonas Bonér || -|| **REM** || Removed Lift as a dependency for Akka-http || Viktor Klang || -|| **REM** || #294 Remove reply and reply_? from Actor || Viktor Klang || -|| **REM** || Removed one field in Actor, should be a minor memory reduction for high actor quantities || Viktor Klang || -|| **FIX** || #301 DI does not work in akka-spring when specifying an interface || Johan Rask || -|| **FIX** || #328 -trapExit should pass through self with Exit to supervisor || Irmo Manie || -|| **FIX** || Fixed warning when deregistering listeners || Martin Krasser || -|| **FIX** || Added camel-jetty-2.4.0.1 to Akka's embedded-repo. 
-(fixes a concurrency bug in camel-jetty-2.4.0, to be officially released in Camel 2.5.0) || Martin Krasser || -|| **FIX** || #338 RedisStorageBackend fails when redis closes connection to idle client || Debasish Ghosh || -|| **FIX** || #340 RedisStorage Map.get does not throw exception when disconnected from redis but returns None || Debasish Ghosh || - -Release 0.9 - June 2nd 2010 -=========================== - -||~ =**Type** - ||~ -===== - -**Changes** - ||~ -===== - -**By**= || -|| || || || -|| **ADD** || Serializable, immutable, network-aware ActorRefs || Jonas Bonér || -|| **ADD** || Optionally JTA-aware STM transactions || Jonas Bonér || -|| **ADD** || Rewritten supervisor management, making use of ActorRef, now really kills the Actor instance and replaces it || Jonas Bonér || -|| **ADD** || Allow linking and unlinking a declaratively configured Supervisor || Jonas Bonér || -|| **ADD** || Remote protocol rewritten to allow passing along sender reference in all situations || Jonas Bonér || -|| **ADD** || #37 API for JTA usage || Jonas Bonér || -|| **ADD** || Added user accessible 'sender' and 'senderFuture' references || Jonas Bonér || -|| **ADD** || Sender actor is now passed along for all message send functions (!, !!, !!!, forward) || Jonas Bonér || -|| **ADD** || Subscription API for listening to RemoteClient failures || Jonas Bonér || -|| **ADD** || Implemented link/unlink for ActiveObjects || Jan Kronquist / Michael Kober || -|| **ADD** || Added alter method to TransactionalRef + added apply(initValue) to Transactional Map/Vector/Ref || Peter Vlugter || -|| **ADD** || Load dependency JARs in JAR deployed in kernel's ./deploy dir || Jonas Bonér || -|| **ADD** || Allow using Akka without specifying AKKA_HOME or path to akka.conf config file || Jonas Bonér || -|| **ADD** || Redisclient now supports PubSub || Debasish Ghosh || -|| **ADD** || Added a sample project under akka-samples for Redis PubSub using Akka actors || Debasish Ghosh || -|| **ADD** ||
Richer API for Actor.reply || Viktor Klang || -|| **ADD** || Added Listeners to Akka patterns || Viktor Klang || -|| **ADD** || #183 Deactivate endpoints of stopped consumer actors || Martin Krasser || -|| **ADD** || Camel `Message API improvements `_ || Martin Krasser || -|| **ADD** || #83 Send notification to parent supervisor if all actors supervised by supervisor has been permanently killed || Jonas Bonér || -|| **ADD** || #121 Make it possible to dynamically create supervisor hierarchies for Active Objects || Michael Kober || -|| **ADD** || #131 Subscription API for node joining & leaving cluster || Jonas Bonér || -|| **ADD** || #145 Register listener for errors in RemoteClient/RemoteServer || Jonas Bonér || -|| **ADD** || #146 Create an additional distribution with sources || Jonas Bonér || -|| **ADD** || #149 Support loading JARs from META-INF/lib in JARs put into the ./deploy directory || Jonas Bonér || -|| **ADD** || #166 Implement insertVectorStorageEntriesFor in CassandraStorageBackend || Jonas Bonér || -|| **ADD** || #168 Separate ID from Value in Actor; introduce ActorRef || Jonas Bonér || -|| **ADD** || #174 Create sample module for remote actors || Jonas Bonér || -|| **ADD** || #175 Add new sample module with Peter Vlugter's Ant demo || Jonas Bonér || -|| **ADD** || #177 Rewrite remote protocol to make use of new ActorRef || Jonas Bonér || -|| **ADD** || #180 Make use of ActorRef indirection for fault-tolerance management || Jonas Bonér || -|| **ADD** || #184 Upgrade to Netty 3.2.0.CR1 || Jonas Bonér || -|| **ADD** || #185 Rewrite Agent and Supervisor to work with new ActorRef || Jonas Bonér || -|| **ADD** || #188 Change the order of how the akka.conf is detected || Jonas Bonér || -|| **ADD** || #189 Reintroduce 'sender: Option[Actor]' ref in Actor || Jonas Bonér || -|| **ADD** || #203 Upgrade to Scala 2.8 RC2 || Jonas Bonér || -|| **ADD** || #222 Using Akka without AKKA_HOME or akka.conf || Jonas Bonér || -|| **ADD** || #234 Add support for 
injection and management of ActiveObjectContext with RTTI such as 'sender' and 'senderFuture' references etc. || Jonas Bonér || -|| **ADD** || #236 Upgrade SBinary to Scala 2.8 RC2 || Jonas Bonér || -|| **ADD** || #235 Problem with RedisStorage.getVector(..) data structure storage management || Jonas Bonér || -|| **ADD** || #239 Upgrade to Camel 2.3.0 || Martin Krasser || -|| **ADD** || #242 Upgraded to Scala 2.8 RC3 || Jonas Bonér || -|| **ADD** || #243 Upgraded to Protobuf 2.3.0 || Jonas Bonér || -|| **ADD** || Added option to specify class loader when de-serializing messages and RemoteActorRef in RemoteClient || Jonas Bonér || -|| **ADD** || #238 Upgrading to Cassandra 0.6.1 || Jonas Bonér || -|| **ADD** || Upgraded to Jersey 1.2 || Viktor Klang || -|| **ADD** || Upgraded Atmosphere to 0.6-SNAPSHOT, adding WebSocket support || Viktor Klang || -|| **FIX** || Simplified ActiveObject configuration || Michael Kober || -|| **FIX** || #237 Upgrade Mongo Java driver to 1.4 (the latest stable release) || Debasish Ghosh || -|| **FIX** || #165 Implemented updateVectorStorageEntryFor in Mongo persistence module || Debasish Ghosh || -|| **FIX** || #154: Allow ActiveObjects to use the default timeout in config file || Michael Kober || -|| **FIX** || Active Object methods with @inittransactionalstate should be invoked automatically || Michael Kober || -|| **FIX** || Nested supervisor hierarchy failure propagation bug fixed || Jonas Bonér || -|| **FIX** || Fixed bug on CommitBarrier transaction registration || Jonas Bonér || -|| **FIX** || Merged many modules to reduce total number of modules || Viktor Klang || -|| **FIX** || Future parameterized || Viktor Klang || -|| **FIX** || #191: Workstealing dispatcher didn't work with !! 
|| Viktor Klang || -|| **FIX** || #202: Allow applications to disable stream-caching || Martin Krasser || -|| **FIX** || #119 Problem with Cassandra-backed Vector || Jonas Bonér || -|| **FIX** || #147 Problem replying to remote sender when message sent with ! || Jonas Bonér || -|| **FIX** || #171 initial value of Ref can become null if first transaction rolled back || Jonas Bonér || -|| **FIX** || #172 Fix "broken" Protobuf serialization API || Jonas Bonér || -|| **FIX** || #173 Problem with Vector::slice in CassandraStorage || Jonas Bonér || -|| **FIX** || #190 RemoteClient shutdown ends up in endless loop || Jonas Bonér || -|| **FIX** || #211 Problem with getting CommitBarrierOpenException when using Transaction.Global || Jonas Bonér || -|| **FIX** || #240 Supervised actors not started when starting supervisor || Jonas Bonér || -|| **FIX** || Fixed problem with Transaction.Local not committing to persistent storage || Jonas Bonér || -|| **FIX** || #215: Re-engineered the JAX-RS support || Viktor Klang || -|| **FIX** || Many many bug fixes || Team || -|| **REM** || Shoal cluster module || Viktor Klang || - -Release 0.8.1 - April 6th 2010 -============================== - -||~ =**Type** - ||~ -===== - -**Changes** - ||~ -===== - -**By**= || -|| || || || -|| **ADD** || Redis cluster support || Debasish Ghosh || -|| **ADD** || Reply to remote sender from message set with ! 
|| Jonas Bonér || -|| **ADD** || Load-balancer which prefers actors with few messages in mailbox || Jan Van Besien || -|| **ADD** || Added developer mailing list: [akka-dev AT googlegroups DOT com] || Jonas Bonér || -|| **FIX** || Separated thread-local from thread-global transaction API || Jonas Bonér || -|| **FIX** || Fixed bug in using STM outside Actors || Jonas Bonér || -|| **FIX** || Fixed bug in anonymous actors || Jonas Bonér || -|| **FIX** || Moved web initializer to new akka-servlet module || Viktor Klang || - -Release 0.8 - March 31st 2010 -============================= - -||~ =**Type** - ||~ -===== - -**Changes** - ||~ -===== - -**By**= || -|| || || || -|| **ADD** || Scala 2.8 based || Viktor Klang || -|| **ADD** || Monadic API for Agents || Jonas Bonér || -|| **ADD** || Agents are transactional || Jonas Bonér || -|| **ADD** || Work-stealing dispatcher || Jan Van Besien || -|| **ADD** || Improved Spring integration || Michael Kober || -|| **FIX** || Various bugfixes || Team || -|| **FIX** || Improved distribution packaging || Jonas Bonér || -|| **REMOVE** || Actor.send function || Jonas Bonér || - -Release 0.7 - March 21st 2010 -============================= - -||~ =**Type** - ||~ -===== - -**Changes** - ||~ -===== - -**By**= || -|| || || || -|| **ADD** || Rewritten STM now works generically with fire-forget message flows || Jonas Bonér || -|| **ADD** || Apache Camel integration || Martin Krasser || -|| **ADD** || Spring integration || Michael Kober || -|| **ADD** || Server-managed Remote Actors || Jonas Bonér || -|| **ADD** || Clojure-style Agents || Viktor Klang || -|| **ADD** || Shoal cluster backend || Viktor Klang || -|| **ADD** || Redis-based transactional queue storage backend || Debasish Ghosh || -|| **ADD** || Redis-based transactional sorted set storage backend || Debasish Ghosh || -|| **ADD** || Redis-based atomic INC (index) operation || Debasish Ghosh || -|| **ADD** || Distributed Comet || Viktor Klang || -|| **ADD** || Project moved to SBT 
(simple-build-tool) || Peter Hausel || -|| **ADD** || Futures object with utility methods for Future's || Jonas Bonér || -|| **ADD** || !!! function that returns a Future || Jonas Bonér || -|| **ADD** || Richer ActorRegistry API || Jonas Bonér || -|| **FIX** || Improved event-based dispatcher performance with 40% || Jan Van Besien || -|| **FIX** || Improved remote client pipeline performance || Viktor Klang || -|| **FIX** || Support several Clusters on the same network || Viktor Klang || -|| **FIX** || Structural package refactoring || Jonas Bonér || -|| **FIX** || Various bugs fixed || Team || - -Release 0.6 - January 5th 2010 -============================== - -||~ =**Type** - ||~ -===== - -**Changes** - ||~ -===== - -**By**= || -|| || || || -|| **ADD** || Clustered Comet using Akka remote actors and clustered membership API || Viktor Klang || -|| **ADD** || Cluster membership API and implementation based on JGroups || Viktor Klang || -|| **ADD** || Security module for HTTP-based authentication and authorization || Viktor Klang || -|| **ADD** || Support for using Scala XML tags in RESTful Actors (scala-jersey) || Viktor Klang || -|| **ADD** || Support for Comet Actors using Atmosphere || Viktor Klang || -|| **ADD** || MongoDB as Akka storage backend || Debasish Ghosh || -|| **ADD** || Redis as Akka storage backend || Debasish Ghosh || -|| **ADD** || Transparent JSON serialization of Scala objects based on SJSON || Debasish Ghosh || -|| **ADD** || Kerberos/SPNEGO support for Security module || Eckhart Hertzler || -|| **ADD** || Implicit sender for remote actors: Remote actors are able to use reply to answer a request || Mikael Högqvist || -|| **ADD** || Support for using the Lift Web framework with Actors || Tim Perrett || -|| **ADD** || Added CassandraSession API (with socket pooling) wrapping Cassandra's Thrift API in Scala and Java APIs || Jonas Bonér || -|| **ADD** || Rewritten STM, now integrated with Multiverse STM || Jonas Bonér || -|| **ADD** || Added STM 
API for atomic {..} and run {..} orElse {..} || Jonas Bonér || -|| **ADD** || Added STM retry || Jonas Bonér || -|| **ADD** || AMQP integration; abstracted as actors in a supervisor hierarchy. Impl AMQP 0.9.1 || Jonas Bonér || -|| **ADD** || Complete rewrite of the persistence transaction management, now based on Unit of Work and Multiverse STM || Jonas Bonér || -|| **ADD** || Monadic API to TransactionalRef (use it in for-comprehension) || Jonas Bonér || -|| **ADD** || Lightweight actor syntax using one of the Actor.actor(..) methods. F.e: 'val a = actor { case _ => .. }' || Jonas Bonér || -|| **ADD** || Rewritten event-based dispatcher which improved performance by 10x, now substantially faster than event-driven Scala Actors || Jonas Bonér || -|| **ADD** || New Scala JSON parser based on sjson || Jonas Bonér || -|| **ADD** || Added zlib compression to remote actors || Jonas Bonér || -|| **ADD** || Added implicit sender reference for fire-forget ('!') message sends || Jonas Bonér || -|| **ADD** || Monadic API to TransactionalRef (use it in for-comprehension) || Jonas Bonér || -|| **ADD** || Smoother web app integration; just add akka.conf to the classpath (WEB-INF/classes), no need for AKKA_HOME or -Dakka.conf=..
|| Jonas Bonér || -|| **ADD** || Modularization of distribution into a thin core (actors, remoting and STM) and the rest in submodules || Jonas Bonér || -|| **ADD** || Added 'forward' to Actor, forwards message but keeps original sender address || Jonas Bonér || -|| **ADD** || JSON serialization for Java objects (using Jackson) || Jonas Bonér || -|| **ADD** || JSON serialization for Scala objects (using SJSON) || Jonas Bonér || -|| **ADD** || Added implementation for remote actor reconnect upon failure || Jonas Bonér || -|| **ADD** || Protobuf serialization for Java and Scala objects || Jonas Bonér || -|| **ADD** || SBinary serialization for Scala objects || Jonas Bonér || -|| **ADD** || Protobuf as remote protocol || Jonas Bonér || -|| **ADD** || Updated Cassandra integration and CassandraSession API to v0.4 || Jonas Bonér || -|| **ADD** || CassandraStorage now works with external Cassandra cluster || Jonas Bonér || -|| **ADD** || ActorRegistry for retrieving Actor instances by class name and by id || Jonas Bonér || -|| **ADD** || SchedulerActor for scheduling periodic tasks || Jonas Bonér || -|| **ADD** || Now start up kernel with 'java -jar dist/akka-0.6.jar' || Jonas Bonér || -|| **ADD** || Added Akka user mailing list: akka-user AT googlegroups DOT com || Jonas Bonér || -|| **ADD** || Improved and restructured documentation || Jonas Bonér || -|| **ADD** || New URL: http://akkasource.org || Jonas Bonér || -|| **ADD** || New and much improved docs || Jonas Bonér || -|| **ADD** || Enhanced trapping of failures: 'trapExit = List(classOf[..], classOf[..])' || Jonas Bonér || -|| **ADD** || Upgraded to Netty 3.2, Protobuf 2.2, ScalaTest 1.0, Jersey 1.1.3, Atmosphere 0.4.1, Cassandra 0.4.1, Configgy 1.4 || Jonas Bonér || -|| **FIX** || Lowered actor memory footprint; now an actor consumes ~600 bytes, which means that you can create 6.5 million on 4 G RAM || Jonas Bonér || -|| **FIX** || Remote actors are now defined by their UUID (not class name) || Jonas Bonér ||
-|| **FIX** || Fixed dispatcher bugs || Jonas Bonér || -|| **FIX** || Cleaned up Maven scripts and distribution in general || Jonas Bonér || -|| **FIX** || Fixed many many bugs and minor issues || Jonas Bonér || -|| **FIX** || Fixed inconsistencies and ugliness in Actors API || Jonas Bonér || -|| **REMOVE** || Removed concurrent mode || Jonas Bonér || -|| **REMOVE** || Removed embedded Cassandra mode || Jonas Bonér || -|| **REMOVE** || Removed the !? method in Actor (synchronous message send), since it's evil. Use !! with time-out instead. || Jonas Bonér || -|| **REMOVE** || Removed startup scripts and lib dir || Jonas Bonér || -|| **REMOVE** || Removed the 'Transient' life-cycle scope since too close to 'Temporary' in semantics. || Jonas Bonér || -|| **REMOVE** || Removed 'Transient' Actors and restart timeout || Jonas Bonér || diff --git a/akka-docs/pending/scheduler.rst b/akka-docs/pending/scheduler.rst deleted file mode 100644 index ac0c7a3a50..0000000000 --- a/akka-docs/pending/scheduler.rst +++ /dev/null @@ -1,16 +0,0 @@ -Scheduler -========= - -Module stability: **SOLID** - -Akka has a little scheduler written using actors. Can be convenient if you want to schedule some periodic task for maintenance or similar. - -It allows you to register a message that you want to be sent to a specific actor at a periodic interval.
Here is an example: - -``_ -//Sends messageToBeSent to receiverActor after initialDelayBeforeSending and then after each delayBetweenMessages -Scheduler.schedule(receiverActor, messageToBeSent, initialDelayBeforeSending, delayBetweenMessages, timeUnit) - -//Sends messageToBeSent to receiverActor after delayUntilSend -Scheduler.scheduleOnce(receiverActor, messageToBeSent, delayUntilSend, timeUnit) -``_ diff --git a/akka-docs/pending/servlet.rst b/akka-docs/pending/servlet.rst deleted file mode 100644 index 6859657a72..0000000000 --- a/akka-docs/pending/servlet.rst +++ /dev/null @@ -1,41 +0,0 @@ -Akka Servlet -============ - -= - -Module stability: **STABLE** - -Akka has a servlet; ‘se.scalablesolutions.akka.comet.AkkaServlet’ that can use to deploy your Akka-based application in an external Servlet container. All you need to do is to add the servlet to the ‘web.xml’, set ‘$AKKA_HOME’ to the root of the distribution (needs the ‘$AKKA_HOME/config/*’ files) and add the JARs in the ‘$AKKA_HOME/lib’ to your classpath (or put them in the ‘WEB-INF/lib’ directory in the WAR file). - -Also, you need to add the Akka initialize/cleanup listener in web.xml - -.. code-block:: xml - - - ... - - se.scalablesolutions.akka.servlet.Initializer - - ... - - -And to support REST actors and/or comet actors, you need to add the following servlet declaration: - -``_ - -... - - Akka - - se.scalablesolutions.akka.comet.AkkaServlet - - se.scalablesolutions.akka.rest.AkkaServlet - - - * - Akka - -... 
- - -``_ diff --git a/akka-docs/pending/stm.rst b/akka-docs/pending/stm.rst deleted file mode 100644 index 8666425289..0000000000 --- a/akka-docs/pending/stm.rst +++ /dev/null @@ -1,60 +0,0 @@ -Akka STM - -The Akka Software Transactional Memory implementation - -**Read consistency** -^^^^^^^^^^^^^^^^^^^^ - -Read consistency means that all values read within a transaction are consistent with each other - -**Read consistency and MVCC** -***************************** - -A lot of STM (like the Clojure STM) implementations are Multi Version Concurrency Control (MVCC) based (TL2 of David Dice could be seen as MVCC). - -To provide read consistency, every ref is augmented with a version field (a long). There also is a logical clock (an AtomicLong for instance) that is incremented every time a transaction does a commit (there are some optimizations) and on all refs written, the version of the ref is updated to this new clock value. - -When a transaction begins, it reads the current version of the clock and makes sure that the versions of the refs it reads are equal or lower than the version of the transaction. If the transaction encounters a ref with a higher value, the transaction is aborted and retried. - -MVCC STMs are relatively simple to write and have some very nice properties: -# readers don’t block writers -# writers don’t block readers -# persistent data-structures are very easy to write since a log can be added to each ref containing older versions of the data - -The problem with MVCC however is that the central clock forms a contention point that makes independent transactional data-structures not linearly scalable. todo: give example of scalability with MVCC. - -So even if you have 2 Threads having their private transactional Ref (so there is no visible contention), under the hood the transactions are still going to contend for the clock.
- -**Read consistency and the Akka STM** -************************************* - -The Akka STM (which is built on top of the Multiverse 0.7 STM) from Akka 1.1 doesn’t use an MVCC based implementation because of the scalability-limiting central clock. - -It uses 2 different mechanisms: -1) For very short transactions it does a full conflict scan every time a new ref is read. Doing a full conflict scan sounds expensive, but it only involves volatile reads. -2) For longer transactions it uses semi visible reads. Every time a read is done, the surplus of readers is incremented and stored in the ref. Once the transaction aborts or commits, the surplus is lowered again. If a transaction does an update, and sees that there is a surplus of readers, it increments a conflict counter. This conflict counter is checked every time a transaction reads a new ref. If it hasn’t changed, no full conflict scan is needed. If it has changed, a full conflict scan is required. If a conflict is detected, the transaction is aborted and retried. This technique is called a semi visible read (we don’t know which transactions are possibly going to encounter a conflict, but we do know if there is at least one possible conflict). - -There are 2 important optimizations to this design: -# Eager full conflict scan -# Read biased refs - -**Eager full conflict scan** -**************************** - -The reason why short transactions always do a full conflict scan is that doing semi visible reads relies on doing more expensive synchronization operations (e.g. doing a cas to increase the surplus of readers, or doing a cas to decrease it). - -**Read biased vs update biased.** -********************************* - -The problem with semi visible reads is that certain structures (e.g. the root of a tree) can form a contention point (because of the arrives/departs) even though it mostly is read.
To reduce contention, a ref can become read biased after a certain number of reads by transactions that use semi visible reads is done. Once it has become read biased, no arrives and departs are required any more, but once it the Ref is updated it will always increment the conflict counter because it doesn’t know if there are any conflicting readers. - -Visible reads, semi visible reads -Read tracking - -strict isolation -eager conflict detection -deferred write, no dirty read possible - -isolation level -optimistic -various levels of pessimistic behavior diff --git a/akka-docs/pending/team.rst b/akka-docs/pending/team.rst deleted file mode 100644 index cdc97244bd..0000000000 --- a/akka-docs/pending/team.rst +++ /dev/null @@ -1,22 +0,0 @@ -Team -===== - -|| **Name** || **Role** || **Email** || -|| Jonas Bonér || Founder, Despot, Committer || jonas AT jonasboner DOT com || -|| Viktor Klang || Bad cop, Committer || viktor DOT klang AT gmail DOT com || -|| Debasish Ghosh || Committer || dghosh AT acm DOT org || -|| Ross McDonald || Alumni || rossajmcd AT gmail DOT com || -|| Eckhart Hertzler || Alumni || || -|| Mikael Högqvist || Alumni || || -|| Tim Perrett || Alumni || || -|| Jeanfrancois Arcand || Alumni || jfarcand AT apache DOT org || -|| Martin Krasser || Committer || krasserm AT googlemail DOT com || -|| Jan Van Besien || Alumni || || -|| Michael Kober || Committer || || -|| Peter Vlugter || Committer || || -|| Peter Veentjer || Committer || || -|| Irmo Manie || Committer || || -|| Heiko Seeberger || Committer || || -|| Hiram Chirino || Committer || || -|| Scott Clasen || Committer || || -|| Roland Kuhn || Committer || || diff --git a/akka-docs/pending/test.rst b/akka-docs/pending/test.rst deleted file mode 100644 index c845cb36d2..0000000000 --- a/akka-docs/pending/test.rst +++ /dev/null @@ -1,55 +0,0 @@ -Testing of Akka -=============== - -Introduction -============ - -Testing concurrent code using time-outs (like Thread.sleep(..)) is usually a bad idea since 
it is both slow and error-prone. There are some frameworks that can help, some are listed below. - -Testing Actor Interaction -========================= - -For Actor interaction, making sure certain message arrives in time etc. we recommend you use Akka's built-in `TestKit `_. If you want to roll your own, you will find helpful abstractions in the `java.util.concurrent` package, most notably `BlockingQueue` and `CountDownLatch`. - -Unit testing of Actors -====================== - -If you need to unit test your actors then the best way to do that would be to decouple it from the Actor by putting it in a regular class/trait, test that, and then mix in the Actor trait when you want to create actors. This is necessary since you can't instantiate an Actor class directly with 'new'. But note that you can't test Actor interaction with this, but only local Actor implementation. Here is an example: - -.. code-block:: scala - - // test this - class MyLogic { - def blabla: Unit = { - ... - } - } - - // run this - actorOf(new MyLogic with Actor { - def receive = { - case Bla => blabla - } - }) - -...or define a non-anonymous MyLogicActor class. - -Akka Expect -=========== - -Expect mimic for testing Akka actors. - -``_ - -Awaitility -========== - -Not a Akka specific testing framework but a nice DSL for testing asynchronous code. -Scala and Java API. - -``_ - -ScalaTest Conductor -=================== - -``_ diff --git a/akka-docs/pending/testkit-example.rst b/akka-docs/pending/testkit-example.rst deleted file mode 100644 index 611ba4dea6..0000000000 --- a/akka-docs/pending/testkit-example.rst +++ /dev/null @@ -1,138 +0,0 @@ -Ray Roestenburg's example code from `his blog `_. 
-``_ -package unit.akka - -import org.scalatest.matchers.ShouldMatchers -import org.scalatest.{WordSpec, BeforeAndAfterAll} -import akka.actor.Actor._ -import akka.util.duration._ -import akka.util.TestKit -import java.util.concurrent.TimeUnit -import akka.actor.{ActorRef, Actor} -import util.Random - -/** - * a Test to show some TestKit examples - */ - -class TestKitUsageSpec extends WordSpec with BeforeAndAfterAll with ShouldMatchers with TestKit { - val echoRef = actorOf(new EchoActor).start() - val forwardRef = actorOf(new ForwardingActor(testActor)).start() - val filterRef = actorOf(new FilteringActor(testActor)).start() - val randomHead = Random.nextInt(6) - val randomTail = Random.nextInt(10) - val headList = List().padTo(randomHead, "0") - val tailList = List().padTo(randomTail, "1") - val seqRef = actorOf(new SequencingActor(testActor, headList, tailList)).start() - - override protected def afterAll(): scala.Unit = { - stopTestActor - echoRef.stop() - forwardRef.stop() - filterRef.stop() - seqRef.stop() - } - - "An EchoActor" should { - "Respond with the same message it receives" in { - within(100 millis) { - echoRef ! "test" - expectMsg("test") - } - } - } - "A ForwardingActor" should { - "Forward a message it receives" in { - within(100 millis) { - forwardRef ! "test" - expectMsg("test") - } - } - } - "A FilteringActor" should { - "Filter all messages, except expected messagetypes it receives" in { - var messages = List[String]() - within(100 millis) { - filterRef ! "test" - expectMsg("test") - filterRef ! 1 - expectNoMsg - filterRef ! "some" - filterRef ! "more" - filterRef ! 1 - filterRef ! "text" - filterRef ! 1 - - receiveWhile(500 millis) { - case msg: String => messages = msg :: messages - } - } - messages.length should be(3) - messages.reverse should be(List("some", "more", "text")) - } - } - "A SequencingActor" should { - "receive an interesting message at some point " in { - within(100 millis) { - seqRef ! 
"something" - ignoreMsg { - case msg: String => msg != "something" - } - expectMsg("something") - ignoreMsg { - case msg: String => msg == "1" - } - expectNoMsg - } - } - } -} - -/** - * An Actor that echoes everything you send to it - */ -class EchoActor extends Actor { - def receive = { - case msg => { - self.reply(msg) - } - } -} - -/** - * An Actor that forwards every message to a next Actor - */ -class ForwardingActor(next: ActorRef) extends Actor { - def receive = { - case msg => { - next ! msg - } - } -} - -/** - * An Actor that only forwards certain messages to a next Actor - */ -class FilteringActor(next: ActorRef) extends Actor { - def receive = { - case msg: String => { - next ! msg - } - case _ => None - } -} - -/** - * An actor that sends a sequence of messages with a random head list, an interesting value and a random tail list - * The idea is that you would like to test that the interesting value is received and that you cant be bothered with the rest - */ -class SequencingActor(next: ActorRef, head: List[String], tail: List[String]) extends Actor { - def receive = { - case msg => { - head map (next ! _) - next ! msg - tail map (next ! _) - } - } -} -``_ diff --git a/akka-docs/pending/testkit.rst b/akka-docs/pending/testkit.rst deleted file mode 100644 index 65aeac00b6..0000000000 --- a/akka-docs/pending/testkit.rst +++ /dev/null @@ -1,49 +0,0 @@ -Actor TestKit -============= - -Module Stability: **In Progress** - -Overview --------- - -Testing actors comprises several aspects, which can have different weight according to the concrete project at hand: -* If you have a collection of actors which performs a certain function, you may want to apply defined stimuli and observe the delivery of the desired result messages to a test actor; in this case the ***TestKit*** trait will likely interest you. 
-* If you encounter undesired behavior (exceptions, dead-locks) and want to nail down the cause, it might help to run the actors in question using the ***CallingThreadDispatcher***; this dispatcher is strictly less powerful than the general purpose ones, but its deterministic behavior and complete message stack can help debugging, unless your setup depends on concurrent execution for correctness. -* For real unit tests of one actor body at a time, there soon will be a special ***TestActorRef*** which allows access to the innards and enables running without a dispatcher. - -TestKit -------- - -The TestKit is a trait which you can mix into your test class to setup a test harness consisting of an test actor, which is implicitly available as sender reference, methods for querying and asserting features of messages received by said actor, and finally methods which provide a DSL for timing assertions. - -Ray Roestenburg has written a great article on using the TestKit: ``_. Here is a short teaser: - -.. code-block:: scala - - class SomeSpec extends WordSpec with MustMatchers with TestKit { - - val worker = actorOf(...) - - "A Worker" must { - "send timely replies" in { - within (50 millis) { - worker ! "some work" - expectMsg("some result") - expectNoMsg - } - } - } - } - -His full example is also available `here `_. - -CallingThreadDispatcher ------------------------ - -This special purpose dispatcher was conceived to enable collection of the full stack trace accumulated during processing of a complete message chain. The idea is to run invocations always on the calling thread, except when the target actor is already running on the current thread; in that case it is necessary to queue the invocation and run it after the current invocation on that actor has finished processing. This design implies that any invocation which blocks waiting on some future action to be done by the current thread will dead-lock. 
Hence, the CallingThreadDispatcher offers strictly more possibilities to dead-lock than a standard dispatcher. - -One nice property is that this feature can help verify that your design is dead-lock free: if you run only on this dispatcher and utilize only one thread, then a successful run implies that for the given set of inputs there cannot be a dead-lock. (This is unfortunately not a hard guarantee, as long as your actor behavior depends on the dispatcher used, e.g. you could sabotage it by explicitly dead-locking only if self.dispatcher != CallingThreadDispatcher.) - -TestActorRef (coming soon ...) ------------------------------- - diff --git a/akka-docs/pending/tutorial-chat-server-java.rst b/akka-docs/pending/tutorial-chat-server-java.rst deleted file mode 100644 index 4f0daaa0de..0000000000 --- a/akka-docs/pending/tutorial-chat-server-java.rst +++ /dev/null @@ -1,7 +0,0 @@ -Tutorial: write a scalable, fault-tolerant, persistent network chat server and client (Java) -============================================================================================ - -Here is a couple of ports of the Scala API chat sample application in the `Scala tutorial `_. 
- -``_ -``_ diff --git a/akka-docs/pending/use-cases.rst b/akka-docs/pending/use-cases.rst deleted file mode 100644 index 8647d0b17c..0000000000 --- a/akka-docs/pending/use-cases.rst +++ /dev/null @@ -1,31 +0,0 @@ -Examples of use-cases for Akka -============================== - -There is a great discussion on use-cases for Akka with some good write-ups by production users here: ``_ - -Here are some of the areas where Akka is being deployed into production ------------------------------------------------------------------------ - -# **Transaction processing (Online Gaming, Finance/Banking, Trading, Statistics, Betting, Social Media, Telecom)** -** Scale up, scale out, fault-tolerance / HA -# **Service backend (any industry, any app)** -** Service REST, SOAP, Cometd, WebSockets etc -** Act as message hub / integration layer -** Scale up, scale out, fault-tolerance / HA -# **Concurrency/parallelism (any app)** -** Correct -** Simple to work with and understand -** Just add the jars to your existing JVM project (use Scala, Java, Groovy or JRuby) -# **Simulation** -** Master/Worker, Compute Grid, MapReduce etc. 
-# **Batch processing (any industry)** -** Camel integration to hook up with batch data sources -** Actors divide and conquer the batch workloads -# **Communications Hub (Telecom, Web media, Mobile media)** -** Scale up, scale out, fault-tolerance / HA -# **Gaming and Betting (MOM, online gaming, betting)** -** Scale up, scale out, fault-tolerance / HA -# **Business Intelligence/Data Mining/general purpose crunching** -** Scale up, scale out, fault-tolerance / HA -# **Complex Event Stream Processing** -** Scale up, scale out, fault-tolerance / HA diff --git a/akka-docs/pending/web.rst b/akka-docs/pending/web.rst deleted file mode 100644 index 7d09ede65c..0000000000 --- a/akka-docs/pending/web.rst +++ /dev/null @@ -1,99 +0,0 @@ -Web Framework Integrations -========================== - -Play Framework -============== - -Home page: ``_ -Akka Play plugin: ``_ -Read more here: ``_ - -Lift Web Framework -================== - -Home page: ``_ - -In order to use Akka with Lift you basically just have to do one thing, add the 'AkkaServlet' to your 'web.xml'. - -web.xml -------- - -.. code-block:: xml - - - - - AkkaServlet - akka.comet.AkkaServlet - - - AkkaServlet - /* - - - - - LiftFilter - Lift Filter - The Filter that intercepts lift calls - net.liftweb.http.LiftFilter - - - LiftFilter - /* - - - -Boot class ----------- - -Lift bootstrap happens in the Lift 'Boot' class. Here is a good place to add Akka specific initialization. For example add declarative supervisor configuration to wire up the initial Actors. -Here is a full example taken from the Akka sample code, found here ``_. - -If a request is processed by Liftweb filter, Akka will not process the request. 
To disable processing of a request by the Lift filter : -* append partial function to LiftRules.liftRequest and return *false* value to disable processing of matching request -* use LiftRules.passNotFoundToChain to chain the request to the Akka filter - -Example of Boot class source code : -``_ -class Boot { - def boot { - // where to search snippet - LiftRules.addToPackages("sample.lift") - - LiftRules.httpAuthProtectedResource.prepend { - case (ParsePath("liftpage" :: Nil, _, _, _)) => Full(AuthRole("admin")) - } - - LiftRules.authentication = HttpBasicAuthentication("lift") { - case ("someuser", "1234", req) => { - Log.info("You are now authenticated !") - userRoles(AuthRole("admin")) - true - } - } - - LiftRules.liftRequest.append { - case Req("liftcount" :: _, _, _) => false - case Req("persistentliftcount" :: _, _, _) => false - } - LiftRules.passNotFoundToChain = true - - // Akka supervisor configuration wiring up initial Actor services - val supervisor = Supervisor( - SupervisorConfig( - RestartStrategy(OneForOne, 3, 100, List(classOf[Exception])), - Supervise( - actorOf[SimpleService], - LifeCycle(Permanent)) :: - Supervise( - actorOf[PersistentSimpleService], - LifeCycle(Permanent)) :: - Nil)) - - // Build SiteMap - // val entries = Menu(Loc("Home", List("index"), "Home")) :: Nil - // LiftRules.setSiteMap(SiteMap(entries:_*)) - } -} -``_ diff --git a/akka-docs/project/index.rst b/akka-docs/project/index.rst new file mode 100644 index 0000000000..b814fd07f1 --- /dev/null +++ b/akka-docs/project/index.rst @@ -0,0 +1,14 @@ +Project Information +=================== + +.. toctree:: + :maxdepth: 2 + + migration-guides + release-notes + scaladoc + other-doc + issue-tracking + licenses + sponsors + links diff --git a/akka-docs/project/issue-tracking.rst b/akka-docs/project/issue-tracking.rst new file mode 100644 index 0000000000..f5d43699f3 --- /dev/null +++ b/akka-docs/project/issue-tracking.rst @@ -0,0 +1,58 @@ +.. 
_issue_tracking: + +Issue Tracking +============== + +Akka is using ``Assembla`` as issue tracking system. + +Browsing +-------- + +Tickets +^^^^^^^ + +`You can find the Akka tickets here `_ + +`You can find the Akka Modules tickets here `_ + +Roadmaps +^^^^^^^^ + +`The roadmap for each Akka milestone is here `_ + +`The roadmap for each Akka Modules milestone is here `_ + +Creating tickets +---------------- + +In order to create tickets you need to do the following: + +`Register here `_ then log in + +For Akka tickets: +`Link to create new ticket `__ + + +For Akka Modules tickets: +`Link to create new ticket `__ + +Thanks a lot for reporting bugs and suggesting features. + +Failing test +------------ + +Please submit a failing test on the following format: + +.. code-block:: scala + + import org.scalatest.WordSpec + import org.scalatest.matchers.MustMatchers + + class Ticket001Spec extends WordSpec with MustMatchers { + + "An XXX" should { + "do YYY" in { + 1 must be (1) + } + } + } diff --git a/akka-docs/pending/licenses.rst b/akka-docs/project/licenses.rst similarity index 99% rename from akka-docs/pending/licenses.rst rename to akka-docs/project/licenses.rst index 02a3a10cec..b7104d9679 100644 --- a/akka-docs/pending/licenses.rst +++ b/akka-docs/project/licenses.rst @@ -1,3 +1,5 @@ +.. _licenses: + Licenses ======== diff --git a/akka-docs/project/links.rst b/akka-docs/project/links.rst new file mode 100644 index 0000000000..7ebe839d8c --- /dev/null +++ b/akka-docs/project/links.rst @@ -0,0 +1,36 @@ +.. _support: + +`Support `__ +========================================= + +`Typesafe `_ + +`Mailing List `_ +========================================================== + +`Akka User Google Group `_ + +`Akka Developer Google Group `_ + + +`Downloads `_ +======================================== + +``_ + + +`Source Code `_ +============================================== + +Akka uses Git and is hosted at `Github `_. 
+ +* Akka: clone the Akka repository from ``_ +* Akka Modules: clone the Akka Modules repository from ``_ + + +`Maven Repository `_ +================================================ + +``_ + + diff --git a/akka-docs/general/migration-guide-0.7.x-0.8.x.rst b/akka-docs/project/migration-guide-0.7.x-0.8.x.rst similarity index 100% rename from akka-docs/general/migration-guide-0.7.x-0.8.x.rst rename to akka-docs/project/migration-guide-0.7.x-0.8.x.rst diff --git a/akka-docs/general/migration-guide-0.8.x-0.9.x.rst b/akka-docs/project/migration-guide-0.8.x-0.9.x.rst similarity index 100% rename from akka-docs/general/migration-guide-0.8.x-0.9.x.rst rename to akka-docs/project/migration-guide-0.8.x-0.9.x.rst diff --git a/akka-docs/general/migration-guide-0.9.x-0.10.x.rst b/akka-docs/project/migration-guide-0.9.x-0.10.x.rst similarity index 100% rename from akka-docs/general/migration-guide-0.9.x-0.10.x.rst rename to akka-docs/project/migration-guide-0.9.x-0.10.x.rst diff --git a/akka-docs/project/migration-guide-1.0.x-1.1.x.rst b/akka-docs/project/migration-guide-1.0.x-1.1.x.rst new file mode 100644 index 0000000000..44f09ea244 --- /dev/null +++ b/akka-docs/project/migration-guide-1.0.x-1.1.x.rst @@ -0,0 +1,78 @@ + +.. _migration-1.1: + +################################ + Migration Guide 1.0.x to 1.1.x +################################ + +**Akka has now moved to Scala 2.9.x** + + +Akka Actor +========== + +- is now dependency free, with the exception of the dependency on the + ``scala-library.jar`` + +- does not bundle any logging anymore, but you can subscribe to events within + Akka by registering an event handler on akka.event.EventHandler or by specifying + the ``FQN`` of an Actor in the akka.conf under akka.event-handlers; there is an + ``akka-slf4j`` module which still provides the Logging trait and a default + ``SLF4J`` logger adapter. + + Don't forget to add a SLF4J backend though, we recommend: + + .. 
code-block:: scala + + lazy val logback = "ch.qos.logback" % "logback-classic" % "0.9.28" % "runtime" + +- If you used HawtDispatcher and want to continue using it, you need to include + akka-dispatcher-extras.jar from Akka Modules, in your akka.conf you need to + specify: ``akka.dispatch.HawtDispatcherConfigurator`` instead of + ``HawtDispatcher`` + +- FSM: the onTransition method changed from Function1 to PartialFunction; there + is an implicit conversion for the precise types in place, but it may be + necessary to add an underscore if you are passing an eta-expansion (using a + method as function value). + + +Akka Typed Actor +================ + +- All methods starting with ``get*`` are deprecated and will be removed in post + 1.1 release. + + + +Akka Remote +=========== + +- ``UnparsebleException`` has been renamed to + ``CannotInstantiateRemoteExceptionDueToRemoteProtocolParsingErrorException(exception, + classname, message)`` + + +Akka HTTP +========= + +- akka.servlet.Initializer has been moved to ``akka-kernel`` to be able to have + ``akka-http`` not depend on ``akka-remote``. If you don't want to use the class + for kernel, just create your own version of ``akka.servlet.Initializer``, it's + just a couple of lines of code and there are instructions in + the :ref:`http-module` docs. + +- akka.http.ListWriter has been removed in full, if you use it and want to keep + using it, here's the code: `ListWriter`_. + +- Jersey-server is now a "provided" dependency for ``akka-http``, so you'll need + to add the dependency to your project, it's built against Jersey 1.3 + +.. _ListWriter: https://github.com/jboner/akka/blob/v1.0/akka-http/src/main/scala/akka/http/ListWriter.scala + + +Akka Testkit +============ + +- The TestKit moved into the akka-testkit subproject and correspondingly into the + ``akka.testkit`` package. 
diff --git a/akka-docs/general/migration-guides.rst b/akka-docs/project/migration-guides.rst similarity index 92% rename from akka-docs/general/migration-guides.rst rename to akka-docs/project/migration-guides.rst index ed9c1de270..bf43d939d5 100644 --- a/akka-docs/general/migration-guides.rst +++ b/akka-docs/project/migration-guides.rst @@ -1,10 +1,12 @@ +.. _migration: + Migration Guides ================ .. toctree:: :maxdepth: 1 - migration-guide-0.7.x-0.8.x - migration-guide-0.8.x-0.9.x - migration-guide-0.9.x-0.10.x migration-guide-1.0.x-1.1.x + migration-guide-0.9.x-0.10.x + migration-guide-0.8.x-0.9.x + migration-guide-0.7.x-0.8.x diff --git a/akka-docs/project/other-doc.rst b/akka-docs/project/other-doc.rst new file mode 100644 index 0000000000..7abfc8c8df --- /dev/null +++ b/akka-docs/project/other-doc.rst @@ -0,0 +1,34 @@ + +.. _other-doc: + +################################## + Documentation for Other Versions +################################## + + +Akka Snapshot +============= + +Automatically published documentation for the latest SNAPSHOT version of Akka can +be found here: + +- Akka - http://akka.io/docs/akka/snapshot/ (or in `PDF format `__) +- Akka Modules - http://akka.io/docs/akka-modules/snapshot/ (or in `PDF format `__) + + + +Release Versions +================ + +1.1 +--- + +- Akka 1.1 - http://akka.io/docs/akka/1.1/ (or in `PDF format `__) +- Akka Modules 1.1 - http://akka.io/docs/akka-modules/1.1/ (or in `PDF format `__) + +1.0 +--- + +- Akka 1.0 - http://akka.io/docs/akka-1.0/Home.html (or in `PDF format `__) + + diff --git a/akka-docs/project/release-notes.rst b/akka-docs/project/release-notes.rst new file mode 100644 index 0000000000..32b088c3a2 --- /dev/null +++ b/akka-docs/project/release-notes.rst @@ -0,0 +1,538 @@ +Release Notes +============== + +Changes listed in no particular order. 
+ +Current Development 1.1-SNAPSHOT +---------------------------------------- + +- **UPD** - improve FSM DSL: make onTransition syntax nicer (Roland Kuhn) + +Release 1.1-M1 +-------------------- + +- **ADD** - #647 Extract an akka-camel-typed module out of akka-camel for optional typed actor support (Martin Krasser) +- **ADD** - #654 Allow consumer actors to acknowledge in-only message exchanges (Martin Krasser) +- **ADD** - #669 Support self.reply in preRestart and postStop after exception in receive (Martin Krasser) +- **ADD** - #682 Support for fault-tolerant Producer actors (Martin Krasser) +- **ADD** - Move TestKit to akka-testkit and add CallingThreadDispatcher (Roland Kuhn) +- **ADD** - Remote Client message buffering transaction log for buffering messages failed to send due to network problems. Flushes the buffer on reconnect. (Jonas Bonér) +- **ADD** - Added trait simulate network problems/errors to be used for remote actor testing (Jonas Bonér) +- **ADD** - Add future and await methods to Agent (Peter Vlugter) +- **ADD** - #586 Allow explicit reconnect for RemoteClient (Viktor Klang) +- **ADD** - #587 Dead letter sink queue for messages sent through RemoteClient that didn't get sent due to connection failure (Viktor Klang) +- **ADD** - #598 actor.id when using akka-spring should be the id of the spring bean (Viktor Klang) +- **ADD** - #652 Reap expired futures from ActiveRemoteClientHandler (Viktor Klang) +- **ADD** - #656 Squeeze more out of EBEDD? 
(Viktor Klang) +- **ADD** - #715 EventHandler.error should be usable without Throwable (Viktor Klang) +- **ADD** - #717 Add ExecutionHandler to NettyRemoteServer for more performance and scalability (Viktor Klang) +- **ADD** - #497 Optimize remote sends done in local scope (Viktor Klang) +- **ADD** - #633 Add support for Scalaz in akka-modules (Derek Williams) +- **ADD** - #677 Add map, flatMap, foreach, and filter to Future (Derek Williams) +- **ADD** - #661 Optimized Future's internals (Derek Williams) +- **ADD** - #685 Optimize execution of Futures (Derek Williams) +- **ADD** - #711 Make Future.completeWith work with an uncompleted Future (Derek Williams) +- **UPD** - #667 Upgrade to Camel 2.7.0 (Martin Krasser) +- **UPD** - Updated HawtDispatch to 1.1 (Hiram Chirino) +- **UPD** - #688 Update Akka 1.1-SNAPSHOT to Scala 2.9.0-RC1 (Viktor Klang) +- **UPD** - #718 Add HawtDispatcher to akka-modules (Viktor Klang) +- **UPD** - #698 Deprecate client-managed actors (Viktor Klang) +- **UPD** - #730 Update Akka and Akka Modules to SBT 0.7.6-RC0 (Viktor Klang) +- **UPD** - #663 Update to latest scalatest (Derek Williams) +- **FIX** - Misc cleanup, API changes and refactorings (Jonas Bonér) +- **FIX** - #675 preStart() is called twice when creating new instance of TypedActor (Debasish Ghosh) +- **FIX** - #704 Write docs for Java Serialization (Debasish Ghosh) +- **FIX** - #645 Change Futures.awaitAll to not throw FutureTimeoutException but return a List[Option[Any]] (Viktor Klang) +- **FIX** - #681 Clean exit using server-managed remote actor via client (Viktor Klang) +- **FIX** - #720 Connection loss when sending to a dead remote actor (Viktor Klang) +- **FIX** - #593 Move Jetty specific stuff (with deps) from akka-http to akka-kernel (Viktor Klang) +- **FIX** - #638 ActiveRemoteClientHandler - Unexpected exception from downstream in remote client (Viktor Klang) +- **FIX** - #655 Remote actors with non-uuid names doesnt work for req./reply-pattern (Viktor Klang) +- 
**FIX** - #588 RemoteClient.shutdown does not remove client from Map with clients (Viktor Klang) +- **FIX** - #672 Remoting breaks if mutual DNS lookup isn't possible (Viktor Klang) +- **FIX** - #699 Remote typed actor per-session server won't start if called method has no result (Viktor Klang) +- **FIX** - #702 Handle ReadTimeoutException in akka-remote (Viktor Klang) +- **FIX** - #708 Fall back to Akka classloader if event-handler class cannot be found. (Viktor Klang) +- **FIX** - #716 Split akka-http and clean-up dependencies (Viktor Klang) +- **FIX** - #721 Inability to parse/load the Config should do a System.exit(-1) (Viktor Klang) +- **FIX** - #722 Race condition in Actor hotswapping (Viktor Klang) +- **FIX** - #723 MessageSerializer CNFE regression (Viktor Klang) +- **FIX** - #680 Remote TypedActor behavior differs from local one when sending to generic interfaces (Viktor Klang) +- **FIX** - #659 Calling await on a Future that is expired and uncompleted should throw an exception (Derek Williams) +- **REM** - #626 Update and clean up dependencies (Viktor Klang) +- **REM** - #623 Remove embedded-repo (Akka + Akka Modules) (Viktor Klang) +- **REM** - #686 Remove SBinary (Viktor Klang) + +Release 1.0-RC6 +---------------------------------------- + +- **FIX** - #628 Supervised TypedActors fails to restart (Viktor Klang) +- **FIX** - #629 Stuck upon actor invocation (Viktor Klang) + +Release 1.0-RC5 +---------------------------------------- + +- **FIX** - Source JARs published to 'src' instead of 'source' || Odd Moller || +- **FIX** - #612 Conflict between Spring autostart=true for Consumer actors and (Martin Krasser) +- **FIX** - #613 Change Akka XML schema URI to http://akka.io/schema/akka (Martin Krasser) +- **FIX** - Spring XSD namespace changed from 'akkasource.org' to 'akka.io' (Viktor Klang) +- **FIX** - Checking for remote secure cookie is disabled by default if no akka.conf is loaded (Viktor Klang) +- **FIX** - Changed Casbah to ScalaToolsRepo for
akka-sbt-plugin (Viktor Klang) +- **FIX** - ActorRef.forward now doesn't require the sender to be set on the message (Viktor Klang) + +Release 1.0-RC3 +---------------------------------------- + +- **ADD** - #568 Add autostart attribute to Spring actor configuration (Viktor Klang) +- **ADD** - #586 Allow explicit reconnect for remote clients (Viktor Klang) +- **ADD** - #587 Add possibility for dead letter queues for failed remote sends (Viktor Klang) +- **ADD** - #497 Optimize remote send in local scope (Viktor Klang) +- **ADD** - Improved Java Actor API: akka.actor.Actors (Viktor Klang) +- **ADD** - Improved Scala Actor API: akka.actor.Actor (Viktor Klang) +- **ADD** - #148 Create a testing framework for testing Actors (Roland Kuhn) +- **ADD** - Support Replica Set/Replica Pair connection modes with MongoDB Persistence || Brendan McAdams || +- **ADD** - User configurable Write Concern settings for MongoDB Persistence || Brendan McAdams || +- **ADD** - Support for configuring MongoDB Persistence with MongoDB's URI Connection String || Brendan McAdams || +- **ADD** - Support for Authentication with MongoDB Persistence || Brendan McAdams || +- **FIX** - Misc bug fixes || Team || +- **FIX** - #603 Race condition in Remote send (Viktor Klang) +- **FIX** - #594 Log statement in RemoteClientHandler was wrongly formatted (Viktor Klang) +- **FIX** - #580 Message uuids must be generated (Viktor Klang) +- **FIX** - #583 Serialization classloader has a visibility issue (Viktor Klang) +- **FIX** - #598 By default the bean ID should become the actor id for Spring actor configuration (Viktor Klang) +- **FIX** - #577 RemoteClientHandler swallows certain exceptions (Viktor Klang) +- **FIX** - #581 Fix edgecase where an exception could not be deserialized (Viktor Klang) +- **FIX** - MongoDB write success wasn't being properly checked; fixed (integrated w/ new write concern features) || Brendan McAdams || +- **UPD** - Improvements to FSM module akka.actor.FSM || Manie & Kuhn || +- 
**UPD** - Changed Akka URI to http://akka.io. Reflects both XSDs, Maven repositories etc. (Jonas Bonér) +- **REM** - #574 Remote RemoteClient, RemoteServer and RemoteNode (Viktor Klang) +- **REM** - object UntypedActor, object ActorRegistry, class RemoteActor, class RemoteUntypedActor, class RemoteUntypedConsumerActor (Viktor Klang) + +Release 1.0-RC1 +---------------------------------------- + +- **ADD** - #477 Added support for Remote Agents (Viktor Klang) +- **ADD** - #460 Hotswap for Java API (UntypedActor) (Viktor Klang) +- **ADD** - #471 Added support for TypedActors to return Java Option (Viktor Klang) +- **ADD** - New design and API for more fluent and intuitive FSM module (Roland Kuhn) +- **ADD** - Added secure cookie based remote node authentication (Jonas Bonér) +- **ADD** - Untrusted safe mode for remote server (Jonas Bonér) +- **ADD** - Refactored config file format - added list of enabled modules etc. (Jonas Bonér) +- **ADD** - Docs for Dataflow Concurrency (Jonas Bonér) +- **ADD** - Made remote message frame size configurable (Jonas Bonér) +- **ADD** - #496 Detect when Remote Client disconnects (Jonas Bonér) +- **ADD** - #472 Improve API to wait for endpoint activation/deactivation (`more `__ ...) (Martin Krasser) +- **ADD** - #473 Allow consumer actors to customize their own routes (`more `__ ...) 
(Martin Krasser) +- **ADD** - #504 Add session bound server managed remote actors || Paul Pach || +- **ADD** - DSL for FSM (Irmo Manie) +- **ADD** - Shared unit test for all dispatchers to enforce Actor Model (Viktor Klang) +- **ADD** - #522 Make stacking optional for become and HotSwap (Viktor Klang) +- **ADD** - #524 Make frame size configurable for client&server (Bonér & Klang) +- **ADD** - #526 Add onComplete callback to Future (Viktor Klang) +- **ADD** - #536 Document Channel-abstraction for later replies (Viktor Klang) +- **ADD** - #540 Include self-reference as parameter to HotSwap (Viktor Klang) +- **ADD** - #546 Include Garrick Evans' Akka-mist into master (Viktor Klang) +- **ADD** - #438 Support remove operation in PersistentVector (Scott Clasen) +- **ADD** - #229 Memcached protocol support for Persistence module (Scott Clasen) +- **ADD** - Amazon SimpleDb support for Persistence module (Scott Clasen) +- **FIX** - #518 refactor common storage bakend to use bulk puts/gets where possible (Scott Clasen) +- **FIX** - #532 Prevent persistent datatypes with same uuid from corrupting a TX (Scott Clasen) +- **FIX** - #464 ThreadPoolBuilder should be rewritten to be an immutable builder (Viktor Klang) +- **FIX** - #449 Futures.awaitOne now uses onComplete listeners (Viktor Klang) +- **FIX** - #486 Fixed memory leak caused by Configgy that prevented full unload (Viktor Klang) +- **FIX** - #488 Fixed race condition in EBEDD restart (Viktor Klang) +- **FIX** - #492 Fixed race condition in Scheduler (Viktor Klang) +- **FIX** - #493 Switched to non-https repository for JBoss artifacts (Viktor Klang) +- **FIX** - #481 Exception when creating an actor now behaves properly when supervised (Viktor Klang) +- **FIX** - #498 Fixed no-op in supervision DSL (Viktor Klang) +- **FIX** - #491 ``reply`` and ``reply_?`` now sets a sender reference (Viktor Klang) +- **FIX** - #519 NotSerializableError when using Remote Typed Actors (Viktor Klang) +- **FIX** - #523 Message.toString is 
called all the time for incoming messages, expensive (Viktor Klang) +- **FIX** - #537 Make sure top folder is included in sources jar (Viktor Klang) +- **FIX** - #529 Remove Scala version number from Akka artifact ids (Viktor Klang) +- **FIX** - #533 Can't set LifeCycle from the Java API (Viktor Klang) +- **FIX** - #542 Make Future-returning Remote Typed Actor methods use onComplete (Viktor Klang) +- **FIX** - #479 Do not register listeners when CamelService is turned off by configuration (Martin Krasser) +- **FIX** - Fixed bug with finding TypedActor by type in ActorRegistry (Jonas Bonér) +- **FIX** - #515 race condition in FSM StateTimeout Handling (Irmo Manie) +- **UPD** - Akka package from "se.scalablesolutions.akka" to "akka" (Viktor Klang) +- **UPD** - Update Netty to 3.2.3.Final (Viktor Klang) +- **UPD** - #458 Camel to 2.5.0 (Martin Krasser) +- **UPD** - #458 Spring to 3.0.4.RELEASE (Martin Krasser) +- **UPD** - #458 Jetty to 7.1.6.v20100715 (Martin Krasser) +- **UPD** - Update to Scala 2.8.1 (Jonas Bonér) +- **UPD** - Changed remote server default port to 2552 (AKKA) (Jonas Bonér) +- **UPD** - Cleaned up and made remote protocol more efficient (Jonas Bonér) +- **UPD** - #528 RedisPersistentRef should not throw in case of missing key (Debasish Ghosh) +- **UPD** - #531 Fix RedisStorage add() method in Java API (Debasish Ghosh) +- **UPD** - #513 Implement snapshot based persistence control in SortedSet (Debasish Ghosh) +- **UPD** - #547 Update FSM docs (Irmo Manie) +- **UPD** - #548 Update AMQP docs (Irmo Manie) +- **REM** - Atmosphere integration, replace with Mist (Klang @ Evans) +- **REM** - JGroups integration, doesn't play with cloud services :/ (Viktor Klang) + +Release 1.0-MILESTONE1 +---------------------------------------- + +- **ADD** - Split akka-core up in akka-actor, akka-typed-actor & akka-remote (Jonas Bonér) +- **ADD** - Added meta-data to network protocol (Jonas Bonér) +- **ADD** - HotSwap and actor.become now uses a stack of 
PartialFunctions with API for pushing and popping the stack (Jonas Bonér) +- **ADD** - #440 Create typed actors with constructor args (Michael Kober) +- **ADD** - #322 Abstraction for unification of sender and senderFuture for later reply (Michael Kober) +- **ADD** - #364 Serialization for TypedActor proxy reference (Michael Kober) +- **ADD** - #423 Support configuration of Akka via Spring (Michael Kober) +- **FIX** - #426 UUID wrong for remote proxy for server managed actor (Michael Kober) +- **ADD** - #378 Support for server initiated remote TypedActor and UntypedActor in Spring config (Michael Kober) +- **ADD** - #194 Support for server-managed typed actor ||< Michael Kober || +- **ADD** - #447 Allow Camel service to be turned off by configuration (Martin Krasser) +- **ADD** - #457 JavaAPI improvements for akka-camel (please read the `migration guide `_) (Martin Krasser) +- **ADD** - #465 Dynamic message routing to actors (`more `__ ...) (Martin Krasser) +- **FIX** - #410 Use log configuration from config directory (Martin Krasser) +- **FIX** - #343 Some problems with persistent structures (Debasish Ghosh) +- **FIX** - #430 Refactor / re-implement MongoDB adapter so that it conforms to the guidelines followed in Redis and Cassandra modules (Debasish Ghosh) +- **FIX** - #436 ScalaJSON serialization does not map Int data types properly when used within a Map (Debasish Ghosh) +- **ADD** - #230 Update redisclient to be Redis 2.0 compliant (Debasish Ghosh) +- **FIX** - #435 Mailbox serialization does not retain messages (Debasish Ghosh) +- **ADD** - #445 Integrate type class based serialization of sjson into Akka (Debasish Ghosh) +- **FIX** - #480: Regression multibulk replies redis client (Debasish Ghosh) +- **FIX** - #415 Publish now generate source and doc jars (Viktor Klang) +- **FIX** - #420 REST endpoints should be able to be processed in parallel (Viktor Klang) +- **FIX** - #422 Dispatcher config should work for ThreadPoolBuilder-based dispatchers (Viktor 
Klang) +- **FIX** - #401 ActorRegistry should not leak memory (Viktor Klang) +- **FIX** - #250 Performance optimization for ExecutorBasedEventDrivenDispatcher (Viktor Klang) +- **FIX** - #419 Rename init and shutdown callbacks to preStart and postStop, and remove initTransactionalState (Viktor Klang) +- **FIX** - #346 Make max no of restarts (and within) are now both optional (Viktor Klang) +- **FIX** - #424 Actors self.supervisor not set by the time init() is called when started by startLink() (Viktor Klang) +- **FIX** - #427 spawnLink and startLink now has the same dispatcher semantics (Viktor Klang) +- **FIX** - #413 Actor shouldn't process more messages when waiting to be restarted (HawtDispatcher still does) (Viktor Klang) +- **FIX** - !! and !!! now do now not block the actor when used in remote actor (Viktor Klang) +- **FIX** - RemoteClient now reconnects properly (Viktor Klang) +- **FIX** - Logger.warn now properly works with varargs (Viktor Klang) +- **FIX** - #450 Removed ActorRef lifeCycle boilerplate: Some(LifeCycle(Permanent)) => Permanent (Viktor Klang) +- **FIX** - Moved ActorRef.trapExit into ActorRef.faultHandler and removed Option-boilerplate from faultHandler (Viktor Klang) +- **FIX** - ThreadBasedDispatcher cheaper for idling actors, also benefits from all that is ExecutorBasedEventDrivenDispatcher (Viktor Klang) +- **FIX** - Fixing Futures.future, uses Actor.spawn under the hood, specify dispatcher to control where block is executed (Viktor Klang) +- **FIX** - #469 Akka "dist" now uses a root folder to avoid loitering if unzipped in a folder (Viktor Klang) +- **FIX** - Removed ScalaConfig, JavaConfig and rewrote Supervision configuration (Viktor Klang) +- **UPD** - Jersey to 1.3 (Viktor Klang) +- **UPD** - Atmosphere to 0.6.2 (Viktor Klang) +- **UPD** - Netty to 3.2.2.Final (Viktor Klang) +- **ADD** - Changed config file priority loading and added config modes. 
(Viktor Klang) +- **ADD** - #411 Bumped Jetty to v 7 and migrated to it's eclipse packages (Viktor Klang) +- **ADD** - #414 Migrate from Grizzly to Jetty for Akka Microkernel (Viktor Klang) +- **ADD** - #261 Add Java API for 'routing' module (Viktor Klang) +- **ADD** - #262 Add Java API for Agent (Viktor Klang) +- **ADD** - #264 Add Java API for Dataflow (Viktor Klang) +- **ADD** - Using JerseySimpleBroadcaster instead of JerseyBroadcaster in AkkaBroadcaster (Viktor Klang) +- **ADD** - #433 Throughput deadline added for ExecutorBasedEventDrivenDispatcher (Viktor Klang) +- **ADD** - Add possibility to set default cometSupport in akka.conf (Viktor Klang) +- **ADD** - #451 Added possibility to use akka-http as a standalone REST server (Viktor Klang) +- **ADD** - #446 Added support for Erlang-style receiveTimeout (Viktor Klang) +- **ADD** - #462 Added support for suspend/resume of processing individual actors mailbox, should give clearer restart semantics (Viktor Klang) +- **ADD** - #466 Actor.spawn now takes an implicit dispatcher to specify who should run the block (Viktor Klang) +- **ADD** - #456 Added map to Future and Futures.awaitMap (Viktor Klang) +- **REM** - #418 Remove Lift sample module and docs (Viktor Klang) +- **REM** - Removed all Reactor-based dispatchers (Viktor Klang) +- **REM** - Removed anonymous actor factories (Viktor Klang) +- **ADD** - Voldemort support for akka-persistence (Scott Clasen) +- **ADD** - HBase support for akka-persistence (David Greco) +- **ADD** - CouchDB support for akka-persistence (Yung-Luen Lan & Kahlen) +- **ADD** - #265 Java API for AMQP module (Irmo Manie) + +Release 0.10 - Aug 21 2010 +---------------------------------------- + +- **ADD** - Added new Actor type: UntypedActor for Java API (Jonas Bonér) +- **ADD** - #26 Deep serialization of Actor including its mailbox (Jonas Bonér) +- **ADD** - Rewritten network protocol. More efficient and cleaner. 
(Jonas Bonér) +- **ADD** - Rewritten Java Active Object tests into Scala to be able to run them in SBT. (Jonas Bonér) +- **ADD** - Added isDefinedAt method to Actor for checking if it can receive a certain message (Jonas Bonér) +- **ADD** - Added caching of Active Object generated class bytes, huge perf improvement (Jonas Bonér) +- **ADD** - Added RemoteClient Listener API (Jonas Bonér) +- **ADD** - Added methods to retrieve children from a Supervisor (Jonas Bonér) +- **ADD** - Rewritten Supervisor to become more clear and "correct" (Jonas Bonér) +- **ADD** - Added options to configure a blocking mailbox with custom capacity (Jonas Bonér) +- **ADD** - Added RemoteClient reconnection time window configuration option (Jonas Bonér) +- **ADD** - Added ActiveObjectContext with sender reference etc (Jonas Bonér) +- **ADD** - #293 Changed config format to JSON-style (Jonas Bonér) +- **ADD** - #302: Incorporate new ReceiveTimeout in Actor serialization (Jonas Bonér) +- **ADD** - Added Java API docs and made it comparable with Scala API docs. 1-1 mirroring (Jonas Bonér) +- **ADD** - Renamed Active Object to Typed Actor (Jonas Bonér) +- **ADD** - Enhanced Typed Actor: remoting, "real" restart upon failure etc. (Jonas Bonér) +- **ADD** - Typed Actor now inherits Actor and is a full citizen in the Actor world. (Jonas Bonér) +- **ADD** - Added support for remotely shutting down a remote actor (Jonas Bonér) +- **ADD** - #224 Add support for Camel in typed actors (`more `__ ...) (Martin Krasser) +- **ADD** - #282 Producer trait should implement Actor.receive (`more `__...) 
(Martin Krasser) +- **ADD** - #271 Support for bean scope prototype in akka-spring (Johan Rask) +- **ADD** - Support for DI of values and bean references on target instance in akka-spring (Johan Rask) +- **ADD** - #287 Method annotated with @postrestart in ActiveObject is not called during restart (Johan Rask) +- **ADD** - Support for ApplicationContextAware in akka-spring (Johan Rask) +- **ADD** - #199 Support shutdown hook in TypedActor (Martin Krasser) +- **ADD** - #266 Access to typed actors from user-defined Camel routes (`more `__ ...) (Martin Krasser) +- **ADD** - #268 Revise akka-camel documentation (`more `__ ...) (Martin Krasser) +- **ADD** - #289 Support for Spring configuration element (`more `__ ...) (Martin Krasser) +- **ADD** - #296 TypedActor lifecycle management (Martin Krasser) +- **ADD** - #297 Shutdown routes to typed actors (`more `__ ...) (Martin Krasser) +- **ADD** - #314 akka-spring to support typed actor lifecycle management (`more `__ ...) (Martin Krasser) +- **ADD** - #315 akka-spring to support configuration of shutdown callback method (`more `__ ...) (Martin Krasser) +- **ADD** - Fault-tolerant consumer actors and typed consumer actors (`more `__ ...) (Martin Krasser) +- **ADD** - #320 Leverage Camel's non-blocking routing engine (`more `__ ...) (Martin Krasser) +- **ADD** - #335 Producer trait should allow forwarding of results (Martin Krasser) +- **ADD** - #339 Redesign of Producer trait (pre/post processing hooks, async in-out) (`more `__ ...) (Martin Krasser) +- **ADD** - Non-blocking, asynchronous routing example for akka-camel (`more `__ ...) (Martin Krasser) +- **ADD** - #333 Allow applications to wait for endpoints being activated (`more `__ ...) 
(Martin Krasser) +- **ADD** - #356 Support @consume annotations on typed actor implementation class (Martin Krasser) +- **ADD** - #357 Support untyped Java actors as endpoint consumer (Martin Krasser) +- **ADD** - #366 CamelService should be a singleton (Martin Krasser) +- **ADD** - #392 Support untyped Java actors as endpoint producer (Martin Krasser) +- **ADD** - #393 Redesign CamelService singleton to be a CamelServiceManager (`more `__ ...) (Martin Krasser) +- **ADD** - #295 Refactoring Actor serialization to type classes (Debasish Ghosh) +- **ADD** - #317 Change documentation for Actor Serialization (Debasish Ghosh) +- **ADD** - #388 Typeclass serialization of ActorRef/UntypedActor isn't Java friendly (Debasish Ghosh) +- **ADD** - #292 Add scheduleOnce to Scheduler (Irmo Manie) +- **ADD** - #308 Initial receive timeout on actor (Irmo Manie) +- **ADD** - Redesign of AMQP module (`more `__ ...) (Irmo Manie) +- **ADD** - Added "become(behavior: Option[Receive])" to Actor (Viktor Klang) +- **ADD** - Added "find[T](f: PartialFunction[ActorRef,T]) : Option[T]" to ActorRegistry (Viktor Klang) +- **ADD** - #369 Possibility to configure dispatchers in akka.conf (Viktor Klang) +- **ADD** - #395 Create ability to add listeners to RemoteServer (Viktor Klang) +- **ADD** - #225 Add possibility to use Scheduler from TypedActor (Viktor Klang) +- **ADD** - #61 Integrate new persistent datastructures in Scala 2.8 (Peter Vlugter) +- **ADD** - Expose more of what Multiverse can do (Peter Vlugter) +- **ADD** - #205 STM transaction settings (Peter Vlugter) +- **ADD** - #206 STM transaction deferred and compensating (Peter Vlugter) +- **ADD** - #232 Expose blocking transactions (Peter Vlugter) +- **ADD** - #249 Expose Multiverse Refs for primitives (Peter Vlugter) +- **ADD** - #390 Expose transaction propagation level in multiverse (Peter Vlugter) +- **ADD** - Package objects for importing local/global STM (Peter Vlugter) +- **ADD** - Java API for the STM (Peter Vlugter) +- **ADD** 
- #379 Create STM Atomic templates for Java API (Peter Vlugter) +- **ADD** - #270 SBT plugin for Akka (Peter Vlugter) +- **ADD** - #198 support for ThreadBasedDispatcher in Spring config (Michael Kober) +- **ADD** - #377 support HawtDispatcher in Spring config (Michael Kober) +- **ADD** - #376 support Spring config for untyped actors (Michael Kober) +- **ADD** - #200 support WorkStealingDispatcher in Spring config (Michael Kober) +- **UPD** - #336 RabbitMQ 1.8.1 (Irmo Manie) +- **UPD** - #288 Netty to 3.2.1.Final (Viktor Klang) +- **UPD** - Atmosphere to 0.6.1 (Viktor Klang) +- **UPD** - Lift to 2.8.0-2.1-M1 (Viktor Klang) +- **UPD** - Camel to 2.4.0 (Martin Krasser) +- **UPD** - Spring to 3.0.3.RELEASE (Martin Krasser) +- **UPD** - Multiverse to 0.6 (Peter Vlugter) +- **FIX** - Fixed bug with stm not being enabled by default when no AKKA_HOME is set (Jonas Bonér) +- **FIX** - Fixed bug in network manifest serialization (Jonas Bonér) +- **FIX** - Fixed bug Remote Actors (Jonas Bonér) +- **FIX** - Fixed memory leak in Active Objects (Jonas Bonér) +- **FIX** - Fixed indeterministic deadlock in Transactor restart (Jonas Bonér) +- **FIX** - #325 Fixed bug in STM with dead hanging CountDownCommitBarrier (Jonas Bonér) +- **FIX** - #316: NoSuchElementException during ActiveObject restart (Jonas Bonér) +- **FIX** - #256: Tests for ActiveObjectContext (Jonas Bonér) +- **FIX** - Fixed bug in restart of Actors with 'Temporary' life-cycle (Jonas Bonér) +- **FIX** - #280 Tests fail if there is no akka.conf set (Jonas Bonér) +- **FIX** - #286 unwanted transitive dependencies from Geronimo project (Viktor Klang) +- **FIX** - Atmosphere comet comment to use stream instead of writer (Viktor Klang) +- **FIX** - #285 akka.conf is now used as defaults for Akka REST servlet init parameters (Viktor Klang) +- **FIX** - #321 fixed performance regression in ActorRegistry (Viktor Klang) +- **FIX** - #286 geronimo servlet 2.4 dep is no longer transitively loaded (Viktor Klang) +- **FIX** - 
#334 partial lift sample rewrite to fix breakage (Viktor Klang) +- **FIX** - Fixed a memory leak in ActorRegistry (Viktor Klang) +- **FIX** - Fixed a race-condition in Cluster (Viktor Klang) +- **FIX** - #355 Switched to Array instead of List on ActorRegistry return types (Viktor Klang) +- **FIX** - #352 ActorRegistry.actorsFor(class) now checks isAssignableFrom (Viktor Klang) +- **FIX** - Fixed a race condition in ActorRegistry.register (Viktor Klang) +- **FIX** - #337 Switched from Configgy logging to SLF4J, better for OSGi (Viktor Klang) +- **FIX** - #372 Scheduler now returns Futures to cancel tasks (Viktor Klang) +- **FIX** - #306 JSON serialization between remote actors is not transparent (Debasish Ghosh) +- **FIX** - #204 Reduce object creation in STM (Peter Vlugter) +- **FIX** - #253 Extend Multiverse BasicRef rather than wrap ProgrammaticRef (Peter Vlugter) +- **REM** - Removed pure POJO-style Typed Actor (old Active Object) (Jonas Bonér) +- **REM** - Removed Lift as a dependency for Akka-http (Viktor Klang) +- **REM** - #294 Remove ``reply`` and ``reply_?`` from Actor (Viktor Klang) +- **REM** - Removed one field in Actor, should be a minor memory reduction for high actor quantities (Viktor Klang) +- **FIX** - #301 DI does not work in akka-spring when specifying an interface (Johan Rask) +- **FIX** - #328 trapExit should pass through self with Exit to supervisor (Irmo Manie) +- **FIX** - Fixed warning when deregistering listeners (Martin Krasser) +- **FIX** - Added camel-jetty-2.4.0.1 to Akka's embedded-repo. 
(fixes a concurrency bug in camel-jetty-2.4.0, to be officially released in Camel 2.5.0) (Martin Krasser) +- **FIX** - #338 RedisStorageBackend fails when redis closes connection to idle client (Debasish Ghosh) +- **FIX** - #340 RedisStorage Map.get does not throw exception when disconnected from redis but returns None (Debasish Ghosh) + +Release 0.9 - June 2nd 2010 +---------------------------------------- + +- **ADD** - Serializable, immutable, network-aware ActorRefs (Jonas Bonér) +- **ADD** - Optionally JTA-aware STM transactions (Jonas Bonér) +- **ADD** - Rewritten supervisor management, making use of ActorRef, now really kills the Actor instance and replaces it (Jonas Bonér) +- **ADD** - Allow linking and unlinking a declaratively configured Supervisor (Jonas Bonér) +- **ADD** - Remote protocol rewritten to allow passing along sender reference in all situations (Jonas Bonér) +- **ADD** - #37 API for JTA usage (Jonas Bonér) +- **ADD** - Added user accessible 'sender' and 'senderFuture' references (Jonas Bonér) +- **ADD** - Sender actor is now passed along for all message send functions (!, !!, !!!, forward) (Jonas Bonér) +- **ADD** - Subscription API for listening to RemoteClient failures (Jonas Bonér) +- **ADD** - Implemented link/unlink for ActiveObjects || Jan Kronquist / Michael Kober || +- **ADD** - Added alter method to TransactionalRef + added appl(initValue) to Transactional Map/Vector/Ref (Peter Vlugter) +- **ADD** - Load dependency JARs in JAR deployed in kernel's ./deploy dir (Jonas Bonér) +- **ADD** - Allowing using Akka without specifying AKKA_HOME or path to akka.conf config file (Jonas Bonér) +- **ADD** - Redisclient now supports PubSub (Debasish Ghosh) +- **ADD** - Added a sample project under akka-samples for Redis PubSub using Akka actors (Debasish Ghosh) +- **ADD** - Richer API for Actor.reply (Viktor Klang) +- **ADD** - Added Listeners to Akka patterns (Viktor Klang) +- **ADD** - #183 Deactivate endpoints of stopped consumer actors (Martin 
Krasser) +- **ADD** - Camel `Message API improvements `_ (Martin Krasser) +- **ADD** - #83 Send notification to parent supervisor if all actors supervised by supervisor has been permanently killed (Jonas Bonér) +- **ADD** - #121 Make it possible to dynamically create supervisor hierarchies for Active Objects (Michael Kober) +- **ADD** - #131 Subscription API for node joining & leaving cluster (Jonas Bonér) +- **ADD** - #145 Register listener for errors in RemoteClient/RemoteServer (Jonas Bonér) +- **ADD** - #146 Create an additional distribution with sources (Jonas Bonér) +- **ADD** - #149 Support loading JARs from META-INF/lib in JARs put into the ./deploy directory (Jonas Bonér) +- **ADD** - #166 Implement insertVectorStorageEntriesFor in CassandraStorageBackend (Jonas Bonér) +- **ADD** - #168 Separate ID from Value in Actor; introduce ActorRef (Jonas Bonér) +- **ADD** - #174 Create sample module for remote actors (Jonas Bonér) +- **ADD** - #175 Add new sample module with Peter Vlugter's Ant demo (Jonas Bonér) +- **ADD** - #177 Rewrite remote protocol to make use of new ActorRef (Jonas Bonér) +- **ADD** - #180 Make use of ActorRef indirection for fault-tolerance management (Jonas Bonér) +- **ADD** - #184 Upgrade to Netty 3.2.0.CR1 (Jonas Bonér) +- **ADD** - #185 Rewrite Agent and Supervisor to work with new ActorRef (Jonas Bonér) +- **ADD** - #188 Change the order of how the akka.conf is detected (Jonas Bonér) +- **ADD** - #189 Reintroduce 'sender: Option[Actor]' ref in Actor (Jonas Bonér) +- **ADD** - #203 Upgrade to Scala 2.8 RC2 (Jonas Bonér) +- **ADD** - #222 Using Akka without AKKA_HOME or akka.conf (Jonas Bonér) +- **ADD** - #234 Add support for injection and management of ActiveObjectContext with RTTI such as 'sender' and 'senderFuture' references etc. (Jonas Bonér) +- **ADD** - #236 Upgrade SBinary to Scala 2.8 RC2 (Jonas Bonér) +- **ADD** - #235 Problem with RedisStorage.getVector(..) 
data structure storage management (Jonas Bonér) +- **ADD** - #239 Upgrade to Camel 2.3.0 (Martin Krasser) +- **ADD** - #242 Upgraded to Scala 2.8 RC3 (Jonas Bonér) +- **ADD** - #243 Upgraded to Protobuf 2.3.0 (Jonas Bonér) +- **ADD** - Added option to specify class loader when de-serializing messages and RemoteActorRef in RemoteClient (Jonas Bonér) +- **ADD** - #238 Upgrading to Cassandra 0.6.1 (Jonas Bonér) +- **ADD** - Upgraded to Jersey 1.2 (Viktor Klang) +- **ADD** - Upgraded Atmosphere to 0.6-SNAPSHOT, adding WebSocket support (Viktor Klang) +- **FIX** - Simplified ActiveObject configuration (Michael Kober) +- **FIX** - #237 Upgrade Mongo Java driver to 1.4 (the latest stable release) (Debasish Ghosh) +- **FIX** - #165 Implemented updateVectorStorageEntryFor in Mongo persistence module (Debasish Ghosh) +- **FIX** - #154: Allow ActiveObjects to use the default timeout in config file (Michael Kober) +- **FIX** - Active Object methods with @inittransactionalstate should be invoked automatically (Michael Kober) +- **FIX** - Nested supervisor hierarchy failure propagation bug fixed (Jonas Bonér) +- **FIX** - Fixed bug on CommitBarrier transaction registration (Jonas Bonér) +- **FIX** - Merged many modules to reduce total number of modules (Viktor Klang) +- **FIX** - Future parameterized (Viktor Klang) +- **FIX** - #191: Workstealing dispatcher didn't work with !! (Viktor Klang) +- **FIX** - #202: Allow applications to disable stream-caching (Martin Krasser) +- **FIX** - #119 Problem with Cassandra-backed Vector (Jonas Bonér) +- **FIX** - #147 Problem replying to remote sender when message sent with ! 
(Jonas Bonér) +- **FIX** - #171 initial value of Ref can become null if first transaction rolled back (Jonas Bonér) +- **FIX** - #172 Fix "broken" Protobuf serialization API (Jonas Bonér) +- **FIX** - #173 Problem with Vector::slice in CassandraStorage (Jonas Bonér) +- **FIX** - #190 RemoteClient shutdown ends up in endless loop (Jonas Bonér) +- **FIX** - #211 Problem with getting CommitBarrierOpenException when using Transaction.Global (Jonas Bonér) +- **FIX** - #240 Supervised actors not started when starting supervisor (Jonas Bonér) +- **FIX** - Fixed problem with Transaction.Local not committing to persistent storage (Jonas Bonér) +- **FIX** - #215: Re-engineered the JAX-RS support (Viktor Klang) +- **FIX** - Many many bug fixes || Team || +- **REM** - Shoal cluster module (Viktor Klang) + +Release 0.8.1 - April 6th 2010 +---------------------------------------- + +- **ADD** - Redis cluster support (Debasish Ghosh) +- **ADD** - Reply to remote sender from message set with ! (Jonas Bonér) +- **ADD** - Load-balancer which prefers actors with few messages in mailbox || Jan Van Besien || +- **ADD** - Added developer mailing list: [akka-dev AT googlegroups DOT com] (Jonas Bonér) +- **FIX** - Separated thread-local from thread-global transaction API (Jonas Bonér) +- **FIX** - Fixed bug in using STM outside Actors (Jonas Bonér) +- **FIX** - Fixed bug in anonymous actors (Jonas Bonér) +- **FIX** - Moved web initializer to new akka-servlet module (Viktor Klang) + +Release 0.8 - March 31st 2010 +---------------------------------------- + +- **ADD** - Scala 2.8 based (Viktor Klang) +- **ADD** - Monadic API for Agents (Jonas Bonér) +- **ADD** - Agents are transactional (Jonas Bonér) +- **ADD** - Work-stealing dispatcher || Jan Van Besien || +- **ADD** - Improved Spring integration (Michael Kober) +- **FIX** - Various bugfixes || Team || +- **FIX** - Improved distribution packaging (Jonas Bonér) +- **REMOVE** - Actor.send function (Jonas Bonér) + +Release 0.7 - March 21st 
2010 +---------------------------------------- + +- **ADD** - Rewritten STM now works generically with fire-forget message flows (Jonas Bonér) +- **ADD** - Apache Camel integration (Martin Krasser) +- **ADD** - Spring integration (Michael Kober) +- **ADD** - Server-managed Remote Actors (Jonas Bonér) +- **ADD** - Clojure-style Agents (Viktor Klang) +- **ADD** - Shoal cluster backend (Viktor Klang) +- **ADD** - Redis-based transactional queue storage backend (Debasish Ghosh) +- **ADD** - Redis-based transactional sorted set storage backend (Debasish Ghosh) +- **ADD** - Redis-based atomic INC (index) operation (Debasish Ghosh) +- **ADD** - Distributed Comet (Viktor Klang) +- **ADD** - Project moved to SBT (simple-build-tool) || Peter Hausel || +- **ADD** - Futures object with utility methods for Future's (Jonas Bonér) +- **ADD** - !!! function that returns a Future (Jonas Bonér) +- **ADD** - Richer ActorRegistry API (Jonas Bonér) +- **FIX** - Improved event-based dispatcher performance with 40% || Jan Van Besien || +- **FIX** - Improved remote client pipeline performance (Viktor Klang) +- **FIX** - Support several Clusters on the same network (Viktor Klang) +- **FIX** - Structural package refactoring (Jonas Bonér) +- **FIX** - Various bugs fixed || Team || + +Release 0.6 - January 5th 2010 +---------------------------------------- + +- **ADD** - Clustered Comet using Akka remote actors and clustered membership API (Viktor Klang) +- **ADD** - Cluster membership API and implementation based on JGroups (Viktor Klang) +- **ADD** - Security module for HTTP-based authentication and authorization (Viktor Klang) +- **ADD** - Support for using Scala XML tags in RESTful Actors (scala-jersey) (Viktor Klang) +- **ADD** - Support for Comet Actors using Atmosphere (Viktor Klang) +- **ADD** - MongoDB as Akka storage backend (Debasish Ghosh) +- **ADD** - Redis as Akka storage backend (Debasish Ghosh) +- **ADD** - Transparent JSON serialization of Scala objects based on SJSON 
(Debasish Ghosh) +- **ADD** - Kerberos/SPNEGO support for Security module || Eckhart Hertzler || +- **ADD** - Implicit sender for remote actors: Remote actors are able to use reply to answer a request || Mikael Högqvist || +- **ADD** - Support for using the Lift Web framework with Actors || Tim Perrett || +- **ADD** - Added CassandraSession API (with socket pooling) wrapping Cassandra's Thrift API in Scala and Java APIs (Jonas Bonér) +- **ADD** - Rewritten STM, now integrated with Multiverse STM (Jonas Bonér) +- **ADD** - Added STM API for atomic {..} and run {..} orElse {..} (Jonas Bonér) +- **ADD** - Added STM retry (Jonas Bonér) +- **ADD** - AMQP integration; abstracted as actors in a supervisor hierarchy. Impl AMQP 0.9.1 (Jonas Bonér) +- **ADD** - Complete rewrite of the persistence transaction management, now based on Unit of Work and Multiverse STM (Jonas Bonér) +- **ADD** - Monadic API to TransactionalRef (use it in for-comprehension) (Jonas Bonér) +- **ADD** - Lightweight actor syntax using one of the Actor.actor(..) methods. F.e: 'val a = actor { case _ => .. }' (Jonas Bonér) +- **ADD** - Rewritten event-based dispatcher which improved perfomance by 10x, now substantially faster than event-driven Scala Actors (Jonas Bonér) +- **ADD** - New Scala JSON parser based on sjson (Jonas Bonér) +- **ADD** - Added zlib compression to remote actors (Jonas Bonér) +- **ADD** - Added implicit sender reference for fire-forget ('!') message sends (Jonas Bonér) +- **ADD** - Monadic API to TransactionalRef (use it in for-comprehension) (Jonas Bonér) +- **ADD** - Smoother web app integration; just add akka.conf to the classpath (WEB-INF/classes), no need for AKKA_HOME or -Dakka.conf=.. 
(Jonas Bonér) +- **ADD** - Modularization of distribution into a thin core (actors, remoting and STM) and the rest in submodules (Jonas Bonér) +- **ADD** - Added 'forward' to Actor, forwards message but keeps original sender address (Jonas Bonér) +- **ADD** - JSON serialization for Java objects (using Jackson) (Jonas Bonér) +- **ADD** - JSON serialization for Scala objects (using SJSON) (Jonas Bonér) +- **ADD** - Added implementation for remote actor reconnect upon failure (Jonas Bonér) +- **ADD** - Protobuf serialization for Java and Scala objects (Jonas Bonér) +- **ADD** - SBinary serialization for Scala objects (Jonas Bonér) +- **ADD** - Protobuf as remote protocol (Jonas Bonér) +- **ADD** - Updated Cassandra integration and CassandraSession API to v0.4 (Jonas Bonér) +- **ADD** - CassandraStorage is now works with external Cassandra cluster (Jonas Bonér) +- **ADD** - ActorRegistry for retrieving Actor instances by class name and by id (Jonas Bonér) +- **ADD** - SchedulerActor for scheduling periodic tasks (Jonas Bonér) +- **ADD** - Now start up kernel with 'java -jar dist/akka-0.6.jar' (Jonas Bonér) +- **ADD** - Added Akka user mailing list: akka-user AT googlegroups DOT com]] (Jonas Bonér) +- **ADD** - Improved and restructured documentation (Jonas Bonér) +- **ADD** - New URL: http://akkasource.org (Jonas Bonér) +- **ADD** - New and much improved docs (Jonas Bonér) +- **ADD** - Enhanced trapping of failures: 'trapExit = List(classOf[..], classOf[..])' (Jonas Bonér) +- **ADD** - Upgraded to Netty 3.2, Protobuf 2.2, ScalaTest 1.0, Jersey 1.1.3, Atmosphere 0.4.1, Cassandra 0.4.1, Configgy 1.4 (Jonas Bonér) +- **FIX** - Lowered actor memory footprint; now an actor consumes ~600 bytes, which mean that you can create 6.5 million on 4 G RAM (Jonas Bonér) +- **FIX** - Remote actors are now defined by their UUID (not class name) (Jonas Bonér) +- **FIX** - Fixed dispatcher bugs (Jonas Bonér) +- **FIX** - Cleaned up Maven scripts and distribution in general (Jonas Bonér) 
+- **FIX** - Fixed many many bugs and minor issues (Jonas Bonér) +- **FIX** - Fixed inconsistencies and ugliness in Actors API (Jonas Bonér) +- **REMOVE** - Removed concurrent mode (Jonas Bonér) +- **REMOVE** - Removed embedded Cassandra mode (Jonas Bonér) +- **REMOVE** - Removed the !? method in Actor (synchronous message send), since it's evil. Use !! with time-out instead. (Jonas Bonér) +- **REMOVE** - Removed startup scripts and lib dir (Jonas Bonér) +- **REMOVE** - Removed the 'Transient' life-cycle scope since too close to 'Temporary' in semantics. (Jonas Bonér) +- **REMOVE** - Removed 'Transient' Actors and restart timeout (Jonas Bonér) diff --git a/akka-docs/project/scaladoc.rst b/akka-docs/project/scaladoc.rst new file mode 100644 index 0000000000..01d95ecd18 --- /dev/null +++ b/akka-docs/project/scaladoc.rst @@ -0,0 +1,33 @@ + +.. _scaladoc: + +############## + Scaladoc API +############## + + +Akka Snapshot +============= + +Automatically published Scaladoc API for the latest SNAPSHOT version of Akka can +be found here: + +- Akka - http://akka.io/api/akka/snapshot + +- Akka Modules - http://akka.io/api/akka-modules/snapshot + + +Release Versions +================ + +1.1 +--- + +- Akka 1.1 - http://akka.io/api/akka/1.1/ +- Akka Modules 1.1 - http://akka.io/api/akka-modules/1.1/ + +1.0 +--- + +- Akka 1.0 - http://akka.io/api/1.0/ + diff --git a/akka-docs/pending/sponsors.rst b/akka-docs/project/sponsors.rst similarity index 57% rename from akka-docs/pending/sponsors.rst rename to akka-docs/project/sponsors.rst index 88d35f1f0a..085d35cc0d 100644 --- a/akka-docs/pending/sponsors.rst +++ b/akka-docs/project/sponsors.rst @@ -1,14 +1,10 @@ -****Sponsors **** -======================================================= +.. _sponsors: -Scalable Solutions -================== - -Scalable Solutions AB is the commercial entity behind Akka, providing support, consulting and training around Akka. 
-``_ +Sponsors +============ YourKit -======= +------- YourKit is kindly supporting open source projects with its full-featured Java Profiler. YourKit, LLC is the creator of innovative and intelligent tools for profiling Java and .NET applications. diff --git a/akka-docs/pending/actor-registry-scala.rst b/akka-docs/scala/actor-registry.rst similarity index 77% rename from akka-docs/pending/actor-registry-scala.rst rename to akka-docs/scala/actor-registry.rst index 5f57434501..d812ef9066 100644 --- a/akka-docs/pending/actor-registry-scala.rst +++ b/akka-docs/scala/actor-registry.rst @@ -6,14 +6,14 @@ Module stability: **SOLID** ActorRegistry: Finding Actors ----------------------------- -Actors can be looked up by using the **akka.actor.Actor.registry: akka.actor.ActorRegistry**. Lookups for actors through this registry can be done by: +Actors can be looked up by using the ``akka.actor.Actor.registry: akka.actor.ActorRegistry``. Lookups for actors through this registry can be done by: -* uuid akka.actor.Uuid – this uses the ‘**uuid**’ field in the Actor class, returns the actor reference for the actor with specified uuid, if one exists, otherwise None -* id string – this uses the ‘**id**’ field in the Actor class, which can be set by the user (default is the class name), returns all actor references to actors with specified id -* specific actor class - returns an '**Array[Actor]**' with all actors of this exact class -* parameterized type - returns an '**Array[Actor]**' with all actors that are a subtype of this specific type +* uuid akka.actor.Uuid – this uses the ``uuid`` field in the Actor class, returns the actor reference for the actor with specified uuid, if one exists, otherwise None +* id string – this uses the ``id`` field in the Actor class, which can be set by the user (default is the class name), returns all actor references to actors with specified id +* specific actor class - returns an ``Array[Actor]`` with all actors of this exact class +* 
parameterized type - returns an ``Array[Actor]`` with all actors that are a subtype of this specific type -Actors are automatically registered in the ActorRegistry when they are started, removed or stopped. You can explicitly register and unregister ActorRef's by using the '**register**' and '**unregister**' methods. The ActorRegistry contains many convenience methods for looking up typed actors. +Actors are automatically registered in the ActorRegistry when they are started, removed or stopped. You can explicitly register and unregister ActorRef's by using the ``register`` and ``unregister`` methods. The ActorRegistry contains many convenience methods for looking up typed actors. Here is a summary of the API for finding actors: diff --git a/akka-docs/scala/actors.rst b/akka-docs/scala/actors.rst index 2da7f2d57b..bd550b807b 100644 --- a/akka-docs/scala/actors.rst +++ b/akka-docs/scala/actors.rst @@ -1,5 +1,11 @@ -Actors -====== +.. _actors-scala: + +Actors (Scala) +============== + +.. sidebar:: Contents + + .. contents:: :local: Module stability: **SOLID** @@ -7,7 +13,7 @@ The `Actor Model `_ provides a higher The API of Akka’s Actors is similar to Scala Actors which has borrowed some of its syntax from Erlang. -The Akka 0.9 release introduced a new concept; ActorRef, which requires some refactoring. If you are new to Akka just read along, but if you have used Akka 0.6.x, 0.7.x and 0.8.x then you might be helped by the :doc:`0.8.x => 0.9.x migration guide ` +The Akka 0.9 release introduced a new concept; ActorRef, which requires some refactoring. If you are new to Akka just read along, but if you have used Akka 0.6.x, 0.7.x and 0.8.x then you might be helped by the :doc:`0.8.x => 0.9.x migration guide ` Creating Actors --------------- @@ -26,6 +32,9 @@ Here is an example: .. 
code-block:: scala + import akka.actor.Actor + import akka.event.EventHandler + class MyActor extends Actor { def receive = { case "test" => EventHandler.info(this, "received test") @@ -177,7 +186,7 @@ Using ``!!!`` will send a message to the receiving Actor asynchronously and will val future = actor !!! "Hello" -See `Futures `_ for more information. +See :ref:`futures-scala` for more information. Forward message ^^^^^^^^^^^^^^^ @@ -253,6 +262,25 @@ Let's start by looking how we can reply to messages in a convenient way using th Reply to messages ----------------- +Reply using the channel +^^^^^^^^^^^^^^^^^^^^^^^ + +If you want to have a handle to an object to whom you can reply to the message, you can use the ``Channel`` abstraction. +Simply call ``self.channel`` and then you can forward that to others, store it away or otherwise until you want to reply, which you do by ``Channel ! response``: + +.. code-block:: scala + + case request => + val result = process(request) + self.channel ! result + +.. code-block:: scala + + case request => + friend forward self.channel + +We recommend that you as first choice use the channel abstraction instead of the other ways described in the following sections. + Reply using the reply and reply\_? methods ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -318,22 +346,6 @@ Here is an example of how it can be used: senderFuture.foreach(_.completeWithException(this, e)) } -Reply using the channel -^^^^^^^^^^^^^^^^^^^^^^^ - -If you want to have a handle to an object to whom you can reply to the message, you can use the ``Channel`` abstraction. -Simply call ``self.channel`` and then you can forward that to others, store it away or otherwise until you want to reply, which you do by ``Channel ! response``: - -.. code-block:: scala - - case request => - val result = process(request) - self.channel ! result - -.. 
code-block:: scala - - case request => - friend forward self.channel Summary of reply semantics and options ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -382,7 +394,7 @@ When you start the ``Actor`` then it will automatically call the ``def preStart` .. code-block:: scala - override def preStart = { + override def preStart() = { ... // initialization code } @@ -399,7 +411,7 @@ When stop is called then a call to the ``def postStop`` callback method will tak .. code-block:: scala - override def postStop = { + override def postStop() = { ... // clean up resources } diff --git a/akka-docs/pending/agents-scala.rst b/akka-docs/scala/agents.rst similarity index 89% rename from akka-docs/pending/agents-scala.rst rename to akka-docs/scala/agents.rst index 9adb9e9f81..dc62000995 100644 --- a/akka-docs/pending/agents-scala.rst +++ b/akka-docs/scala/agents.rst @@ -1,6 +1,10 @@ Agents (Scala) ============== +.. sidebar:: Contents + + .. contents:: :local: + Module stability: **SOLID** Agents in Akka were inspired by `agents in Clojure `_. @@ -26,7 +30,7 @@ An Agent will be running until you invoke ``close`` on it. Then it will be eligi .. code-block:: scala - agent.close + agent.close() Updating Agents --------------- @@ -88,6 +92,29 @@ Transactional Agents If an Agent is used within an enclosing transaction, then it will participate in that transaction. If you send to an Agent within a transaction then the dispatch to the Agent will be held until that transaction commits, and discarded if the transaction is aborted. +.. 
code-block:: scala + + import akka.agent.Agent + import akka.stm._ + + def transfer(from: Agent[Int], to: Agent[Int], amount: Int): Boolean = { + atomic { + if (from.get < amount) false + else { + from send (_ - amount) + to send (_ + amount) + true + } + } + } + + val from = Agent(100) + val to = Agent(20) + val ok = transfer(from, to, 50) + + from() // -> 50 + to() // -> 70 + Monadic usage ------------- @@ -101,6 +128,7 @@ Example of a monadic usage: val agent2 = Agent(5) // uses foreach + var result = 0 for (value <- agent1) { result = value + 1 } @@ -115,7 +143,7 @@ Example of a monadic usage: value2 <- agent2 } yield value1 + value2 - agent1.close - agent2.close - agent3.close - agent4.close + agent1.close() + agent2.close() + agent3.close() + agent4.close() diff --git a/akka-docs/pending/dataflow-scala.rst b/akka-docs/scala/dataflow.rst similarity index 58% rename from akka-docs/pending/dataflow-scala.rst rename to akka-docs/scala/dataflow.rst index c935537cae..1d46f0164f 100644 --- a/akka-docs/pending/dataflow-scala.rst +++ b/akka-docs/scala/dataflow.rst @@ -1,12 +1,14 @@ Dataflow Concurrency (Scala) ============================ +.. sidebar:: Contents + + .. contents:: :local: + Description ----------- -**IMPORTANT: As of Akka 1.1, Akka Future, CompletableFuture and DefaultCompletableFuture have all the functionality of DataFlowVariables, they also support non-blocking composition and advanced features like fold and reduce, Akka DataFlowVariable is therefor deprecated and will probably resurface in the following release as a DSL on top of Futures.** - -Akka implements `Oz-style dataflow concurrency `_ through dataflow (single assignment) variables and lightweight (event-based) processes/threads. +Akka implements `Oz-style dataflow concurrency `_ by using a special API for :ref:`futures-scala` that allows single assignment variables and multiple lightweight (event-based) processes/threads. Dataflow concurrency is deterministic. 
This means that it will always behave the same. If you run it once and it yields output 5 then it will do that **every time**, run it 10 million times, same result. If it on the other hand deadlocks the first time you run it, then it will deadlock **every single time** you run it. Also, there is **no difference** between sequential code and concurrent code. These properties makes it very easy to reason about concurrency. The limitation is that the code needs to be side-effect free, e.g. deterministic. You can't use exceptions, time, random etc., but need to treat the part of your program that uses dataflow concurrency as a pure function with input and output. @@ -17,55 +19,84 @@ The documentation is not as complete as it should be, something we will improve * ``_ * ``_ +Getting Started +--------------- + +Scala's Delimited Continuations plugin is required to use the Dataflow API. To enable the plugin when using sbt, your project must inherit the ``AutoCompilerPlugins`` trait and contain a bit of configuration as is seen in this example: + +.. code-block:: scala + + import sbt._ + + class MyAkkaProject(info: ProjectInfo) extends DefaultProject(info) with AkkaProject with AutoCompilerPlugins { + val continuationsPlugin = compilerPlugin("org.scala-lang.plugins" % "continuations" % "2.9.0") + override def compileOptions = super.compileOptions ++ compileOptions("-P:continuations:enable") + } + Dataflow Variables ------------------ -Dataflow Variable defines three different operations: +Dataflow Variable defines four different operations: 1. Define a Dataflow Variable .. code-block:: scala - val x = new DataFlowVariable[Int] + val x = Promise[Int]() -2. Wait for Dataflow Variable to be bound +2. Wait for Dataflow Variable to be bound (must be contained within a ``Future.flow`` block as described in the next section) .. code-block:: scala x() -3. Bind Dataflow Variable +3. 
Bind Dataflow Variable (must be contained within a ``Future.flow`` block as described in the next section) .. code-block:: scala x << 3 -A Dataflow Variable can only be bound once. Subsequent attempts to bind the variable will throw an exception. - -You can also shutdown a dataflow variable like this: +4. Bind Dataflow Variable with a Future (must be contained within a ``Future.flow`` block as described in the next section) .. code-block:: scala - x.shutdown + x << y -Threads -------- +A Dataflow Variable can only be bound once. Subsequent attempts to bind the variable will be ignored. -You can easily create millions lightweight (event-driven) threads on a regular workstation. +Dataflow Delimiter +------------------ + +Dataflow is implemented in Akka using Scala's Delimited Continuations. To use the Dataflow API the code must be contained within a ``Future.flow`` block. For example: .. code-block:: scala - thread { ... } + import Future.flow -You can also set the thread to a reference to be able to control its life-cycle: + val a = Future( ... ) + val b = Future( ... ) + val c = Promise[Int]() + + flow { + c << (a() + b()) + } + + val result = c.get() + +The ``flow`` method also returns a ``Future`` for the result of the contained expression, so the previous example could also be written like this: .. code-block:: scala - val t = thread { ... } + import Future.flow - ... // time passes + val a = Future( ... ) + val b = Future( ... ) - t ! 'exit // shut down the thread + val c = flow { + a() + b() + } + + val result = c.get() Examples -------- @@ -84,7 +115,7 @@ To run these examples: :: - Welcome to Scala version 2.8.0.final (Java HotSpot(TM) 64-Bit Server VM, Java 1.6.0_22). + Welcome to Scala version 2.9.0 (Java HotSpot(TM) 64-Bit Server VM, Java 1.6.0_25). Type in expressions to have them evaluated. Type :help for more information. @@ -116,16 +147,17 @@ Example in Akka: .. 
code-block:: scala - import akka.dataflow.DataFlow._ + import akka.dispatch._ + import Future.flow - val x, y, z = new DataFlowVariable[Int] + val x, y, z = Promise[Int]() - thread { + flow { z << x() + y() println("z = " + z()) } - thread { x << 40 } - thread { y << 2 } + flow { x << 40 } + flow { y << 2 } Example of using DataFlowVariable with recursion ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -159,38 +191,41 @@ Example in Akka: .. code-block:: scala - import akka.dataflow.DataFlow._ + import akka.dispatch._ + import Future.flow - def ints(n: Int, max: Int): List[Int] = + def ints(n: Int, max: Int): List[Int] = { if (n == max) Nil else n :: ints(n + 1, max) + } - def sum(s: Int, stream: List[Int]): List[Int] = stream match { + def sum(s: Int, stream: List[Int]): List[Int] = stream match { case Nil => s :: Nil case h :: t => s :: sum(h + s, t) } - val x = new DataFlowVariable[List[Int]] - val y = new DataFlowVariable[List[Int]] + val x, y = Promise[List[Int]]() - thread { x << ints(0, 1000) } - thread { y << sum(0, x()) } - thread { println("List of sums: " + y()) } + flow { x << ints(0, 1000) } + flow { y << sum(0, x()) } + flow { println("List of sums: " + y()) } -Example on life-cycle management of DataFlowVariables -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Example using concurrent Futures +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Shows how to shutdown dataflow variables and bind threads to values to be able to interact with them (exit etc.). +Shows how to have a calculation run in another thread. Example in Akka: .. 
code-block:: scala - import akka.dataflow.DataFlow._ + + import akka.dispatch._ + import Future.flow // create four 'Int' data flow variables - val x, y, z, v = new DataFlowVariable[Int] + val x, y, z, v = Promise[Int]() - val main = thread { + flow { println("Thread 'main'") x << 1 @@ -205,28 +240,19 @@ Example in Akka: z << y println("'z' set to 'y': " + z()) } - - // main completed, shut down the data flow variables - x.shutdown - y.shutdown - z.shutdown - v.shutdown } - val setY = thread { - println("Thread 'setY', sleeping...") - Thread.sleep(5000) - y << 2 + flow { + y << Future { + println("Thread 'setY', sleeping") + Thread.sleep(2000) + 2 + } println("'y' set to: " + y()) } - val setV = thread { + flow { println("Thread 'setV'") v << y println("'v' set to 'y': " + v()) } - - // shut down the threads - main ! 'exit - setY ! 'exit - setV ! 'exit diff --git a/akka-docs/pending/dispatchers-scala.rst b/akka-docs/scala/dispatchers.rst similarity index 97% rename from akka-docs/pending/dispatchers-scala.rst rename to akka-docs/scala/dispatchers.rst index 62584835a4..35285c20fa 100644 --- a/akka-docs/pending/dispatchers-scala.rst +++ b/akka-docs/scala/dispatchers.rst @@ -1,6 +1,10 @@ Dispatchers (Scala) =================== +.. sidebar:: Contents + + .. contents:: :local: + Module stability: **SOLID** The Dispatcher is an important piece that allows you to configure the right semantics and parameters for optimal performance, throughput and scalability. Different Actors have different needs. @@ -124,7 +128,7 @@ If you don't define a the 'throughput' option in the configuration file then the Browse the `ScalaDoc `_ or look at the code for all the options available. 
Priority event-based -^^^^^^^^^^^ +^^^^^^^^^^^^^^^^^^^^ Sometimes it's useful to be able to specify priority order of messages, that is done by using PriorityExecutorBasedEventDrivenDispatcher and supply a java.util.Comparator[MessageInvocation] or use a akka.dispatch.PriorityGenerator (recommended): @@ -231,11 +235,13 @@ For the 'ExecutorBasedEventDrivenDispatcher' and the 'ExecutorBasedWorkStealingD For the 'ThreadBasedDispatcher', it is non-shareable between actors, and associates a dedicated Thread with the actor. Making it bounded (by specifying a capacity) is optional, but if you do, you need to provide a pushTimeout (default is 10 seconds). When trying to send a message to the Actor it will throw a MessageQueueAppendFailedException("BlockingMessageTransferQueue transfer timed out") if the message cannot be added to the mailbox within the time specified by the pushTimeout. -``_ -class MyActor extends Actor { - import akka.util.duration._ - self.dispatcher = Dispatchers.newThreadBasedDispatcher(self, mailboxCapacity = 100, - pushTimeOut = 10 seconds) - ... -} -``_ +.. code-block:: scala + + class MyActor extends Actor { + import akka.util.duration._ + self.dispatcher = Dispatchers.newThreadBasedDispatcher(self, mailboxCapacity = 100, + pushTimeOut = 10 seconds) + ... + } + + diff --git a/akka-docs/pending/fault-tolerance-scala.rst b/akka-docs/scala/fault-tolerance.rst similarity index 94% rename from akka-docs/pending/fault-tolerance-scala.rst rename to akka-docs/scala/fault-tolerance.rst index 6070f9e01e..c7ac83fd7e 100644 --- a/akka-docs/pending/fault-tolerance-scala.rst +++ b/akka-docs/scala/fault-tolerance.rst @@ -1,6 +1,12 @@ +.. _fault-tolerance-scala: + Fault Tolerance Through Supervisor Hierarchies (Scala) ====================================================== +.. sidebar:: Contents + + .. 
contents:: :local: + Module stability: **SOLID** The "let it crash" approach to fault/error handling, implemented by linking actors, is very different to what Java and most non-concurrency oriented languages/frameworks have adopted. It's a way of dealing with failure that is designed for concurrent and distributed systems. @@ -10,15 +16,15 @@ Concurrency Throwing an exception in concurrent code (let's assume we are using non-linked actors), will just simply blow up the thread that currently executes the actor. -# There is no way to find out that things went wrong (apart from inspecting the stack trace). -# There is nothing you can do about it. +- There is no way to find out that things went wrong (apart from inspecting the stack trace). +- There is nothing you can do about it. Here actors provide a clean way of getting notification of the error and do something about it. Linking actors also allow you to create sets of actors where you can be sure that either: -# All are dead -# None are dead +- All are dead +- None are dead This is very useful when you have thousands of concurrent actors. Some actors might have implicit dependencies and together implement a service, computation, user session etc. 
@@ -241,10 +247,13 @@ The supervising Actor also needs to define a fault handler that defines the rest The different options are: -* AllForOneStrategy(trapExit, maxNrOfRetries, withinTimeRange) - * trapExit is a List or Array of classes inheriting from Throwable, they signal which types of exceptions this actor will handle -* OneForOneStrategy(trapExit, maxNrOfRetries, withinTimeRange) - * trapExit is a List or Array of classes inheriting from Throwable, they signal which types of exceptions this actor will handle +- AllForOneStrategy(trapExit, maxNrOfRetries, withinTimeRange) + + - trapExit is a List or Array of classes inheriting from Throwable, they signal which types of exceptions this actor will handle + +- OneForOneStrategy(trapExit, maxNrOfRetries, withinTimeRange) + + - trapExit is a List or Array of classes inheriting from Throwable, they signal which types of exceptions this actor will handle Here is an example: @@ -316,13 +325,13 @@ Supervised actors have the option to reply to the initial sender within preResta self.reply_?(reason.getMessage) } - override def postStop { + override def postStop() { self.reply_?("stopped by supervisor") } } -* A reply within preRestart or postRestart must be a safe reply via self.reply_? because an unsafe self.reply will throw an exception when the actor is restarted without having failed. This can be the case in context of AllForOne restart strategies. -* A reply within postStop must be a safe reply via self.reply_? because an unsafe self.reply will throw an exception when the actor has been stopped by the application (and not by a supervisor) after successful execution of receive (or no execution at all). +- A reply within preRestart or postRestart must be a safe reply via `self.reply_?` because an unsafe self.reply will throw an exception when the actor is restarted without having failed. This can be the case in context of AllForOne restart strategies. 
+- A reply within postStop must be a safe reply via `self.reply_?` because an unsafe self.reply will throw an exception when the actor has been stopped by the application (and not by a supervisor) after successful execution of receive (or no execution at all). Handling too many actor restarts within a specific time limit ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -431,6 +440,7 @@ If the parent TypedActor (supervisor) wants to be able to do handle failing chil For convenience there is an overloaded link that takes trapExit and faultHandler for the supervisor as arguments. Here is an example: .. code-block:: scala + import akka.actor.TypedActor._ val foo = newInstance(classOf[Foo], 1000) diff --git a/akka-docs/scala/fsm.rst b/akka-docs/scala/fsm.rst index 3b5fdd4394..8f4b6187ce 100644 --- a/akka-docs/scala/fsm.rst +++ b/akka-docs/scala/fsm.rst @@ -49,13 +49,14 @@ Now lets create an object representing the FSM and defining the behavior. import akka.actor.{Actor, FSM} import akka.event.EventHandler - import FSM._ import akka.util.duration._ case object Move class ABC extends Actor with FSM[ExampleState, Unit] { + import FSM._ + startWith(A, Unit) when(A) { @@ -113,8 +114,12 @@ Now we can create a lock FSM that takes :class:`LockState` as a state and a .. code-block:: scala + import akka.actor.{Actor, FSM} + class Lock(code: String) extends Actor with FSM[LockState, String] { + import FSM._ + val emptyCode = "" startWith(Locked, emptyCode) @@ -317,6 +322,12 @@ The parentheses are not actually needed in all cases, but they visually distinguish between modifiers and their arguments and therefore make the code even more pleasant to read for foreigners. +.. note:: + + Please note that the ``return`` statement may not be used in :meth:`when` + blocks or similar; this is a Scala restriction. Either refactor your code + using ``if () ... else ...`` or move it into a method definition. 
+ Monitoring Transitions ---------------------- @@ -434,7 +445,8 @@ state data which is available during termination handling. It should be noted that :func:`stop` does not abort the actions and stop the FSM immediately. The stop action must be returned from the event handler in - the same way as a state transition. + the same way as a state transition (but note that the ``return`` statement + may not be used within a :meth:`when` block). .. code-block:: scala @@ -462,7 +474,7 @@ invocation of :func:`onTermination` replaces the previously installed handler. Examples ======== -A bigger FSM example can be found in the sources: +A bigger FSM example contrasted with Actor's :meth:`become`/:meth:`unbecome` can be found in the sources: * `Dining Hakkers using FSM `_ * `Dining Hakkers using become `_ diff --git a/akka-docs/scala/futures.rst b/akka-docs/scala/futures.rst new file mode 100644 index 0000000000..f64c864564 --- /dev/null +++ b/akka-docs/scala/futures.rst @@ -0,0 +1,244 @@ +.. _futures-scala: + +Futures (Scala) +=============== + +.. sidebar:: Contents + + .. contents:: :local: + +Introduction +------------ + +In Akka, a `Future `_ is a data structure used to retrieve the result of some concurrent operation. This operation is usually performed by an ``Actor`` or by the ``Dispatcher`` directly. This result can be accessed synchronously (blocking) or asynchronously (non-blocking). + +Use with Actors +--------------- + +There are generally two ways of getting a reply from an ``Actor``: the first is by a sent message (``actor ! msg``), which only works if the original sender was an ``Actor``) and the second is through a ``Future``. + +Using an ``Actor``\'s ``!!!`` method to send a message will return a Future. To wait for and retrieve the actual result the simplest method is: + +.. code-block:: scala + + val future = actor !!! 
msg + val result: Any = future.get() + +This will cause the current thread to block and wait for the ``Actor`` to 'complete' the ``Future`` with it's reply. Due to the dynamic nature of Akka's ``Actor``\s this result will be untyped and will default to ``Nothing``. The safest way to deal with this is to cast the result to an ``Any`` as is shown in the above example. You can also use the expected result type instead of ``Any``, but if an unexpected type were to be returned you will get a ``ClassCastException``. For more elegant ways to deal with this and to use the result without blocking, refer to `Functional Futures`_. + +Use Directly +------------ + +A common use case within Akka is to have some computation performed concurrently without needing the extra utility of an ``Actor``. If you find yourself creating a pool of ``Actor``\s for the sole reason of performing a calculation in parallel, there is an easier (and faster) way: + +.. code-block:: scala + + import akka.dispatch.Future + + val future = Future { + "Hello" + "World" + } + val result = future.get() + +In the above code the block passed to ``Future`` will be executed by the default ``Dispatcher``, with the return value of the block used to complete the ``Future`` (in this case, the result would be the string: "HelloWorld"). Unlike a ``Future`` that is returned from an ``Actor``, this ``Future`` is properly typed, and we also avoid the overhead of managing an ``Actor``. + +Functional Futures +------------------ + +A recent addition to Akka's ``Future`` is several monadic methods that are very similar to the ones used by Scala's collections. These allow you to create 'pipelines' or 'streams' that the result will travel through. + +Future is a Monad +^^^^^^^^^^^^^^^^^ + +The first method for working with ``Future`` functionally is ``map``. This method takes a ``Function`` which performs some operation on the result of the ``Future``, and returning a new result. 
The return value of the ``map`` method is another ``Future`` that will contain the new result: + +.. code-block:: scala + + val f1 = Future { + "Hello" + "World" + } + + val f2 = f1 map { x => + x.length + } + + val result = f2.get() + +In this example we are joining two strings together within a Future. Instead of waiting for this to complete, we apply our function that calculates the length of the string using the ``map`` method. Now we have a second Future that will eventually contain an ``Int``. When our original ``Future`` completes, it will also apply our function and complete the second Future with it's result. When we finally ``get`` the result, it will contain the number 10. Our original Future still contains the string "HelloWorld" and is unaffected by the ``map``. + +Something to note when using these methods: if the ``Future`` is still being processed when one of these methods are called, it will be the completing thread that actually does the work. If the ``Future`` is already complete though, it will be run in our current thread. For example: + +.. code-block:: scala + + val f1 = Future { + Thread.sleep(1000) + "Hello" + "World" + } + + val f2 = f1 map { x => + x.length + } + + val result = f2.get() + +The original ``Future`` will take at least 1 second to execute now, which means it is still being processed at the time we call ``map``. The function we provide gets stored within the ``Future`` and later executed automatically by the dispatcher when the result is ready. + +If we do the opposite: + +.. code-block:: scala + + val f1 = Future { + "Hello" + "World" + } + + Thread.sleep(1000) + + val f2 = f1 map { x => + x.length + } + + val result = f2.get() + +Our little string has been processed long before our 1 second sleep has finished. 
Because of this, the dispatcher has moved onto other messages that need processing and can no longer calculate the length of the string for us, instead it gets calculated in the current thread just as if we weren't using a ``Future``. + +Normally this works quite well as it means there is very little overhead to running a quick function. If there is a possibility of the function taking a non-trivial amount of time to process it might be better to have this done concurrently, and for that we use ``flatMap``: + +.. code-block:: scala + + val f1 = Future { + "Hello" + "World" + } + + val f2 = f1 flatMap {x => + Future(x.length) + } + + val result = f2.get() + +Now our second Future is executed concurrently as well. This technique can also be used to combine the results of several Futures into a single calculation, which will be better explained in the following sections. + +For Comprehensions +^^^^^^^^^^^^^^^^^^ + +Since ``Future`` has a ``map`` and ``flatMap`` method it can be easily used in a 'for comprehension': + +.. code-block:: scala + + val f = for { + a <- Future(10 / 2) // 10 / 2 = 5 + b <- Future(a + 1) // 5 + 1 = 6 + c <- Future(a - 1) // 5 - 1 = 4 + } yield b * c // 6 * 4 = 24 + + val result = f.get() + +Something to keep in mind when doing this is even though it looks like parts of the above example can run in parallel, each step of the for comprehension is run sequentially. This will happen on separate threads for each step but there isn't much benefit over running the calculations all within a single Future. The real benefit comes when the ``Future``\s are created first, and then combining them together. + +Composing Futures +^^^^^^^^^^^^^^^^^ + +The example for comprehension above is an example of composing ``Future``\s. A common use case for this is combining the replies of several ``Actor``\s into a single calculation without resorting to calling ``get`` or ``await`` to block for each result. First an example of using ``get``: + +.. 
code-block:: scala + + val f1 = actor1 !!! msg1 + val f2 = actor2 !!! msg2 + + val a: Int = f1.get() + val b: Int = f2.get() + + val f3 = actor3 !!! (a + b) + + val result: String = f3.get() + +Here we wait for the results from the first 2 ``Actor``\s before sending that result to the third ``Actor``. We called ``get`` 3 times, which caused our little program to block 3 times before getting our final result. Now compare that to this example: + +.. code-block:: scala + + val f1 = actor1 !!! msg1 + val f2 = actor2 !!! msg2 + + val f3 = for { + a: Int <- f1 + b: Int <- f2 + c: String <- actor3 !!! (a + b) + } yield c + + val result = f3.get() + +Here we have 2 actors processing a single message each. Once the 2 results are available (note that we don't block to get these results!), they are being added together and sent to a third ``Actor``, which replies with a string, which we assign to 'result'. + +This is fine when dealing with a known number of Actors, but can grow unwieldy if we have more than a handful. The ``sequence`` and ``traverse`` helper methods can make it easier to handle more complex use cases. Both of these methods are ways of turning, for a subclass ``T`` of ``Traversable``, ``T[Future[A]]`` into a ``Future[T[A]]``. For example: + +.. code-block:: scala + + // oddActor returns odd numbers sequentially from 1 + val listOfFutures: List[Future[Int]] = List.fill(100)(oddActor !!! GetNext) + + // now we have a Future[List[Int]] + val futureList = Future.sequence(listOfFutures) + + // Find the sum of the odd numbers + val oddSum = futureList.map(_.sum).get() + +To better explain what happened in the example, ``Future.sequence`` is taking the ``List[Future[Int]]`` and turning it into a ``Future[List[Int]]``. We can then use ``map`` to work with the ``List[Int]`` directly, and we find the sum of the ``List``. 
+ +The ``traverse`` method is similar to ``sequence``, but it takes a ``T[A]`` and a function ``T => Future[B]`` to return a ``Future[T[B]]``, where ``T`` is again a subclass of Traversable. For example, to use ``traverse`` to sum the first 100 odd numbers: + +.. code-block:: scala + + val oddSum = Future.traverse((1 to 100).toList)(x => Future(x * 2 - 1)).map(_.sum).get() + +This is the same result as this example: + +.. code-block:: scala + + val oddSum = Future.sequence((1 to 100).toList.map(x => Future(x * 2 - 1))).map(_.sum).get() + +But it may be faster to use ``traverse`` as it doesn't have to create an intermediate ``List[Future[Int]]``. + + +Then there's a method that's called ``fold`` that takes a start-value, a sequence of ``Future``\s and a function from the type of the start-value and the type of the futures and returns something with the same type as the start-value, and then applies the function to all elements in the sequence of futures, non-blockingly; the execution will run on the Thread of the last completing Future in the sequence. + +.. code-block:: scala + + val futures = for(i <- 1 to 1000) yield Future(i * 2) // Create a sequence of Futures + + val futureSum = Futures.fold(0)(futures)(_ + _) + +That's all it takes! + + +If the sequence passed to ``fold`` is empty, it will return the start-value, in the case above, that will be 0. In some cases you don't have a start-value and you're able to use the value of the first completing Future in the sequence as the start-value; in that case you can use ``reduce``, which works like this: + +.. code-block:: scala + + val futures = for(i <- 1 to 1000) yield Future(i * 2) // Create a sequence of Futures + + val futureSum = Futures.reduce(futures)(_ + _) + +Same as with ``fold``, the execution will be done by the Thread that completes the last of the Futures, you can also parallelize it by chunking your futures into sub-sequences and reduce them, and then reduce the reduced results again. 
+ + + +This is just a sample of what can be done, but to use more advanced techniques it is easier to take advantage of Scalaz, which Akka has support for in its akka-scalaz module. + +Scalaz +^^^^^^ + +Akka also has a Scalaz module (:ref:`add-on-modules`) for more complete support of programming in a functional style. + +Exceptions +---------- + +Since the result of a ``Future`` is created concurrently to the rest of the program, exceptions must be handled differently. It doesn't matter if an ``Actor`` or the dispatcher is completing the ``Future``, if an ``Exception`` is caught the ``Future`` will contain it instead of a valid result. If a ``Future`` does contain an ``Exception``, calling ``get`` will cause it to be thrown again so it can be handled properly. + +It is also possible to handle an ``Exception`` by returning a different result. This is done with the ``failure`` method. For example: + +.. code-block:: scala + + val future = actor !!! msg1 failure { + case e: ArithmeticException => 0 + } + +In this example, if an ``ArithmeticException`` was thrown while the ``Actor`` processed the message, our ``Future`` would have a result of 0. The ``failure`` method works very similarly to the standard try/catch blocks, so multiple ``Exception``\s can be handled in this manner, and if an ``Exception`` is not handled this way it will behave as if we hadn't used the ``failure`` method. diff --git a/akka-docs/pending/http.rst b/akka-docs/scala/http.rst similarity index 97% rename from akka-docs/pending/http.rst rename to akka-docs/scala/http.rst index a4c7842233..9f15664b70 100644 --- a/akka-docs/pending/http.rst +++ b/akka-docs/scala/http.rst @@ -1,10 +1,17 @@ + +.. _http-module: + HTTP ==== +.. sidebar:: Contents + + .. 
contents:: :local: + Module stability: **SOLID** -When using Akkas embedded servlet container: --------------------------------------------- +When using Akkas embedded servlet container +------------------------------------------- Akka supports the JSR for REST called JAX-RS (JSR-311). It allows you to create interaction with your actors through HTTP + REST @@ -138,12 +145,12 @@ If you want to use akka-camel or any other modules that have their own "Bootable Java API: Typed Actors ---------------------- -`Sample module for REST services with Actors in Java `_ +`Sample module for REST services with Actors in Java `_ Scala API: Actors ----------------- -`Sample module for REST services with Actors in Scala `_ +`Sample module for REST services with Actors in Scala `_ Using Akka with the Pinky REST/MVC framework -------------------------------------------- @@ -269,7 +276,7 @@ Finally, bind the *handleHttpRequest* function of the *Endpoint* trait to the ac // // this is where you want attach your endpoint hooks // - override def preStart = { + override def preStart() = { // // we expect there to be one root and that it's already been started up // obviously there are plenty of other ways to obtaining this actor @@ -397,7 +404,7 @@ As noted above, hook functions are non-exclusive. This means multiple actors can // // this is where you want attach your endpoint hooks // - override def preStart = { + override def preStart() = { // // we expect there to be one root and that it's already been started up // obviously there are plenty of other ways to obtaining this actor @@ -519,3 +526,5 @@ Using the Akka Mist module with the Facebook Graph API and WebGL Example project using Akka Mist with the Facebook Graph API and WebGL ``_ + + diff --git a/akka-docs/scala/index.rst b/akka-docs/scala/index.rst index e54c88b979..7998ae2765 100644 --- a/akka-docs/scala/index.rst +++ b/akka-docs/scala/index.rst @@ -1,3 +1,5 @@ +.. 
_scala-api: + Scala API ========= @@ -5,5 +7,20 @@ Scala API :maxdepth: 2 actors + typed-actors + actor-registry + futures + dataflow + agents + stm + transactors + remote-actors + serialization + fault-tolerance + dispatchers + routing fsm + http + security testing + tutorial-chat-server diff --git a/akka-docs/pending/remote-actors-scala.rst b/akka-docs/scala/remote-actors.rst similarity index 81% rename from akka-docs/pending/remote-actors-scala.rst rename to akka-docs/scala/remote-actors.rst index 9389a5d284..305783238d 100644 --- a/akka-docs/pending/remote-actors-scala.rst +++ b/akka-docs/scala/remote-actors.rst @@ -1,13 +1,17 @@ Remote Actors (Scala) ===================== +.. sidebar:: Contents + + .. contents:: :local: + Module stability: **SOLID** -Akka supports starting Actors and Typed Actors on remote nodes using a very efficient and scalable NIO implementation built upon `JBoss Netty `_ and `Google Protocol Buffers `_ . +Akka supports starting and interacting with Actors and Typed Actors on remote nodes using a very efficient and scalable NIO implementation built upon `JBoss Netty `_ and `Google Protocol Buffers `_ . -The usage is completely transparent both in regards to sending messages and error handling and propagation as well as supervision, linking and restarts. You can send references to other Actors as part of the message. +The usage is completely transparent with local actors, both in regards to sending messages and error handling and propagation as well as supervision, linking and restarts. You can send references to other Actors as part of the message. -You can find a runnable sample `here `_. +You can find a runnable sample `here `__. Starting up the remote service ------------------------------ @@ -64,7 +68,7 @@ If you invoke 'shutdown' on the server then the connection will be closed. 
import akka.actor.Actor._ - remote.shutdown + remote.shutdown() Connecting and shutting down a client explicitly ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -74,6 +78,7 @@ Normally you should not have to start and stop the client connection explicitly .. code-block:: scala import akka.actor.Actor._ + import java.net.InetSocketAddress remote.shutdownClientConnection(new InetSocketAddress("localhost", 6666)) //Returns true if successful, false otherwise remote.restartClientConnection(new InetSocketAddress("localhost", 6666)) //Returns true if successful, false otherwise @@ -143,12 +148,6 @@ The default behavior is that the remote client will maintain a transaction log o If you choose a capacity higher than 0, then a bounded queue will be used and if the limit of the queue is reached then a 'RemoteClientMessageBufferException' will be thrown. -You can also get an Array with all the messages that the remote client has failed to send. Since the remote client events passes you an instance of the RemoteClient you have an easy way to act upon failure and do something with these messages (while waiting for them to be retried). - -.. code-block:: scala - - val pending: Array[Any] = Actor.remote.pendingMessages - Running Remote Server in untrusted mode --------------------------------------- @@ -255,24 +254,16 @@ You can also generate the secure cookie by using the 'Crypt' object and its 'gen The secure cookie is a cryptographically secure randomly generated byte array turned into a SHA-1 hash. -Remote Actors -------------- - -Akka has two types of remote actors: - -* Client-initiated and managed. Here it is the client that creates the remote actor and "moves it" to the server. -* Server-initiated and managed. Here it is the server that creates the remote actor and the client can ask for a handle to this actor. - -They are good for different use-cases. 
The client-initiated are great when you want to monitor an actor on another node since it allows you to link to it and supervise it using the regular supervision semantics. They also make RPC completely transparent. The server-initiated, on the other hand, are great when you have a service running on the server that you want clients to connect to, and you want full control over the actor on the server side for security reasons etc. - Client-managed Remote Actors -^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +---------------------------- DEPRECATED AS OF 1.1 -When you define an actors as being remote it is instantiated as on the remote host and your local actor becomes a proxy, it works as a handle to the remote actor. The real execution is always happening on the remote node. +The client creates the remote actor and "moves it" to the server. -Actors can be made remote by calling remote().actorOf[MyActor](host, port) +When you define an actor as being remote it is instantiated as on the remote host and your local actor becomes a proxy, it works as a handle to the remote actor. The real execution is always happening on the remote node. + +Actors can be made remote by calling remote.actorOf[MyActor](host, port) Here is an example: @@ -280,29 +271,30 @@ Here is an example: import akka.actor.Actor - class MyActor extends RemoteActor() { + class MyActor extends Actor { def receive = { case "hello" => self.reply("world") } } - val remote = Actor.remote().actorOf[MyActor]("192.68.23.769", 2552) + val remoteActor = Actor.remote.actorOf[MyActor]("192.68.23.769", 2552) An Actor can also start remote child Actors through one of the 'spawn/link' methods. These will start, link and make the Actor remote atomically. .. code-block:: scala ... - spawnRemote[MyActor](hostname, port) - spawnLinkRemote[MyActor](hostname, port) + self.spawnRemote[MyActor](hostname, port, timeout) + self.spawnLinkRemote[MyActor](hostname, port, timeout) ... 
Server-managed Remote Actors ---------------------------- +Here it is the server that creates the remote actor and the client can ask for a handle to this actor. + Server side setup ^^^^^^^^^^^^^^^^^ - The API for server managed remote actors is really simple. 2 methods only: .. code-block:: scala @@ -332,7 +324,7 @@ Session bound server side setup Session bound server managed remote actors work by creating and starting a new actor for every client that connects. Actors are stopped automatically when the client disconnects. The client side is the same as regular server managed remote actors. Use the function registerPerSession instead of register. Session bound actors are useful if you need to keep state per session, e.g. username. -They are also useful if you need to perform some cleanup when a client disconnects by overriding the postStop method as described `here `_ +They are also useful if you need to perform some cleanup when a client disconnects by overriding the postStop method as described `here `__ .. code-block:: scala @@ -358,10 +350,10 @@ There are many variations on the 'remote#actorFor' method. Here are some of them .. code-block:: scala - ... = actorFor(className, hostname, port) - ... = actorFor(className, timeout, hostname, port) - ... = actorFor(uuid, className, hostname, port) - ... = actorFor(uuid, className, timeout, hostname, port) + ... = remote.actorFor(className, hostname, port) + ... = remote.actorFor(className, timeout, hostname, port) + ... = remote.actorFor(uuid, className, hostname, port) + ... = remote.actorFor(uuid, className, timeout, hostname, port) ... // etc All of these also have variations where you can pass in an explicit 'ClassLoader' which can be used when deserializing messages sent from the remote actor. @@ -371,11 +363,16 @@ Running sample Here is a complete running sample (also available `here `_): +Paste in the code below into two sbt concole shells. 
Then run: + +- ServerInitiatedRemoteActorServer.run() in one shell +- ServerInitiatedRemoteActorClient.run() in the other shell + .. code-block:: scala import akka.actor.Actor - import akka.util.Logging import Actor._ + import akka.event.EventHandler class HelloWorldActor extends Actor { def receive = { @@ -385,27 +382,27 @@ Here is a complete running sample (also available `here self.reply("world") } @@ -430,11 +427,9 @@ Here is an example of overriding the 'id' field: val actor = remote.actorOf[MyActor]("192.68.23.769", 2552) -Remote Typed Actors -------------------- -Client-managed Remote Actors -^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Client-managed Remote Typed Actors +---------------------------------- DEPRECATED AS OF 1.1 @@ -458,13 +453,13 @@ You can also define an Typed Actor to be remote programmatically when creating i ... // use pojo as usual -Server-managed Remote Actors -^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Server-managed Remote Typed Actors +---------------------------------- WARNING: Remote TypedActors do not work with overloaded methods on your TypedActor, refrain from using overloading. Server side setup -***************** +^^^^^^^^^^^^^^^^^ The API for server managed remote typed actors is nearly the same as for untyped actor @@ -507,20 +502,20 @@ They are also useful if you need to perform some cleanup when a client disconnec Note that the second argument in registerTypedPerSessionActor is an implicit function. It will be called to create an actor every time a session is established. Client side usage -***************** +^^^^^^^^^^^^^^^^^ .. code-block:: scala val actor = remote.typedActorFor(classOf[RegistrationService], "user-service", 5000L, "localhost", 2552) actor.registerUser(…) -There are variations on the 'RemoteClient#typedActorFor' method. Here are some of them: +There are variations on the 'remote#typedActorFor' method. Here are some of them: .. code-block:: scala - ... = typedActorFor(interfaceClazz, serviceIdOrClassName, hostname, port) - ... 
= typedActorFor(interfaceClazz, serviceIdOrClassName, timeout, hostname, port) - ... = typedActorFor(interfaceClazz, serviceIdOrClassName, timeout, hostname, port, classLoader) + ... = remote.typedActorFor(interfaceClazz, serviceIdOrClassName, hostname, port) + ... = remote.typedActorFor(interfaceClazz, serviceIdOrClassName, timeout, hostname, port) + ... = remote.typedActorFor(interfaceClazz, serviceIdOrClassName, timeout, hostname, port, classLoader) Data Compression Configuration ------------------------------ @@ -583,15 +578,19 @@ So a simple listener actor can look like this: .. code-block:: scala + import akka.actor.Actor + import akka.actor.Actor._ + import akka.remoteinterface._ + val listener = actorOf(new Actor { def receive = { - case RemoteClientError(cause, client, address) => ... // act upon error - case RemoteClientDisconnected(client, address) => ... // act upon disconnection - case RemoteClientConnected(client, address) => ... // act upon connection - case RemoteClientStarted(client, address) => ... // act upon client shutdown - case RemoteClientShutdown(client, address) => ... // act upon client shutdown - case RemoteClientWriteFailed(request, cause, client, address) => ... // act upon write failure - case _ => //ignore other + case RemoteClientError(cause, client, address) => //... act upon error + case RemoteClientDisconnected(client, address) => //... act upon disconnection + case RemoteClientConnected(client, address) => //... act upon connection + case RemoteClientStarted(client, address) => //... act upon client shutdown + case RemoteClientShutdown(client, address) => //... act upon client shutdown + case RemoteClientWriteFailed(request, cause, client, address) => //... act upon write failure + case _ => // ignore other } }).start() @@ -637,15 +636,19 @@ So a simple listener actor can look like this: .. 
code-block:: scala + import akka.actor.Actor + import akka.actor.Actor._ + import akka.remoteinterface._ + val listener = actorOf(new Actor { def receive = { - case RemoteServerStarted(server) => ... // act upon server start - case RemoteServerShutdown(server) => ... // act upon server shutdown - case RemoteServerError(cause, server) => ... // act upon server error - case RemoteServerClientConnected(server, clientAddress) => ... // act upon client connection - case RemoteServerClientDisconnected(server, clientAddress) => ... // act upon client disconnection - case RemoteServerClientClosed(server, clientAddress) => ... // act upon client connection close - case RemoteServerWriteFailed(request, cause, server, clientAddress) => ... // act upon server write failure + case RemoteServerStarted(server) => //... act upon server start + case RemoteServerShutdown(server) => //... act upon server shutdown + case RemoteServerError(cause, server) => //... act upon server error + case RemoteServerClientConnected(server, clientAddress) => //... act upon client connection + case RemoteServerClientDisconnected(server, clientAddress) => //... act upon client disconnection + case RemoteServerClientClosed(server, clientAddress) => //... act upon client connection close + case RemoteServerWriteFailed(request, cause, server, clientAddress) => //... act upon server write failure } }).start() @@ -662,7 +665,7 @@ Message Serialization All messages that are sent to remote actors needs to be serialized to binary format to be able to travel over the wire to the remote node. This is done by letting your messages extend one of the traits in the 'akka.serialization.Serializable' object. If the messages don't implement any specific serialization trait then the runtime will try to use standard Java serialization. -Here are some examples, but full documentation can be found in the `Serialization section `_. 
+Here are some examples, but full documentation can be found in the :ref:`serialization-scala`. Scala JSON ^^^^^^^^^^ @@ -676,7 +679,7 @@ Protobuf Protobuf message specification needs to be compiled with 'protoc' compiler. -.. code-block:: scala +:: message ProtobufPOJO { required uint64 id = 1; @@ -694,29 +697,3 @@ Using the generated message builder to send the message to a remote actor: .setName("Coltrane") .build -SBinary -^^^^^^^ - -``_ -case class User(firstNameLastName: Tuple2[String, String], email: String, age: Int) extends Serializable.SBinary[User] { - import sbinary.DefaultProtocol._ - - def this() = this(null, null, 0) - - implicit object UserFormat extends Format[User] { - def reads(in : Input) = User( - read[Tuple2[String, String]](in), - read[String](in), - read[Int](in)) - def writes(out: Output, value: User) = { - write[Tuple2[String, String]](out, value. firstNameLastName) - write[String](out, value.email) - write[Int](out, value.age) - } - } - - def fromBytes(bytes: Array[Byte]) = fromByteArray[User](bytes) - - def toBytes: Array[Byte] = toByteArray(this) -} -``_ diff --git a/akka-docs/pending/routing-scala.rst b/akka-docs/scala/routing.rst similarity index 97% rename from akka-docs/pending/routing-scala.rst rename to akka-docs/scala/routing.rst index 4cb825219e..6f90f4fcce 100644 --- a/akka-docs/pending/routing-scala.rst +++ b/akka-docs/scala/routing.rst @@ -1,4 +1,9 @@ -**Routing / Patterns (Scala)** +Routing (Scala) +=============== + +.. sidebar:: Contents + + .. contents:: :local: Akka-core includes some building blocks to build more complex message flow handlers, they are listed and explained below: @@ -7,7 +12,7 @@ Dispatcher A Dispatcher is an actor that routes incoming messages to outbound actors. -To use it you can either create a Dispatcher through the **dispatcherActor()** factory method +To use it you can either create a Dispatcher through the ``dispatcherActor()`` factory method .. 
code-block:: scala @@ -69,7 +74,7 @@ LoadBalancer A LoadBalancer is an actor that forwards messages it receives to a boundless sequence of destination actors. -Example using the **loadBalancerActor()** factory method: +Example using the ``loadBalancerActor()`` factory method: .. code-block:: scala @@ -147,11 +152,10 @@ Selection ^^^^^^^^^ All pools require a *Selector* to be mixed-in. This trait controls how and how many actors in the pool will receive the incoming message. Define *selectionCount* to some positive number greater than one to route to multiple actors. Currently two are provided: + * `SmallestMailboxSelector `_ - Using the exact same logic as the iterator of the same name, the pooled actor with the fewest number of pending messages will be chosen. * `RoundRobinSelector `_ - Performs a very simple index-based selection, wrapping around the end of the list, very much like the CyclicIterator does. -* - Partial Fills ************* @@ -161,6 +165,7 @@ Capacity ^^^^^^^^ As you'd expect, capacity traits determine how the pool is funded with actors. There are two types of strategies that can be employed: + * `FixedCapacityStrategy `_ - When you mix this into your actor pool, you define a pool size and when the pool is started, it will have that number of actors within to which messages will be delegated. * `BoundedCapacityStrategy `_ - When you mix this into your actor pool, you define upper and lower bounds, and when the pool is started, it will have the minimum number of actors in place to handle messages. You must also mix-in a Capacitor and a Filter when using this strategy (see below). @@ -204,6 +209,7 @@ A *Filter* is a trait that modifies the raw pressure reading returned from a Cap } Here we see how the filter function will have the chance to modify the pressure reading to influence the capacity change. You are free to implement filter() however you like. 
We provide a `Filter `_ trait that evaluates both a rampup and a backoff subfilter to determine how to use the pressure reading to alter the pool capacity. There are several subfilters available to use, though again you may create whatever makes the most sense for you pool: + * `BasicRampup `_ - When pressure exceeds current capacity, increase the number of actors in the pool by some factor (*rampupRate*) of the current pool size. * `BasicBackoff `_ - When the pressure ratio falls under some predefined amount (*backoffThreshold*), decrease the number of actors in the pool by some factor of the current pool size. * `RunningMeanBackoff `_ - This filter tracks the average pressure-to-capacity over the lifetime of the pool (or since the last time the filter was reset) and will begin to reduce capacity once this mean falls below some predefined amount. The number of actors that will be stopped is determined by some factor of the difference between the current capacity and pressure. The idea behind this filter is to reduce the likelihood of "thrashing" (removing then immediately creating...) pool actors by delaying the backoff until some quiescent stage of the pool. Put another way, use this subfilter to allow quick rampup to handle load and more subtle backoff as that decreases over time. @@ -219,18 +225,16 @@ Examples with SmallestMailboxSelector with BasicNoBackoffFilter { - def factory = actorOf(new Actor {def receive = {case n:Int => - Thread.sleep(n) - counter.incrementAndGet - latch.countDown()}}) - + def receive = _route def lowerBound = 2 def upperBound = 4 def rampupRate = 0.1 def partialFill = true def selectionCount = 1 - def instance = factory - def receive = _route + def instance = actorOf(new Actor {def receive = {case n:Int => + Thread.sleep(n) + counter.incrementAndGet + latch.countDown()}}) } .. 
code-block:: scala @@ -243,11 +247,7 @@ Examples with RunningMeanBackoff with BasicRampup { - - def factory = actorOf(new Actor {def receive = {case n:Int => - Thread.sleep(n) - latch.countDown()}}) - + def receive = _route def lowerBound = 1 def upperBound = 5 def pressureThreshold = 1 @@ -256,8 +256,9 @@ Examples def rampupRate = 0.1 def backoffRate = 0.50 def backoffThreshold = 0.50 - def instance = factory - def receive = _route + def instance = actorOf(new Actor {def receive = {case n:Int => + Thread.sleep(n) + latch.countDown()}}) } Taken from the unit test `spec `_. diff --git a/akka-docs/pending/security.rst b/akka-docs/scala/security.rst similarity index 82% rename from akka-docs/pending/security.rst rename to akka-docs/scala/security.rst index cae23fbdd5..9d186b5ea2 100644 --- a/akka-docs/pending/security.rst +++ b/akka-docs/scala/security.rst @@ -1,11 +1,16 @@ -Security -======== +HTTP Security +============= + +.. sidebar:: Contents + + .. contents:: :local: Module stability: **IN PROGRESS** Akka supports security for access to RESTful Actors through `HTTP Authentication `_. The security is implemented as a jersey ResourceFilter which delegates the actual authentication to an authentication actor. Akka provides authentication via the following authentication schemes: + * `Basic Authentication `_ * `Digest Authentication `_ * `Kerberos SPNEGO Authentication `_ @@ -13,16 +18,14 @@ Akka provides authentication via the following authentication schemes: The authentication is performed by implementations of akka.security.AuthenticationActor. Akka provides a trait for each authentication scheme: + * BasicAuthenticationActor * DigestAuthenticationActor * SpnegoAuthenticationActor -With Akka’s excellent support for distributed databases, it’s a one-liner to do a distributed authentication scheme. 
- -^ Setup -===== +----- To secure your RESTful actors you need to perform the following steps: @@ -54,6 +57,7 @@ To secure your RESTful actors you need to perform the following steps: 3. Start your authentication actor in your 'Boot' class. The security package consists of the following parts: 4. Secure your RESTful actors using class or resource level annotations: + * @DenyAll * @RolesAllowed(listOfRoles) * @PermitAll @@ -65,6 +69,7 @@ The akka-samples-security module contains a small sample application with sample You can start the sample app using the jetty plugin: mvn jetty:run. The RESTful actor can then be accessed using your browser of choice under: + * permit access only to users having the “chef” role: ``_ * public access: ``_ @@ -72,7 +77,6 @@ You can access the secured resource using any user for basic authentication (whi Digest authentication can be directly enabled in the sample app. Kerberos/SPNEGO authentication is a bit more involved an is described below. -^ Kerberos/SPNEGO Authentication ------------------------------ @@ -83,11 +87,12 @@ In a kerberos enabled environment a user will need to sign on only once. Subsequ Most prominently the kerberos protocol is used to authenticate users in a windows network. When deploying web applications to a corporate intranet an important feature will be to support the single sign on (SSO), which comes to make the application kerberos aware. How does it work (at least for REST actors)? -# When accessing a secured resource the server will check the request for the *Authorization* header as with basic or digest authentication. -# If it is not set, the server will respond with a challenge to “Negotiate”. The negotiation is in fact the NEGO part of the `SPNEGO `_ specification) -# The browser will then try to acquire a so called *service ticket* from a ticket granting service, i.e. 
the kerberos server -# The browser will send the *service ticket* to the web application encoded in the header value of the *Authorization*header -# The web application must validate the ticket based on a shared secret between the web application and the kerberos server. As a result the web application will know the name of the user + +- When accessing a secured resource the server will check the request for the *Authorization* header as with basic or digest authentication. +- If it is not set, the server will respond with a challenge to "Negotiate". The negotiation is in fact the NEGO part of the `SPNEGO `_ specification +- The browser will then try to acquire a so called *service ticket* from a ticket granting service, i.e. the kerberos server +- The browser will send the *service ticket* to the web application encoded in the header value of the *Authorization* header +- The web application must validate the ticket based on a shared secret between the web application and the kerberos server. As a result the web application will know the name of the user To activate the kerberos/SPNEGO authentication for your REST actor you need to enable the kerberos/SPNEGOauthentication actor in the akka.conf like this: @@ -103,8 +108,9 @@ To activate the kerberos/SPNEGO authentication for your REST actor you need to e } Furthermore you must provide the SpnegoAuthenticator with the following information. -# Service principal name: the name of your web application in the kerberos servers user database. This name is always has the form “HTTP/{server}@{realm}” -# Path to the keytab file: this is a kind of certificate for your web application to acquire tickets from the kerberos server + +- Service principal name: the name of your web application in the kerberos servers user database. This name is always has the form ``HTTP/{server}@{realm}`` +- Path to the keytab file: this is a kind of certificate for your web application to acquire tickets from the kerberos server .. 
code-block:: ruby @@ -122,7 +128,6 @@ Furthermore you must provide the SpnegoAuthenticator with the following informat ... } -^ How to setup kerberos on localhost for Ubuntu --------------------------------------------- @@ -233,9 +238,9 @@ This seems correct. To remove the ticket cache simply type kdestroy. Verifying - Password: eckart@dilbert:~$ -This command will create a keytab file for the service principal named “http.keytab” in the current directory. You can specify other encryption methods than ‘aes256-cts-hmac-sha1-96’, but this is the e default encryption method for the heimdal client, so there is no additional configuration needed. You can specify other encryption types in the krb5.conf. +This command will create a keytab file for the service principal named ``http.keytab`` in the current directory. You can specify other encryption methods than ‘aes256-cts-hmac-sha1-96’, but this is the e default encryption method for the heimdal client, so there is no additional configuration needed. You can specify other encryption types in the krb5.conf. -Note that you might need to install the unlimited strength policy files for java from here:``_ to use the aes256 encryption from your application. +Note that you might need to install the unlimited strength policy files for java from here: ``_ to use the aes256 encryption from your application. Again we can test if the keytab generation worked with the kinit command: @@ -249,13 +254,13 @@ Again we can test if the keytab generation worked with the kinit command: Issued Expires Principal Oct 24 21:59:20 Oct 25 06:59:20 krbtgt/EXAMPLE.COM@EXAMPLE.COM -Now point the configuration of the key in 'akka.conf' to the correct location and set the correct service principal name. The web application should now startup and produce at least a 401 response with a header “WWW-Authenticate” = “Negotiate”. The last step is to configure the browser. 
+Now point the configuration of the key in 'akka.conf' to the correct location and set the correct service principal name. The web application should now startup and produce at least a 401 response with a header ``WWW-Authenticate`` = "Negotiate". The last step is to configure the browser. 6. Set up Firefox to use Kerberos/SPNEGO -This is done by typing 'about:config'. Filter the config entries for “network.neg” and set the config entries “network.negotiate-auth.delegation-uris” and “network.negotiate-auth.trusted-uris” to “localhost”. +This is done by typing ``about:config``. Filter the config entries for ``network.neg`` and set the config entries ``network.negotiate-auth.delegation-uris`` and ``network.negotiate-auth.trusted-uris`` to ``localhost``. and now … 7. Access the RESTful Actor. 8. Have fun -… but acquire an initial ticket for the user principal first: kinit zaphod +… but acquire an initial ticket for the user principal first: ``kinit zaphod`` diff --git a/akka-docs/pending/serialization-scala.rst b/akka-docs/scala/serialization.rst similarity index 83% rename from akka-docs/pending/serialization-scala.rst rename to akka-docs/scala/serialization.rst index 166a3291fe..fc4f9b2af2 100644 --- a/akka-docs/pending/serialization-scala.rst +++ b/akka-docs/scala/serialization.rst @@ -1,10 +1,16 @@ +.. _serialization-scala: + Serialization (Scala) ===================== +.. sidebar:: Contents + + .. contents:: :local: + Module stability: **SOLID** Serialization of ActorRef -========================= +------------------------- An Actor can be serialized in two different ways: @@ -13,7 +19,7 @@ An Actor can be serialized in two different ways: Both of these can be sent as messages over the network and/or store them to disk, in a persistent storage backend etc. -Actor serialization in Akka is implemented through a type class 'Format[T <: Actor]' which publishes the 'fromBinary' and 'toBinary' methods for serialization. 
Here's the complete definition of the type class: +Actor serialization in Akka is implemented through a type class ``Format[T <: Actor]`` which publishes the ``fromBinary`` and ``toBinary`` methods for serialization. Here's the complete definition of the type class: .. code-block:: scala @@ -31,15 +37,14 @@ Actor serialization in Akka is implemented through a type class 'Format[T <: Act // client needs to implement Format[] for the respective actor trait Format[T <: Actor] extends FromBinary[T] with ToBinary[T] -**Deep serialization of an Actor and ActorRef** ------------------------------------------------ +Deep serialization of an Actor and ActorRef +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -You can serialize the whole actor deeply, e.g. both the 'ActorRef' and then instance of its 'Actor'. This can be useful if you want to move an actor from one node to another, or if you want to store away an actor, with its state, into a database. +You can serialize the whole actor deeply, e.g. both the ``ActorRef`` and the instance of its ``Actor``. This can be useful if you want to move an actor from one node to another, or if you want to store away an actor, with its state, into a database. Here is an example of how to serialize an Actor. Step 1: Define the actor -^^^^^^^^^^^^^^^^^^^^^^^^ .. code-block:: scala @@ -53,11 +58,15 @@ Step 1: Define the actor } } -Step 2: Implement the type class for the actor -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Step 2: Implement the type class for the actor. ProtobufProtocol.Counter is something you need to define yourself, as +explained in the Protobuf section. ..
code-block:: scala + import akka.serialization.{Serializer, Format} + import akka.actor.Actor + import akka.actor.Actor._ + object BinaryFormatMyActor { implicit object MyActorFormat extends Format[MyActor] { def fromBinary(bytes: Array[Byte], act: MyActor) = { @@ -66,13 +75,11 @@ Step 2: Implement the type class for the actor act } def toBinary(ac: MyActor) = - ProtobufProtocol.Counter.newBuilder.setCount(ac.count).build.toByteArray - } + ProtobufProtocol.Counter.newBuilder.setCount(ac.count).build.toByteArray } } Step 3: Import the type class module definition and serialize / de-serialize -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. code-block:: scala @@ -90,7 +97,8 @@ Step 3: Import the type class module definition and serialize / de-serialize (actor2 !! "hello").getOrElse("_") should equal("world 3") } -**Helper Type Class for Stateless Actors** +Helper Type Class for Stateless Actors +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ If your actor is stateless, then you can use the helper trait that Akka provides to serialize / de-serialize. Here's the definition: @@ -138,9 +146,10 @@ and use it for serialization: (actor2 !! "hello").getOrElse("_") should equal("world") } -**Helper Type Class for actors with external serializer** +Helper Type Class for actors with external serializer +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Use the trait 'SerializerBasedActorFormat' for specifying serializers. +Use the trait ``SerializerBasedActorFormat`` for specifying serializers. .. code-block:: scala @@ -154,7 +163,7 @@ For a Java serializable actor: .. code-block:: scala - @serializable class MyJavaSerializableActor extends Actor { + class MyJavaSerializableActor extends Actor with scala.Serializable { var count = 0 def receive = { @@ -168,6 +177,8 @@ Create a module for the type class .. .. 
code-block:: scala + import akka.serialization.{SerializerBasedActorFormat, Serializer} + object BinaryFormatMyJavaSerializableActor { implicit object MyJavaSerializableActorFormat extends SerializerBasedActorFormat[MyJavaSerializableActor] { val serializer = Serializers.Java @@ -179,6 +190,7 @@ and serialize / de-serialize .. .. code-block:: scala it("should be able to serialize and de-serialize a stateful actor with a given serializer") { + import akka.actor.Actor._ import akka.serialization.ActorSerialization._ import BinaryFormatMyJavaSerializableActor._ @@ -192,51 +204,53 @@ and serialize / de-serialize .. (actor2 !! "hello").getOrElse("_") should equal("world 3") } -**Serialization of a RemoteActorRef** -------------------------------------- +Serialization of a RemoteActorRef +--------------------------------- -You can serialize an 'ActorRef' to an immutable, network-aware Actor reference that can be freely shared across the network, a reference that "remembers" and stay mapped to its original Actor instance and host node, and will always work as expected. +You can serialize an ``ActorRef`` to an immutable, network-aware Actor reference that can be freely shared across the network, a reference that "remembers" and stay mapped to its original Actor instance and host node, and will always work as expected. -The 'RemoteActorRef' serialization is based upon Protobuf (Google Protocol Buffers) and you don't need to do anything to use it, it works on any 'ActorRef' (as long as the actor has **not** implemented one of the 'SerializableActor' traits, since then deep serialization will happen). +The ``RemoteActorRef`` serialization is based upon Protobuf (Google Protocol Buffers) and you don't need to do anything to use it, it works on any ``ActorRef``. -Currently Akka will **not** autodetect an 'ActorRef' as part of your message and serialize it for you automatically, so you have to do that manually or as part of your custom serialization mechanisms. 
+Currently Akka will **not** autodetect an ``ActorRef`` as part of your message and serialize it for you automatically, so you have to do that manually or as part of your custom serialization mechanisms. Here is an example of how to serialize an Actor. .. code-block:: scala + import akka.serialization.RemoteActorSerialization._ + val actor1 = actorOf[MyActor] - val bytes = toBinary(actor1) + val bytes = toRemoteActorRefProtocol(actor1).toByteArray -To deserialize the 'ActorRef' to a 'RemoteActorRef' you need to use the 'fromBinaryToRemoteActorRef(bytes: Array[Byte])' method on the 'ActorRef' companion object: +To deserialize the ``ActorRef`` to a ``RemoteActorRef`` you need to use the ``fromBinaryToRemoteActorRef(bytes: Array[Byte])`` method on the ``ActorRef`` companion object: .. code-block:: scala - import RemoteActorSerialization._ + import akka.serialization.RemoteActorSerialization._ val actor2 = fromBinaryToRemoteActorRef(bytes) -You can also pass in a class loader to load the 'ActorRef' class and dependencies from: +You can also pass in a class loader to load the ``ActorRef`` class and dependencies from: .. code-block:: scala - import RemoteActorSerialization._ + import akka.serialization.RemoteActorSerialization._ val actor2 = fromBinaryToRemoteActorRef(bytes, classLoader) Deep serialization of a TypedActor ---------------------------------- -Serialization of typed actors works almost the same way as untyped actors. You can serialize the whole actor deeply, e.g. both the 'proxied ActorRef' and the instance of its 'TypedActor'. +Serialization of typed actors works almost the same way as untyped actors. You can serialize the whole actor deeply, e.g. both the 'proxied ActorRef' and the instance of its ``TypedActor``. Here is the example from above implemented as a TypedActor. -^ Step 1: Define the actor -^^^^^^^^^^^^^^^^^^^^^^^^ .. 
code-block:: scala + import akka.actor.TypedActor + trait MyTypedActor { def requestReply(s: String) : String def oneWay() : Unit @@ -249,13 +263,18 @@ Step 1: Define the actor count = count + 1 "world " + count } + + override def oneWay() { + count = count + 1 + } } Step 2: Implement the type class for the actor -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. code-block:: scala + import akka.serialization.{Serializer, Format} + class MyTypedActorFormat extends Format[MyTypedActorImpl] { def fromBinary(bytes: Array[Byte], act: MyTypedActorImpl) = { val p = Serializers.Protobuf.fromBinary(bytes, Some(classOf[ProtobufProtocol.Counter])).asInstanceOf[ProtobufProtocol.Counter] @@ -266,10 +285,11 @@ Step 2: Implement the type class for the actor } Step 3: Import the type class module definition and serialize / de-serialize -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. code-block:: scala + import akka.serialization.TypedActorSerialization._ + val typedActor1 = TypedActor.newInstance(classOf[MyTypedActor], classOf[MyTypedActorImpl], 1000) val f = new MyTypedActorFormat @@ -278,23 +298,23 @@ Step 3: Import the type class module definition and serialize / de-serialize val typedActor2: MyTypedActor = fromBinaryJ(bytes, f) //type hint needed typedActor2.requestReply("hello") -- + Serialization of a remote typed ActorRef ---------------------------------------- -To deserialize the TypedActor to a 'RemoteTypedActorRef' (an aspectwerkz proxy to a RemoteActorRef) you need to use the 'fromBinaryToRemoteTypedActorRef(bytes: Array[Byte])' method on 'RemoteTypedActorSerialization' object: +To deserialize the TypedActor to a ``RemoteTypedActorRef`` (an aspectwerkz proxy to a RemoteActorRef) you need to use the ``fromBinaryToRemoteTypedActorRef(bytes: Array[Byte])`` method on ``RemoteTypedActorSerialization`` object: .. 
code-block:: scala - import RemoteTypedActorSerialization._ + import akka.serialization.RemoteTypedActorSerialization._ val typedActor = fromBinaryToRemoteTypedActorRef(bytes) // you can also pass in a class loader val typedActor2 = fromBinaryToRemoteTypedActorRef(bytes, classLoader) Compression -=========== +----------- Akka has a helper class for doing compression of binary data. This can be useful for example when storing data in one of the backing storages. It currently supports LZF which is a very fast compression algorithm suited for runtime dynamic compression. @@ -309,24 +329,37 @@ Here is an example of how it can be used: val uncompressBytes = Compression.LZF.uncompress(compressBytes) Using the Serializable trait and Serializer class for custom serialization -========================================================================== +-------------------------------------------------------------------------- -If you are sending messages to a remote Actor and these messages implement one of the predefined interfaces/traits in the 'akka.serialization.Serializable.*' object, then Akka will transparently detect which serialization format it should use as wire protocol and will automatically serialize and deserialize the message according to this protocol. +If you are sending messages to a remote Actor and these messages implement one of the predefined interfaces/traits in the ``akka.serialization.Serializable.*`` object, then Akka will transparently detect which serialization format it should use as wire protocol and will automatically serialize and deserialize the message according to this protocol. 
Each serialization interface/trait in +<<<<<<< HEAD:akka-docs/pending/serialization-scala.rst * akka.serialization.Serializable.* > has a matching serializer in * akka.serialization.Serializers.* +======= + +- akka.serialization.Serializable.* + +has a matching serializer in + +- akka.serialization.Serializer.* +>>>>>>> 62427f518c217e685c14577c8e5749d9b07c1f82:akka-docs/scala/serialization.rst Note however that if you are using one of the Serializable interfaces then you don’t have to do anything else in regard to sending remote messages. The ones currently supported are (besides the default which is regular Java serialization): -* ScalaJSON (Scala only) -* JavaJSON (Java but some Scala structures) -* SBinary (Scala only) -* Protobuf (Scala and Java) +<<<<<<< HEAD:akka-docs/pending/serialization-scala.rst Apart from the above, Akka also supports Scala object serialization through `SJSON `_ that implements APIs similar to 'akka.serialization.Serializers.*'. See the section on SJSON below for details. +======= +- ScalaJSON (Scala only) +- JavaJSON (Java but some Scala structures) +- Protobuf (Scala and Java) + +Apart from the above, Akka also supports Scala object serialization through `SJSON `_ that implements APIs similar to ``akka.serialization.Serializer.*``. See the section on SJSON below for details. +>>>>>>> 62427f518c217e685c14577c8e5749d9b07c1f82:akka-docs/scala/serialization.rst Protobuf -------- @@ -372,15 +405,16 @@ The remote Actor can then receive the Protobuf message typed as-is: JSON: Scala ----------- -Use the akka.serialization.Serialization.ScalaJSON base class with its toJSON method. Akka’s Scala JSON is based upon the SJSON library. +Use the ``akka.serialization.Serializable.ScalaJSON`` base class with its toJSON method. Akka’s Scala JSON is based upon the SJSON library. For your POJOs to be able to serialize themselves you have to extend the ScalaJSON[] trait as follows. 
JSON serialization is based on a type class protocol which you need to define for your own abstraction. The instance of the type class is defined as an implicit object which is used for serialization and de-serialization. You also need to implement the methods in terms of the APIs which sjson publishes. .. code-block:: scala - import akka.serialization.Serializer + import akka.serialization._ import akka.serialization.Serializable.ScalaJSON - import scala.reflect.BeanInfo + import akka.serialization.JsonSerialization._ + import akka.serialization.DefaultProtocol._ case class MyMessage(val id: String, val value: Tuple2[String, Int]) extends ScalaJSON[MyMessage] { // type class instance @@ -422,7 +456,7 @@ Here are the steps that you need to follow: .. code-block:: scala - import DefaultProtocol._ + import akka.serialization.DefaultProtocol._ implicit val MyMessageFormat: sjson.json.Format[MyMessage] = asProduct2("id", "value")(MyMessage)(MyMessage.unapply(_).get) @@ -430,7 +464,12 @@ Here are the steps that you need to follow: .. code-block:: scala +<<<<<<< HEAD:akka-docs/pending/serialization-scala.rst import akka.serialization.Serializers.ScalaJSON +======= + import akka.serialization.Serializer.ScalaJSON + import akka.serialization.JsonSerialization._ +>>>>>>> 62427f518c217e685c14577c8e5749d9b07c1f82:akka-docs/scala/serialization.rst val o = MyMessage("dg", ("akka", 100)) fromjson[MyMessage](tojson(o)) should equal(o) @@ -443,13 +482,14 @@ You can also use the Serializer abstraction to serialize using the reflection ba .. 
code-block:: scala - import akka.serialization.Serializer import scala.reflect.BeanInfo + import akka.serialization.Serializer @BeanInfo case class Foo(name: String) { def this() = this(null) // default constructor is necessary for deserialization } +<<<<<<< HEAD:akka-docs/pending/serialization-scala.rst val foo = new Foo("bar") val json = Serializers.ScalaJSON.out(foo) @@ -458,6 +498,17 @@ You can also use the Serializer abstraction to serialize using the reflection ba val fooCopy2 = Serializers.ScalaJSON.in(new String(json)) // can also take a string as input val fooCopy3 = Serializers.ScalaJSON.in[Foo](json).asInstanceOf[Foo] +======= + val foo = Foo("bar") + + val json = Serializer.ScalaJSON.toBinary(foo) + + val fooCopy = Serializer.ScalaJSON.fromBinary(json) // returns a JsObject as an AnyRef + + val fooCopy2 = Serializer.ScalaJSON.fromJSON(new String(json)) // can also take a string as input + + val fooCopy3 = Serializer.ScalaJSON.fromBinary[Foo](json).asInstanceOf[Foo] +>>>>>>> 62427f518c217e685c14577c8e5749d9b07c1f82:akka-docs/scala/serialization.rst Classes without a @BeanInfo annotation cannot be serialized as JSON. So if you see something like that: @@ -475,14 +526,14 @@ So if you see something like that: it means, that you haven't got a @BeanInfo annotation on your class. -You may also see this exception when trying to serialize a case class with out an attribute like this: +You may also see this exception when trying to serialize a case class without any attributes, like this: .. code-block:: scala @BeanInfo case class Empty() // cannot be serialized - SJSON: Scala -------------- +SJSON: Scala +------------ SJSON supports serialization of Scala objects into JSON. It implements support for built in Scala structures like List, Map or String as well as custom objects. SJSON is available as an Apache 2 licensed project on Github `here `_. @@ -496,6 +547,8 @@ where Address is a custom class defined by the user. Using SJSON, I can store it .. 
code-block:: scala + val serializer = sjson.json.Serializer.SJSON + addr should equal( serializer.in[Address](serializer.out(addr))) @@ -517,6 +570,8 @@ What you get back from is a JsValue, an abstraction of the JSON object model. Fo .. code-block:: scala + val serializer = sjson.json.Serializer.SJSON + val a = serializer.in[AnyRef](serializer.out(addr)) // use extractors @@ -535,7 +590,7 @@ What you get back from is a JsValue, an abstraction of the JSON object model. Fo Serialization of Embedded Objects ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - SJSON supports serialization of Scala objects that have other embedded objects. Suppose you have the following Scala classes .. Here Contact has an embedded Address Map .. +SJSON supports serialization of Scala objects that have other embedded objects. Suppose you have the following Scala classes .. Here Contact has an embedded Address Map .. .. code-block:: scala @@ -564,6 +619,8 @@ With SJSON, I can do the following: val c = Contact("Bob", Map("residence" -> a1, "office" -> a2, "club" -> a3)) val co = serializer.out(c) + val serializer = sjson.json.Serializer.SJSON + // with class specified c should equal(serializer.in[Contact](co)) @@ -593,7 +650,7 @@ With SJSON, I can do the following: "Market Street" should equal( (r ># { ('addresses ? obj) andThen ('residence ? obj) andThen ('street ? str) })) -^ + Changing property names during serialization ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -619,7 +676,7 @@ When this will be serialized out, the property name will be changed. JsString("ISBN") -> JsString("012-456372") ) -^ + Serialization with ignore properties ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -646,6 +703,8 @@ The annotation @JSONProperty can be used to selectively ignore fields. When I se .. 
code-block:: scala + val serializer = sjson.json.Serializer.SJSON + it("should ignore issn field") { val j = Journal(100, "IEEE Computer", "Alex Payne", "012-456372") serializer.in[Journal](serializer.out(j)).asInstanceOf[Journal].issn should equal(null) @@ -653,7 +712,7 @@ The annotation @JSONProperty can be used to selectively ignore fields. When I se Similarly, we can ignore properties of an object **only** if they are null and not ignore otherwise. Just specify the annotation @JSONProperty as @JSONProperty {val ignoreIfNull = true}. -^ + Serialization with Type Hints for Generic Data Members ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -676,6 +735,8 @@ SJSON will pick up during serialization. Now we can say: .. code-block:: scala + val serializer = sjson.json.Serializer.SJSON + val c = Contact("Bob", Map("residence" -> a1, "office" -> a2, "club" -> a3)) val co = serializer.out(c) @@ -704,6 +765,8 @@ Serialization works ok with optional members annotated as above. .. code-block:: scala + val serializer = sjson.json.Serializer.SJSON + describe("Bean with optional bean member serialization") { it("should serialize with Option defined") { val c = new ContactWithOptionalAddr("Debasish Ghosh", @@ -751,13 +814,15 @@ and the serialization code like the following: val res = sjson.json.Serializers.SJSON.in[D](json) val res1: D = res.asInstanceOf[D] println(res1) - } + } q } Note that the type hint on class D says A, but the actual instances that have been put into the object before serialization is one of the derived classes (B). During de-serialization, we have no idea of what can be inside D. The serializer.in API will fail since all hint it has is for A, which is abstract. In such cases, we need to handle the de-serialization by using extractors over the underlying data structure that we use for storing JSON objects, which is JsValue. Here's an example: .. 
code-block:: scala + val serializer = sjson.json.Serializer.SJSON + val test1 = new D(List(B("hello1"))) val json = serializer.out(test1) @@ -892,14 +957,18 @@ and the serialization in action in the REPL: There are other nifty ways to implement case class serialization using sjson. For more details, have a look at the `wiki `_ for sjson. -**JSON: Java** +JSON: Java +---------- -Use the akka.serialization.Serialization.JavaJSON base class with its toJSONmethod. Akka’s Java JSON is based upon the Jackson library. +Use the ``akka.serialization.Serializable.JavaJSON`` base class with its toJSONmethod. Akka’s Java JSON is based upon the Jackson library. -For your POJOs to be able to serialize themselves you have to extend the JavaJSON trait. +For your POJOs to be able to serialize themselves you have to extend the JavaJSON base class. .. code-block:: java + import akka.serialization.Serializable.JavaJSON; + import akka.serialization.SerializerFactory; + class MyMessage extends JavaJSON { private String name = null; public MyMessage(String name) { @@ -924,6 +993,7 @@ Use the akka.serialization.SerializerFactory.getJavaJSON to do generic JSON seri String json = factory.getJavaJSON().out(foo); Foo fooCopy = factory.getJavaJSON().in(json, Foo.class); +<<<<<<< HEAD:akka-docs/pending/serialization-scala.rst - SBinary: Scala @@ -970,9 +1040,7 @@ case class User(val usernamePassword: Tuple2[String, String], val email: String, write[Int](out, value.age) } } +======= +>>>>>>> 62427f518c217e685c14577c8e5749d9b07c1f82:akka-docs/scala/serialization.rst - def fromBytes(bytes: Array[Byte]) = fromByteArray[User](bytes) - def toBytes: Array[Byte] = toByteArray(this) -} -``_ diff --git a/akka-docs/pending/stm-scala.rst b/akka-docs/scala/stm.rst similarity index 73% rename from akka-docs/pending/stm-scala.rst rename to akka-docs/scala/stm.rst index 0e1249fc48..b11cf828e1 100644 --- a/akka-docs/pending/stm-scala.rst +++ b/akka-docs/scala/stm.rst @@ -1,10 +1,16 @@ +.. 
_stm-scala: + Software Transactional Memory (Scala) ===================================== +.. sidebar:: Contents + + .. contents:: :local: + Module stability: **SOLID** Overview of STM -=============== +--------------- An `STM `_ turns the Java heap into a transactional data set with begin/commit/rollback semantics. Very much like a regular database. It implements the first three letters in ACID; ACI: * Atomic @@ -12,18 +18,19 @@ An `STM `_ turns the * Isolated Generally, the STM is not needed very often when working with Akka. Some use-cases (that we can think of) are: -# When you really need composable message flows across many actors updating their **internal local** state but need them to do that atomically in one big transaction. Might not often, but when you do need this then you are screwed without it. -# When you want to share a datastructure across actors. -# When you need to use the persistence modules. + +- When you really need composable message flows across many actors updating their **internal local** state but need them to do that atomically in one big transaction. Might not be often, but when you do need this then you are screwed without it. +- When you want to share a datastructure across actors. +- When you need to use the persistence modules. Akka’s STM implements the concept in `Clojure’s `_ STM view on state in general. Please take the time to read `this excellent document `_ and view `this presentation `_ by Rich Hickey (the genius behind Clojure), since it forms the basis of Akka’s view on STM and state in general. The STM is based on Transactional References (referred to as Refs). Refs are memory cells, holding an (arbitrary) immutable value, that implement CAS (Compare-And-Swap) semantics and are managed and enforced by the STM for coordinated changes across many Refs. They are implemented using the excellent `Multiverse STM `_. -Working with immutable collections can sometimes give bad performance due to extensive copying. 
Scala provides so-called persistent datastructures which makes working with immutable collections fast. They are immutable but with constant time access and modification. The use of structural sharing and an insert or update does not ruin the old structure, hence “persistent”. Makes working with immutable composite types fast. The persistent datastructures currently consist of a Map and Vector. +Working with immutable collections can sometimes give bad performance due to extensive copying. Scala provides so-called persistent datastructures which makes working with immutable collections fast. They are immutable but with constant time access and modification. They use structural sharing and an insert or update does not ruin the old structure, hence “persistent”. Makes working with immutable composite types fast. The persistent datastructures currently consist of a Map and Vector. Simple example -============== +-------------- Here is a simple example of an incremental counter using STM. This shows creating a ``Ref``, a transactional reference, and then modifying it within a transaction, which is delimited by ``atomic``. @@ -43,15 +50,14 @@ Here is a simple example of an incremental counter using STM. This shows creatin counter // -> 2 ----- Ref -=== +--- Refs (transactional references) are mutable references to values and through the STM allow the safe sharing of mutable data. Refs separate identity from value. To ensure safety the value stored in a Ref should be immutable (they can of course contain refs themselves). The value referenced by a Ref can only be accessed or swapped within a transaction. If a transaction is not available, the call will be executed in its own transaction (the call will be atomic). This is a different approach than the Clojure Refs, where a missing transaction results in an error. Creating a Ref --------------- +^^^^^^^^^^^^^^ You can create a Ref with or without an initial value. 
@@ -66,7 +72,7 @@ You can create a Ref with or without an initial value. val ref = Ref[Int] Accessing the value of a Ref ----------------------------- +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Use ``get`` to access the value of a Ref. Note that if no initial value has been given then the value is initially ``null``. @@ -96,7 +102,7 @@ If there is a chance that the value of a Ref is null then you can use ``opt``, w } Changing the value of a Ref ---------------------------- +^^^^^^^^^^^^^^^^^^^^^^^^^^^ To set a new value for a Ref you can use ``set`` (or equivalently ``swap``), which sets the new value and returns the old value. @@ -137,7 +143,7 @@ You can also use ``alter`` which accepts a function that takes the old value and // -> 6 Refs in for-comprehensions --------------------------- +^^^^^^^^^^^^^^^^^^^^^^^^^^ Ref is monadic and can be used in for-comprehensions. @@ -173,10 +179,9 @@ Ref is monadic and can be used in for-comprehensions. } // -> Ref[Int] ----- Transactions -============ +------------ A transaction is delimited using ``atomic``. @@ -186,13 +191,27 @@ A transaction is delimited using ``atomic``. // ... } -Coordinated transactions and Transactors ----------------------------------------- +All changes made to transactional objects are isolated from other changes, all make it or non make it (so failure atomicity) and are consistent. With the AkkaSTM you automatically have the Oracle version of the SERIALIZED isolation level, lower isolation is not possible. To make it fully serialized, set the writeskew property that checks if a writeskew problem is allowed to happen. -If you need coordinated transactions across actors or threads then see `Transactors `_. +Retries +^^^^^^^ + +A transaction is automatically retried when it runs into some read or write conflict, until the operation completes, an exception (throwable) is thrown or when there are too many retries. 
When a read or writeconflict is encountered, the transaction uses a bounded exponential backoff to prevent causing more contention and give other transactions some room to complete. + +If you are using non transactional resources in an atomic block, there could be problems because a transaction can be retried. If you are using print statements or logging, it could be that they are called more than once. So you need to be prepared to deal with this. One of the possible solutions is to work with a deferred or compensating task that is executed after the transaction aborts or commits. + +Unexpected retries +^^^^^^^^^^^^^^^^^^ + +It can happen for the first few executions that you get a few failures of execution that lead to unexpected retries, even though there is not any read or writeconflict. The cause of this is that speculative transaction configuration/selection is used. There are transactions optimized for a single transactional object, for 1..n and for n to unlimited. So based on the execution of the transaction, the system learns; it begins with a cheap one and upgrades to more expensive ones. Once it has learned, it will reuse this knowledge. It can be activated/deactivated using the speculative property on the TransactionFactory. In most cases it is best to use the default value (enabled) so you get more out of performance. + +Coordinated transactions and Transactors +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +If you need coordinated transactions across actors or threads then see :ref:`transactors-scala`. Configuring transactions ------------------------- +^^^^^^^^^^^^^^^^^^^^^^^^ It's possible to configure transactions. The ``atomic`` method can take an implicit or explicit ``TransactionFactory``, which can determine properties of the transaction. A default transaction factory is used if none is specified explicitly or there is no implicit ``TransactionFactory`` in scope.
@@ -221,33 +240,36 @@ Configuring transactions with an **explicit** ``TransactionFactory``: } The following settings are possible on a TransactionFactory: -* familyName - Family name for transactions. Useful for debugging. -* readonly - Sets transaction as readonly. Readonly transactions are cheaper. -* maxRetries - The maximum number of times a transaction will retry. -* timeout - The maximum time a transaction will block for. -* trackReads - Whether all reads should be tracked. Needed for blocking operations. -* writeSkew - Whether writeskew is allowed. Disable with care. -* blockingAllowed - Whether explicit retries are allowed. -* interruptible - Whether a blocking transaction can be interrupted. -* speculative - Whether speculative configuration should be enabled. -* quickRelease - Whether locks should be released as quickly as possible (before whole commit). -* propagation - For controlling how nested transactions behave. -* traceLevel - Transaction trace level. + +- familyName - Family name for transactions. Useful for debugging. +- readonly - Sets transaction as readonly. Readonly transactions are cheaper. +- maxRetries - The maximum number of times a transaction will retry. +- timeout - The maximum time a transaction will block for. +- trackReads - Whether all reads should be tracked. Needed for blocking operations. +- writeSkew - Whether writeskew is allowed. Disable with care. +- blockingAllowed - Whether explicit retries are allowed. +- interruptible - Whether a blocking transaction can be interrupted. +- speculative - Whether speculative configuration should be enabled. +- quickRelease - Whether locks should be released as quickly as possible (before whole commit). +- propagation - For controlling how nested transactions behave. +- traceLevel - Transaction trace level. You can also specify the default values for some of these options in akka.conf. 
Here they are with their default values: :: stm { - max-retries = 1000 - timeout = 10 - write-skew = true + fair = on # Should global transactions be fair or non-fair (non fair yield better performance) + max-retries = 1000 + timeout = 5 # Default timeout for blocking transactions and transaction set (in unit defined by + # the time-unit property) + write-skew = true blocking-allowed = false - interruptible = false - speculative = true - quick-release = true - propagation = requires - trace-level = none + interruptible = false + speculative = true + quick-release = true + propagation = "requires" + trace-level = "none" } You can also determine at which level a transaction factory is shared or not shared, which affects the way in which the STM can optimise transactions. @@ -293,7 +315,7 @@ Here's a similar example with an individual transaction factory for each instanc } Transaction lifecycle listeners -------------------------------- +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ It's possible to have code that will only run on the successful commit of a transaction, or when a transaction aborts. You can do this by adding ``deferred`` or ``compensating`` blocks to a transaction. @@ -311,7 +333,7 @@ It's possible to have code that will only run on the successful commit of a tran } Blocking transactions ---------------------- +^^^^^^^^^^^^^^^^^^^^^ You can block in a transaction until a condition is met by using an explicit ``retry``. To use ``retry`` you also need to configure the transaction to allow explicit retries. 
@@ -322,23 +344,23 @@ Here is an example of using ``retry`` to block until an account has enough money import akka.stm._ import akka.actor._ import akka.util.duration._ - import akka.util.Logging + import akka.event.EventHandler type Account = Ref[Double] case class Transfer(from: Account, to: Account, amount: Double) - class Transferer extends Actor with Logging { + class Transferer extends Actor { implicit val txFactory = TransactionFactory(blockingAllowed = true, trackReads = true, timeout = 60 seconds) def receive = { case Transfer(from, to, amount) => atomic { if (from.get < amount) { - log.info("not enough money - retrying") + EventHandler.info(this, "not enough money - retrying") retry } - log.info("transferring") + EventHandler.info(this, "transferring") from alter (_ - amount) to alter (_ + amount) } @@ -365,7 +387,7 @@ Here is an example of using ``retry`` to block until an account has enough money transferer.stop() Alternative blocking transactions ---------------------------------- +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ You can also have two alternative blocking transactions, one of which can succeed first, with ``either-orElse``. 
@@ -374,11 +396,11 @@ You can also have two alternative blocking transactions, one of which can succee import akka.stm._ import akka.actor._ import akka.util.duration._ - import akka.util.Logging + import akka.event.EventHandler case class Branch(left: Ref[Int], right: Ref[Int], amount: Int) - class Brancher extends Actor with Logging { + class Brancher extends Actor { implicit val txFactory = TransactionFactory(blockingAllowed = true, trackReads = true, timeout = 60 seconds) def receive = { @@ -386,13 +408,13 @@ You can also have two alternative blocking transactions, one of which can succee atomic { either { if (left.get < amount) { - log.info("not enough on left - retrying") + EventHandler.info(this, "not enough on left - retrying") retry } log.info("going left") } orElse { if (right.get < amount) { - log.info("not enough on right - retrying") + EventHandler.info(this, "not enough on right - retrying") retry } log.info("going right") @@ -416,14 +438,14 @@ You can also have two alternative blocking transactions, one of which can succee brancher.stop() ----- Transactional datastructures -============================ +---------------------------- Akka provides two datastructures that are managed by the STM. -* TransactionalMap -* TransactionalVector + +- TransactionalMap +- TransactionalVector TransactionalMap and TransactionalVector look like regular mutable datastructures, they even implement the standard Scala 'Map' and 'RandomAccessSeq' interfaces, but they are implemented using persistent datastructures and managed references under the hood. Therefore they are safe to use in a concurrent environment. Underlying TransactionalMap is HashMap, an immutable Map but with near constant time access and modification operations. Similarly TransactionalVector uses a persistent Vector. See the Persistent Datastructures section below for more details. 
@@ -492,53 +514,22 @@ Here is the same example using TransactionalMap: } // -> User("bill") ----- Persistent datastructures -========================= +------------------------- Akka's STM should only be used with immutable data. This can be costly if you have large datastructures and are using a naive copy-on-write. In order to make working with immutable datastructures fast enough Scala provides what are called Persistent Datastructures. There are currently two different ones: -* HashMap (`scaladoc `_) -* Vector (`scaladoc `_) +* HashMap (`scaladoc `__) +* Vector (`scaladoc `__) They are immutable and each update creates a completely new version but they are using clever structural sharing in order to make them almost as fast, for both read and update, as regular mutable datastructures. This illustration is taken from Rich Hickey's presentation. Copyright Rich Hickey 2009. -``_ +.. image:: ../images/clojure-trees.png ----- - -JTA integration -=============== - -The STM has JTA (Java Transaction API) integration. This means that it will, if enabled, hook in to JTA and start a JTA transaction when the STM transaction is started. It will also rollback the STM transaction if the JTA transaction has failed and vice versa. This does not mean that the STM is made durable, if you need that you should use one of the `persistence modules `_. It simply means that the STM will participate and interact with and external JTA provider, for example send a message using JMS atomically within an STM transaction, or use Hibernate to persist STM managed data etc. - -Akka also has an API for using JTA explicitly. Read the `section on JTA `_ for details. - -You can enable JTA support in the 'stm' section in the config: - -:: - - stm { - jta-aware = off # 'on' means that if there JTA Transaction Manager available then the STM will - # begin (or join), commit or rollback the JTA transaction. Default is 'off'. 
- } - -You also have to configure which JTA provider to use etc in the 'jta' config section: - -:: - - jta { - provider = "from-jndi" # Options: "from-jndi" (means that Akka will try to detect a TransactionManager in the JNDI) - # "atomikos" (means that Akka will use the Atomikos based JTA impl in 'akka-jta', - # e.g. you need the akka-jta JARs on classpath). - timeout = 60 - } - ----- Ants simulation sample -====================== +---------------------- One fun and very enlightening visual demo of STM, actors and transactional references is the `Ant simulation sample `_. I encourage you to run it and read through the code since it's a good example of using actors with STM. diff --git a/akka-docs/scala/testing.rst b/akka-docs/scala/testing.rst index 9238cfd198..eee3a2b029 100644 --- a/akka-docs/scala/testing.rst +++ b/akka-docs/scala/testing.rst @@ -1,3 +1,5 @@ +.. _akka-testkit: + ##################### Testing Actor Systems ##################### @@ -40,6 +42,10 @@ encompass functional tests of complete actor networks. The important distinction lies in whether concurrency concerns are part of the test or not. The tools offered are described in detail in the following sections. +.. note:: + + Be sure to add the module :mod:`akka-testkit` to your dependencies. + Unit Testing with :class:`TestActorRef` ======================================= @@ -68,6 +74,8 @@ reference is done like this: .. code-block:: scala + import akka.testkit.TestActorRef + val actorRef = TestActorRef[MyActor] val actor = actorRef.underlyingActor @@ -96,8 +104,8 @@ into a :class:`TestActorRef`. .. code-block:: scala val actorRef = TestActorRef(new MyActor) - val result = actorRef !! msg - result must be (expected) + val result = actorRef !! 
Say42 // hypothetical message stimulating a '42' answer + result must be (42) As the :class:`TestActorRef` is a subclass of :class:`LocalActorRef` with a few special extras, also aspects like linking to a supervisor and restarting work @@ -169,6 +177,10 @@ common task easy: .. code-block:: scala + import akka.testkit.TestKit + import org.scalatest.WordSpec + import org.scalatest.matchers.MustMatchers + class MySpec extends WordSpec with MustMatchers with TestKit { "An Echo actor" must { @@ -238,6 +250,11 @@ first; it follows that this examination usually is the last statement in a } } +.. note:: + + All times are measured using ``System.nanoTime``, meaning that they describe + wall time, not CPU time. + Ray Roestenburg has written a great article on using the TestKit: ``_. His full example is also available :ref:`here `. @@ -252,6 +269,31 @@ runs everything which would normally be queued directly on the current thread, the full history of a message's processing chain is recorded on the call stack, so long as all intervening actors run on this dispatcher. +How to use it +------------- + +Just set the dispatcher as you normally would, either from within the actor + +.. code-block:: scala + + import akka.testkit.CallingThreadDispatcher + + class MyActor extends Actor { + self.dispatcher = CallingThreadDispatcher.global + ... + } + +or from the client code + +.. code-block:: scala + + val ref = Actor.actorOf[MyActor] + ref.dispatcher = CallingThreadDispatcher.global + ref.start() + +As the :class:`CallingThreadDispatcher` does not have any configurable state, +you may always use the (lazily) preallocated one as shown in the examples. + How it works ------------ diff --git a/akka-docs/scala/testkit-example.rst b/akka-docs/scala/testkit-example.rst index f53543e474..a0ec001902 100644 --- a/akka-docs/scala/testkit-example.rst +++ b/akka-docs/scala/testkit-example.rst @@ -4,7 +4,7 @@ TestKit Example ############### -Ray Roestenburg's example code from `his blog `_. 
+Ray Roestenburg's example code from `his blog `_ adapted to work with Akka 1.1. .. code-block:: scala @@ -14,7 +14,7 @@ Ray Roestenburg's example code from `his blog Transactors (Scala)** -============================================================= +.. _transactors-scala: + +Transactors (Scala) +=================== + +.. sidebar:: Contents + + .. contents:: :local: Module stability: **SOLID** Why Transactors? -================ +---------------- Actors are excellent for solving problems where you have many independent processes that can work in isolation and only interact with other Actors through message passing. This model fits many problems. But the actor model is unfortunately a terrible model for implementing truly shared state. E.g. when you need to have consensus and a stable view of state across many components. The classic example is the bank account where clients can deposit and withdraw, in which each operation needs to be atomic. For detailed discussion on the topic see `this JavaOne presentation `_. @@ -15,21 +21,21 @@ Akka's Transactors combine Actors and STM to provide the best of the Actor model If you need Durability then you should not use one of the in-memory data structures but one of the persistent ones. Generally, the STM is not needed very often when working with Akka. Some use-cases (that we can think of) are: -# When you really need composable message flows across many actors updating their **internal local** state but need them to do that atomically in one big transaction. Might not often, but when you do need this then you are screwed without it. -# When you want to share a datastructure across actors. -# When you need to use the persistence modules. + +- When you really need composable message flows across many actors updating their **internal local** state but need them to do that atomically in one big transaction. Might not often, but when you do need this then you are screwed without it. 
+- When you want to share a datastructure across actors. +- When you need to use the persistence modules. Actors and STM --------------- +^^^^^^^^^^^^^^ You can combine Actors and STM in several ways. An Actor may use STM internally so that particular changes are guaranteed to be atomic. Actors may also share transactional datastructures as the STM provides safe shared state across threads. It's also possible to coordinate transactions across Actors or threads so that either the transactions in a set all commit successfully or they all fail. This is the focus of Transactors and the explicit support for coordinated transactions in this section. ----- Coordinated transactions -======================== +------------------------ Akka provides an explicit mechanism for coordinating transactions across Actors. Under the hood it uses a ``CountDownCommitBarrier``, similar to a CountDownLatch. @@ -70,7 +76,7 @@ Here is an example of coordinating two simple counter Actors so that they both i counter1.stop() counter2.stop() -To start a new coordinated transaction set that you will also participate in, just create a ``Coordinated`` object: +To start a new coordinated transaction that you will also participate in, just create a ``Coordinated`` object: .. code-block:: scala @@ -90,7 +96,7 @@ To receive a coordinated message in an actor simply match it in a case statement case coordinated @ Coordinated(Message) => ... } -To include another actor in the same coordinated transaction set that you've created or received, use the apply method on that object. This will increment the number of parties involved by one and create a new ``Coordinated`` object to be sent. +To include another actor in the same coordinated transaction that you've created or received, use the apply method on that object. This will increment the number of parties involved by one and create a new ``Coordinated`` object to be sent. .. 
code-block:: scala @@ -106,10 +112,9 @@ To enter the coordinated transaction use the atomic method of the coordinated ob The coordinated transaction will wait for the other transactions before committing. If any of the coordinated transactions fail then they all fail. ----- Transactor -========== +---------- Transactors are actors that provide a general pattern for coordinating transactions, using the explicit coordination described above. @@ -125,7 +130,7 @@ Here's an example of a simple transactor that will join a coordinated transactio class Counter extends Transactor { val count = Ref(0) - def atomically = { + override def atomically = { case Increment => count alter (_ + 1) } } @@ -140,6 +145,7 @@ Example of coordinating an increment: import akka.transactor.Transactor import akka.stm.Ref + import akka.actor.ActorRef case object Increment @@ -150,7 +156,7 @@ Example of coordinating an increment: case Increment => include(friend) } - def atomically = { + override def atomically = { case Increment => count alter (_ + 1) } } @@ -176,10 +182,9 @@ To execute directly before or after the coordinated transaction, override the `` To completely bypass coordinated transactions override the ``normally`` method. Any message matched by ``normally`` will not be matched by the other methods, and will not be involved in coordinated transactions. In this method you can implement normal actor behavior, or use the normal STM atomic for local transactions. ----- Coordinating Typed Actors -========================= +------------------------- It's also possible to use coordinated transactions with typed actors. You can explicitly pass around ``Coordinated`` objects, or use built-in support with the ``@Coordinated`` annotation and the ``Coordination.coordinate`` method. @@ -188,7 +193,7 @@ To specify a method should use coordinated transactions add the ``@Coordinated`` .. 
code-block:: scala trait Counter { - @Coordinated def increment: Unit + @Coordinated def increment() def get: Int } @@ -197,8 +202,8 @@ To coordinate transactions use a ``coordinate`` block: .. code-block:: scala coordinate { - counter1.increment - counter2.increment + counter1.increment() + counter2.increment() } Here's an example of using ``@Coordinated`` with a TypedActor to coordinate increments. @@ -211,13 +216,13 @@ Here's an example of using ``@Coordinated`` with a TypedActor to coordinate incr import akka.transactor.Coordination._ trait Counter { - @Coordinated def increment: Unit + @Coordinated def increment() def get: Int } class CounterImpl extends TypedActor with Counter { val ref = Ref(0) - def increment = ref alter (_ + 1) + def increment() { ref alter (_ + 1) } def get = ref.get } @@ -227,8 +232,8 @@ Here's an example of using ``@Coordinated`` with a TypedActor to coordinate incr val counter2 = TypedActor.newInstance(classOf[Counter], classOf[CounterImpl]) coordinate { - counter1.increment - counter2.increment + counter1.increment() + counter2.increment() } TypedActor.stop(counter1) @@ -236,9 +241,10 @@ Here's an example of using ``@Coordinated`` with a TypedActor to coordinate incr The ``coordinate`` block will wait for the transactions to complete. If you do not want to wait then you can specify this explicitly: -``_ -coordinate(wait = false) { - counter1.increment - counter2.increment -} -``_ +.. 
code-block:: scala + + coordinate(wait = false) { + counter1.increment() + counter2.increment() + } + diff --git a/akka-docs/pending/tutorial-chat-server-scala.rst b/akka-docs/scala/tutorial-chat-server.rst similarity index 91% rename from akka-docs/pending/tutorial-chat-server-scala.rst rename to akka-docs/scala/tutorial-chat-server.rst index 830bf75c22..78a93157a8 100644 --- a/akka-docs/pending/tutorial-chat-server-scala.rst +++ b/akka-docs/scala/tutorial-chat-server.rst @@ -1,6 +1,10 @@ -Tutorial: write a scalable, fault-tolerant, persistent network chat server and client (Scala) +Tutorial: write a scalable, fault-tolerant, network chat server and client (Scala) ============================================================================================= +.. sidebar:: Contents + + .. contents:: :local: + Introduction ------------ @@ -32,7 +36,7 @@ Actors encapsulate state and behavior into a lightweight process/thread. In a se Creating Actors --------------- -Akka has both a `Scala API `_ and a `Java API `_. In this article we will only look at the Scala API since that is the most expressive one. The article assumes some basic Scala knowledge, but even if you don't know Scala I don't think it will not be too hard to follow along anyway. +Akka has both a Scala API (:ref:`actors-scala`) and a Java API (:ref:`untyped-actors-java`). In this article we will only look at the Scala API since that is the most expressive one. The article assumes some basic Scala knowledge, but even if you don't know Scala I don't think it will not be too hard to follow along anyway. Akka has adopted the same style of writing Actors as Erlang in which each Actor has an explicit message handler which does pattern matching to match on the incoming messages. @@ -44,6 +48,8 @@ Here is a little example before we dive into a more interesting one. .. 
code-block:: scala + import akka.actor.Actor + class MyActor extends Actor { def receive = { case "test" => println("received test") @@ -81,12 +87,53 @@ We will try to write a simple chat/IM system. It is client-server based and uses We will use many of the features of Akka along the way. In particular; Actors, fault-tolerance using Actor supervision, remote Actors, Software Transactional Memory (STM) and persistence. -But let's start by defining the messages that will flow in our system. +Creating an Akka SBT project +---------------------------- + +First we need to create an SBT project for our tutorial. You do that by stepping into the directory you want to create your project in and invoking the ``sbt`` command answering the questions for setting up your project:: + + $ sbt + Project does not exist, create new project? (y/N/s) y + Name: Chat + Organization: Hakkers Inc + Version [1.0]: + Scala version [2.9.0]: + sbt version [0.7.6.RC0]: + +Add the Akka SBT plugin definition to your SBT project by creating a ``Plugins.scala`` file in the ``project/plugins`` directory containing:: + + import sbt._ + + class Plugins(info: ProjectInfo) extends PluginDefinition(info) { + val akkaRepo = "Akka Repo" at "http://akka.io/repository" + val akkaPlugin = "se.scalablesolutions.akka" % "akka-sbt-plugin" % "1.1-M1" + } + +Create a project definition ``project/build/Project.scala`` file containing:: + + import sbt._ + + class ChatProject(info: ProjectInfo) extends DefaultProject(info) with AkkaProject { + val akkaRepo = "Akka Repo" at "http://akka.io/repository" + val akkaSTM = akkaModule("stm") + val akkaRemote = akkaModule("remote") + } + + +Make SBT download the dependencies it needs. That is done by invoking:: + + > reload + > update + +From the SBT project you can generate files for your IDE: + +- `SbtEclipsify `_ to generate Eclipse project. Detailed instructions are available in :ref:`getting-started-first-scala-eclipse`. 
+- `sbt-idea `_ to generate IntelliJ IDEA project. Creating messages ----------------- -It is very important that all messages that will be sent around in the system are immutable. The Actor model relies on the simple fact that no state is shared between Actors and the only way to guarantee that is to make sure we don't pass mutable state around as part of the messages. +Let's start by defining the messages that will flow in our system. It is very important that all messages that will be sent around in the system are immutable. The Actor model relies on the simple fact that no state is shared between Actors and the only way to guarantee that is to make sure we don't pass mutable state around as part of the messages. In Scala we have something called `case classes `_. These make excellent messages since they are both immutable and great to pattern match on. @@ -118,7 +165,8 @@ Sometimes however, there is a need for sequential logic, sending a message and w def login = chat ! Login(name) def logout = chat ! Logout(name) def post(message: String) = chat ! ChatMessage(name, name + ": " + message) - def chatLog = (chat !! GetChatLog(name)).as[ChatLog].getOrElse(throw new Exception("Couldn't get the chat log from ChatServer")) + def chatLog = (chat !! GetChatLog(name)).as[ChatLog] + .getOrElse(throw new Exception("Couldn't get the chat log from ChatServer")) } As you can see, we are using the 'Actor.remote.actorFor' to lookup the chat server on the remote node. From this call we will get a handle to the remote instance and can use it as it is local. 
@@ -221,7 +269,7 @@ I'll try to show you how we can make use Scala's mixins to decouple the Actor im protected def sessionManagement: Receive protected def shutdownSessions(): Unit - override def postStop = { + override def postStop() = { EventHandler.info(this, "Chat server is shutting down...") shutdownSessions self.unlink(storage) @@ -323,22 +371,8 @@ Akka currently provides three different transactional abstractions; 'Map', 'Vect What you get is transactional memory in which multiple Actors are allowed to read and write to the same memory concurrently and if there is a clash between two transactions then both of them are aborted and retried. Aborting a transaction means that the memory is rolled back to the state it were in when the transaction was started. -In database terms STM gives you 'ACI' semantics; 'Atomicity', 'Consistency' and 'Isolation'. The 'D' in 'ACID'; 'Durability', you can't get with an STM since it is in memory. This however is addressed by the persistence module in Akka. - -Persistence: Storing the chat log ---------------------------------- - -Akka modules provides the possibility of taking the transactional data structures we discussed above and making them persistent. It is an extension to the STM which guarantees that it has the same semantics. - -The `persistence module `_ has pluggable storage back-ends. - -They all implement persistent 'Map', 'Vector' and 'Ref'. Which can be created and retrieved by id through one of the storage modules. - -.. code-block:: scala - - val map = RedisStorage.newMap(id) - val vector = CassandraStorage.newVector(id) - val ref = MongoStorage.newRef(id) +In database terms STM gives you 'ACI' semantics; 'Atomicity', 'Consistency' and 'Isolation'. The 'D' in 'ACID'; 'Durability', you can't get with an STM since it is in memory. +It possible to implement durable persistence for the transactional data structures, but in this sample we keep them in memory. 
Chat storage: Backed with simple in-memory ------------------------------------------ @@ -422,7 +456,7 @@ We have now created the full functionality for the chat server, all nicely decou SessionManagement with ChatManagement with MemoryChatStorageFactory { - override def preStart = { + override def preStart() = { remote.start("localhost", 2552); remote.register("chat:service", self) //Register the actor with the specified service id } diff --git a/akka-docs/pending/typed-actors-scala.rst b/akka-docs/scala/typed-actors.rst similarity index 86% rename from akka-docs/pending/typed-actors-scala.rst rename to akka-docs/scala/typed-actors.rst index e9aa061672..d2963ddae7 100644 --- a/akka-docs/pending/typed-actors-scala.rst +++ b/akka-docs/scala/typed-actors.rst @@ -1,9 +1,13 @@ Typed Actors (Scala) ==================== +.. sidebar:: Contents + + .. contents:: :local: + Module stability: **SOLID** -The Typed Actors are implemented through `Typed Actors `_. It uses AOP through `AspectWerkz `_ to turn regular POJOs into asynchronous non-blocking Actors with semantics of the Actor Model. E.g. each message dispatch is turned into a message that is put on a queue to be processed by the Typed Actor sequentially one by one. +The Typed Actors are implemented through `Typed Actors `_. It uses AOP through `AspectWerkz `_ to turn regular POJOs into asynchronous non-blocking Actors with semantics of the Actor Model. Each method dispatch is turned into a message that is put on a queue to be processed by the Typed Actor sequentially one by one. If you are using the `Spring Framework `_ then take a look at Akka's `Spring integration `_. 
@@ -49,8 +53,8 @@ Then you can create an Typed Actor out of it by creating it through the 'TypedAc val service = TypedActor.newInstance(classOf[RegistrationService], classOf[RegistrationServiceImpl], 1000) // The last parameter defines the timeout for Future calls -**Creating Typed Actors with non-default constructor** -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Creating Typed Actors with non-default constructor +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ To create a typed actor that takes constructor arguments use a variant of 'newInstance' or 'newRemoteInstance' that takes a call-by-name block in which you can create the Typed Actor in any way you like. @@ -66,6 +70,7 @@ Configuration factory class Using a configuration object: .. code-block:: scala + import akka.actor.TypedActorConfiguration import akka.util.Duration import akka.util.duration._ @@ -75,7 +80,7 @@ Using a configuration object: val service = TypedActor.newInstance(classOf[RegistrationService], classOf[RegistrationServiceImpl], config) -However, often you will not use these factory methods but declaratively define the Typed Actors as part of a supervisor hierarchy. More on that in the `Fault Tolerance `_ section. +However, often you will not use these factory methods but declaratively define the Typed Actors as part of a supervisor hierarchy. More on that in the :ref:`fault-tolerance-scala` section. Sending messages ---------------- @@ -132,7 +137,7 @@ Here is an example: Stopping Typed Actors --------------------- -Once Typed Actors have been created with one of the TypedActor.newInstance methods they need to be stopped with TypedActor.stop to free resources allocated by the created Typed Actor (this is not needed when the Typed Actor is `supervised `_). 
+Once Typed Actors have been created with one of the TypedActor.newInstance methods they need to be stopped with TypedActor.stop to free resources allocated by the created Typed Actor (this is not needed when the Typed Actor is supervised). .. code-block:: scala @@ -144,7 +149,7 @@ Once Typed Actors have been created with one of the TypedActor.newInstance metho // Free Typed Actor resources TypedActor.stop(service) -When the Typed Actor defines a `shutdown callback `_ method it will be invoked on TypedActor.stop. +When the Typed Actor defines a shutdown callback method (:ref:`fault-tolerance-scala`) it will be invoked on TypedActor.stop. How to use the TypedActorContext for runtime information access --------------------------------------------------------------- @@ -163,9 +168,21 @@ Here is an example how you can use it to in a 'void' (e.g. fire-forget) method t } } -If the sender, sender future etc. is not available, then these methods will return 'null' so you should have a way of dealing with scenario. +If the sender, sender future etc. is not available, then these methods will return 'null' so you should have a way of dealing with that scenario. Messages and immutability ------------------------- **IMPORTANT**: Messages can be any kind of object but have to be immutable (there is a workaround, see next section). Java or Scala can’t enforce immutability (yet) so this has to be by convention. Primitives like String, int, Long are always immutable. Apart from these you have to create your own immutable objects to send as messages. If you pass on a reference to an instance that is mutable then this instance can be modified concurrently by two different Typed Actors and the Actor model is broken leaving you with NO guarantees and most likely corrupt data. + +Akka can help you in this regard. It allows you to turn on an option for serializing all messages, e.g. all parameters to the Typed Actor effectively making a deep clone/copy of the parameters. 
This will make sending mutable messages completely safe. This option is turned on in the ‘$AKKA_HOME/config/akka.conf’ config file like this: + +.. code-block:: ruby + + akka { + actor { + serialize-messages = on # does a deep clone of messages to ensure immutability + } + } + +This will make a deep clone (using Java serialization) of all parameters. diff --git a/akka-http/src/main/scala/akka/http/Mist.scala b/akka-http/src/main/scala/akka/http/Mist.scala index 357ff3b9fb..1425a8ac81 100644 --- a/akka-http/src/main/scala/akka/http/Mist.scala +++ b/akka-http/src/main/scala/akka/http/Mist.scala @@ -268,7 +268,7 @@ class RootEndpoint extends Actor with Endpoint { // use the configurable dispatcher self.dispatcher = Endpoint.Dispatcher - override def preStart = + override def preStart() = _attachments = Tuple2((uri: String) => {uri eq Root}, (uri: String) => this.actor) :: _attachments def recv: Receive = { diff --git a/akka-http/src/main/scala/akka/security/Security.scala b/akka-http/src/main/scala/akka/security/Security.scala index 6c6577bcae..fe2033cf4f 100644 --- a/akka-http/src/main/scala/akka/security/Security.scala +++ b/akka-http/src/main/scala/akka/security/Security.scala @@ -184,7 +184,7 @@ trait AuthenticationActor[C <: Credentials] extends Actor { * Responsible for the execution flow of authentication * * Credentials are extracted and verified from the request, - * and a se3curity context is created for the ContainerRequest + * and a security context is created for the ContainerRequest * this should ensure good integration with current Jersey security */ protected val authenticate: Receive = { diff --git a/akka-http/src/test/scala/config/ConfigSpec.scala b/akka-http/src/test/scala/config/ConfigSpec.scala index 3adea2fc43..2b21f3cc34 100644 --- a/akka-http/src/test/scala/config/ConfigSpec.scala +++ b/akka-http/src/test/scala/config/ConfigSpec.scala @@ -19,7 +19,7 @@ class ConfigSpec extends WordSpec with MustMatchers { getString("akka.http.authenticator") 
must equal(Some("N/A")) getBool("akka.http.connection-close") must equal(Some(true)) getString("akka.http.expired-header-name") must equal(Some("Async-Timeout")) - getList("akka.http.filters") must equal(List("se.scalablesolutions.akka.security.AkkaSecurityFilterFactory")) + getList("akka.http.filters") must equal(List("akka.security.AkkaSecurityFilterFactory")) getList("akka.http.resource-packages") must equal(Nil) getString("akka.http.hostname") must equal(Some("localhost")) getString("akka.http.expired-header-value") must equal(Some("expired")) diff --git a/akka-remote/src/main/scala/akka/remote/BootableRemoteActorService.scala b/akka-remote/src/main/scala/akka/remote/BootableRemoteActorService.scala index b31c461048..04662ca8af 100644 --- a/akka-remote/src/main/scala/akka/remote/BootableRemoteActorService.scala +++ b/akka-remote/src/main/scala/akka/remote/BootableRemoteActorService.scala @@ -4,7 +4,6 @@ package akka.remote -import akka.config.Config.config import akka.actor. {Actor, BootableActorLoaderService} import akka.util. 
{ReflectiveAccess, Bootable} @@ -20,18 +19,18 @@ trait BootableRemoteActorService extends Bootable { def run = Actor.remote.start(self.applicationLoader.getOrElse(null)) //Use config host/port }, "Akka RemoteModule Service") - def startRemoteService = remoteServerThread.start() + def startRemoteService() { remoteServerThread.start() } - abstract override def onLoad = { + abstract override def onLoad() { if (ReflectiveAccess.isRemotingEnabled && RemoteServerSettings.isRemotingEnabled) { - startRemoteService + startRemoteService() } - super.onLoad + super.onLoad() } - abstract override def onUnload = { - Actor.remote.shutdown + abstract override def onUnload() { + Actor.remote.shutdown() if (remoteServerThread.isAlive) remoteServerThread.join(1000) - super.onUnload + super.onUnload() } } diff --git a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala index 84c66e5d79..88538f1673 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala @@ -41,7 +41,7 @@ import java.util.concurrent.atomic.{AtomicReference, AtomicBoolean} import java.util.concurrent._ import akka.AkkaException -class RemoteClientMessageBufferException(message: String) extends AkkaException(message) +class RemoteClientMessageBufferException(message: String, cause: Throwable = null) extends AkkaException(message, cause) object RemoteEncoder { def encode(rmp: RemoteMessageProtocol): AkkaRemoteProtocol = { @@ -107,7 +107,7 @@ trait NettyRemoteClientModule extends RemoteClientModule { self: ListenerManagem def shutdownClientConnection(address: InetSocketAddress): Boolean = lock withWriteGuard { remoteClients.remove(Address(address)) match { - case s: Some[RemoteClient] => s.get.shutdown + case s: Some[RemoteClient] => s.get.shutdown() case None => false } } @@ -122,15 +122,15 @@ trait NettyRemoteClientModule extends 
RemoteClientModule { self: ListenerManagem /** * Clean-up all open connections. */ - def shutdownClientModule = { - shutdownRemoteClients + def shutdownClientModule() { + shutdownRemoteClients() //TODO: Should we empty our remoteActors too? //remoteActors.clear } - def shutdownRemoteClients = lock withWriteGuard { - remoteClients.foreach({ case (addr, client) => client.shutdown }) - remoteClients.clear + def shutdownRemoteClients() = lock withWriteGuard { + remoteClients.foreach({ case (addr, client) => client.shutdown() }) + remoteClients.clear() } } @@ -167,7 +167,7 @@ abstract class RemoteClient private[akka] ( def connect(reconnectIfAlreadyConnected: Boolean = false): Boolean - def shutdown: Boolean + def shutdown(): Boolean /** * Returns an array with the current pending messages not yet delivered. @@ -372,16 +372,16 @@ class ActiveRemoteClient private[akka] ( } //Please note that this method does _not_ remove the ARC from the NettyRemoteClientModule's map of clients - def shutdown = runSwitch switchOff { + def shutdown() = runSwitch switchOff { notifyListeners(RemoteClientShutdown(module, remoteAddress)) timer.stop() timer = null openChannels.close.awaitUninterruptibly openChannels = null - bootstrap.releaseExternalResources + bootstrap.releaseExternalResources() bootstrap = null connection = null - pendingRequests.clear + pendingRequests.clear() } private[akka] def isWithinReconnectionTimeWindow: Boolean = { @@ -571,7 +571,7 @@ class NettyRemoteServer(serverModule: NettyRemoteServerModule, val host: String, openChannels.add(bootstrap.bind(address)) serverModule.notifyListeners(RemoteServerStarted(serverModule)) - def shutdown { + def shutdown() { try { val shutdownSignal = { val b = RemoteControlProtocol.newBuilder @@ -583,7 +583,7 @@ class NettyRemoteServer(serverModule: NettyRemoteServerModule, val host: String, openChannels.write(RemoteEncoder.encode(shutdownSignal)).awaitUninterruptibly openChannels.disconnect openChannels.close.awaitUninterruptibly - 
bootstrap.releaseExternalResources + bootstrap.releaseExternalResources() serverModule.notifyListeners(RemoteServerShutdown(serverModule)) } catch { case e: Exception => @@ -626,11 +626,11 @@ trait NettyRemoteServerModule extends RemoteServerModule { self: RemoteModule => this } - def shutdownServerModule = guard withGuard { + def shutdownServerModule() = guard withGuard { _isRunning switchOff { currentServer.getAndSet(None) foreach { instance => - instance.shutdown + instance.shutdown() } } } diff --git a/akka-remote/src/main/scala/akka/serialization/SerializationProtocol.scala b/akka-remote/src/main/scala/akka/serialization/SerializationProtocol.scala index 5474ce6cf7..640884b761 100644 --- a/akka-remote/src/main/scala/akka/serialization/SerializationProtocol.scala +++ b/akka-remote/src/main/scala/akka/serialization/SerializationProtocol.scala @@ -183,15 +183,15 @@ object RemoteActorSerialization { /** * Serializes the ActorRef instance into a Protocol Buffers (protobuf) Message. */ - def toRemoteActorRefProtocol(ar: ActorRef): RemoteActorRefProtocol = { - import ar._ - - Actor.remote.registerByUuid(ar) - + def toRemoteActorRefProtocol(actor: ActorRef): RemoteActorRefProtocol = { + actor match { + case ar: LocalActorRef => Actor.remote.registerByUuid(ar) + case _ => {} + } RemoteActorRefProtocol.newBuilder - .setAddress("uuid:" + uuid.toString) - .setTimeout(timeout) - .build + .setAddress("uuid:" + actor.uuid.toString) + .setTimeout(actor.timeout) + .build } def createRemoteMessageProtocolBuilder( diff --git a/akka-remote/src/test/scala/remote/AkkaRemoteTest.scala b/akka-remote/src/test/scala/remote/AkkaRemoteTest.scala index 523f206df8..7ad4584a08 100644 --- a/akka-remote/src/test/scala/remote/AkkaRemoteTest.scala +++ b/akka-remote/src/test/scala/remote/AkkaRemoteTest.scala @@ -40,20 +40,20 @@ class AkkaRemoteTest extends remote.asInstanceOf[NettyRemoteSupport].optimizeLocal.set(false) //Can't run the test if we're eliminating all remote calls } - override def 
afterAll { + override def afterAll() { if (!OptimizeLocal) remote.asInstanceOf[NettyRemoteSupport].optimizeLocal.set(optimizeLocal_?) //Reset optimizelocal after all tests } - override def beforeEach { + override def beforeEach() { remote.start(host,port) super.beforeEach } override def afterEach() { - remote.shutdown + remote.shutdown() Actor.registry.local.shutdownAll() - super.afterEach + super.afterEach() } /* Utilities */ diff --git a/akka-remote/src/test/scala/remote/ServerInitiatedRemoteActorSample.scala b/akka-remote/src/test/scala/remote/ServerInitiatedRemoteActorSample.scala index cae866e6e2..0c5a9565a6 100644 --- a/akka-remote/src/test/scala/remote/ServerInitiatedRemoteActorSample.scala +++ b/akka-remote/src/test/scala/remote/ServerInitiatedRemoteActorSample.scala @@ -1,8 +1,8 @@ package akka.actor.remote -import akka.actor.{Actor, ActorRegistry} - +import akka.actor.Actor import Actor._ +import akka.event.EventHandler /************************************* Instructions how to run the sample: @@ -19,14 +19,12 @@ Instructions how to run the sample: * Then paste in the code below into both shells. Then run: -* ServerInitiatedRemoteActorServer.run in one shell -* ServerInitiatedRemoteActorClient.run in one shell +* ServerInitiatedRemoteActorServer.run() in one shell +* ServerInitiatedRemoteActorClient.run() in the other shell Have fun. 
*************************************/ class HelloWorldActor extends Actor { - self.start() - def receive = { case "Hello" => self.reply("World") } @@ -34,16 +32,22 @@ class HelloWorldActor extends Actor { object ServerInitiatedRemoteActorServer { - def main(args: Array[String]) = { - Actor.remote.start("localhost", 2552) - Actor.remote.register("hello-service", actorOf[HelloWorldActor]) + def run() { + remote.start("localhost", 2552) + remote.register("hello-service", actorOf[HelloWorldActor]) } + + def main(args: Array[String]) { run() } } object ServerInitiatedRemoteActorClient { - def main(args: Array[String]) = { - val actor = Actor.remote.actorFor("hello-service", "localhost", 2552) + + def run() { + val actor = remote.actorFor("hello-service", "localhost", 2552) val result = actor !! "Hello" + EventHandler.info("Result from Remote Actor: %s", result) } + + def main(args: Array[String]) { run() } } diff --git a/akka-remote/src/test/scala/remote/ServerInitiatedRemoteSessionActorSpec.scala b/akka-remote/src/test/scala/remote/ServerInitiatedRemoteSessionActorSpec.scala index c2277200b1..af29bb0bcb 100644 --- a/akka-remote/src/test/scala/remote/ServerInitiatedRemoteSessionActorSpec.scala +++ b/akka-remote/src/test/scala/remote/ServerInitiatedRemoteSessionActorSpec.scala @@ -19,8 +19,8 @@ object ServerInitiatedRemoteSessionActorSpec { class RemoteStatefullSessionActorSpec extends Actor { - override def preStart = instantiatedSessionActors.add(self) - override def postStop = instantiatedSessionActors.remove(self) + override def preStart() = instantiatedSessionActors.add(self) + override def postStop() = instantiatedSessionActors.remove(self) var user: String = "anonymous" def receive = { @@ -48,7 +48,7 @@ class ServerInitiatedRemoteSessionActorSpec extends AkkaRemoteTest { val result1 = session1 !! 
GetUser() result1.as[String] must equal (Some("session[1]")) - remote.shutdownClientModule + remote.shutdownClientModule() val session2 = remote.actorFor("untyped-session-actor-service", 5000L, host, port) @@ -66,7 +66,7 @@ class ServerInitiatedRemoteSessionActorSpec extends AkkaRemoteTest { default1.as[String] must equal (Some("anonymous")) instantiatedSessionActors must have size (1) - remote.shutdownClientModule + remote.shutdownClientModule() Thread.sleep(1000) instantiatedSessionActors must have size (0) } diff --git a/akka-remote/src/test/scala/remote/ServerInitiatedRemoteTypedSessionActorSpec.scala b/akka-remote/src/test/scala/remote/ServerInitiatedRemoteTypedSessionActorSpec.scala index e357127641..e0d1a32ac3 100644 --- a/akka-remote/src/test/scala/remote/ServerInitiatedRemoteTypedSessionActorSpec.scala +++ b/akka-remote/src/test/scala/remote/ServerInitiatedRemoteTypedSessionActorSpec.scala @@ -18,8 +18,8 @@ class ServerInitiatedRemoteTypedSessionActorSpec extends AkkaRemoteTest { } // make sure the servers shutdown cleanly after the test has finished - override def afterEach = { - super.afterEach + override def afterEach() { + super.afterEach() clearMessageLogs } @@ -32,7 +32,7 @@ class ServerInitiatedRemoteTypedSessionActorSpec extends AkkaRemoteTest { session1.login("session[1]") session1.getUser() must equal ("session[1]") - remote.shutdownClientModule + remote.shutdownClientModule() val session2 = remote.typedActorFor(classOf[RemoteTypedSessionActor], "typed-session-actor-service", 5000L, host, port) @@ -46,7 +46,7 @@ class ServerInitiatedRemoteTypedSessionActorSpec extends AkkaRemoteTest { session1.getUser() must equal ("anonymous") RemoteTypedSessionActorImpl.getInstances() must have size (1) - remote.shutdownClientModule + remote.shutdownClientModule() Thread.sleep(1000) RemoteTypedSessionActorImpl.getInstances() must have size (0) @@ -57,7 +57,7 @@ class ServerInitiatedRemoteTypedSessionActorSpec extends AkkaRemoteTest { session1.doSomethingFunny() 
- remote.shutdownClientModule + remote.shutdownClientModule() Thread.sleep(1000) RemoteTypedSessionActorImpl.getInstances() must have size (0) } diff --git a/akka-remote/src/test/scala/serialization/SerializableTypeClassActorSpec.scala b/akka-remote/src/test/scala/serialization/SerializableTypeClassActorSpec.scala index 682d647751..6c46ce906a 100644 --- a/akka-remote/src/test/scala/serialization/SerializableTypeClassActorSpec.scala +++ b/akka-remote/src/test/scala/serialization/SerializableTypeClassActorSpec.scala @@ -186,6 +186,7 @@ class SerializableTypeClassActorSpec extends } */ } + describe("Custom serializable actors") { it("should serialize and de-serialize") { import BinaryFormatMyActorWithSerializableMessages._ @@ -208,6 +209,27 @@ class SerializableTypeClassActorSpec extends (actor3 !! "hello-reply").getOrElse("_") should equal("world") } } + + describe("ActorRef serialization") { + it("should serialize and deserialize local actor refs ") { + val a = actorOf[MyActorWithDualCounter].start + val out = RemoteActorSerialization.toRemoteActorRefProtocol(a).toByteArray + val in = RemoteActorSerialization.fromBinaryToRemoteActorRef(out) + + in.address should equal(a.address) + in.timeout should equal(a.timeout) + a.stop + } + + it("should serialize and deserialize remote actor refs ") { + val a = Actor.remote.actorFor("foo", "localhost", 6666) + val out = RemoteActorSerialization.toRemoteActorRefProtocol(a).toByteArray + val in = RemoteActorSerialization.fromBinaryToRemoteActorRef(out) + + in.address should equal(a.address) + in.timeout should equal(a.timeout) + } + } } class MyActorWithDualCounter extends Actor { diff --git a/akka-samples/akka-sample-chat/src/main/scala/ChatServer.scala b/akka-samples/akka-sample-chat/src/main/scala/ChatServer.scala index 2be4a4b072..0c76929ff9 100644 --- a/akka-samples/akka-sample-chat/src/main/scala/ChatServer.scala +++ b/akka-samples/akka-sample-chat/src/main/scala/ChatServer.scala @@ -6,7 +6,7 @@ import 
scala.collection.mutable.HashMap - import akka.actor.{SupervisorFactory, Actor, ActorRef} + import akka.actor.{Actor, ActorRef} import akka.stm._ import akka.config.Supervision.{OneForOneStrategy,Permanent} import Actor._ @@ -108,7 +108,9 @@ self.reply(ChatLog(messageList)) } - override def postRestart(reason: Throwable) = chatLog = TransactionalVector() + override def postRestart(reason: Throwable) { + chatLog = TransactionalVector() + } } /** @@ -135,8 +137,9 @@ sessions -= username } - protected def shutdownSessions = + protected def shutdownSessions() { sessions.foreach { case (_, session) => session.stop() } + } } /** @@ -184,11 +187,11 @@ // abstract methods to be defined somewhere else protected def chatManagement: Receive protected def sessionManagement: Receive - protected def shutdownSessions(): Unit + protected def shutdownSessions() - override def postStop = { + override def postStop() { EventHandler.info(this, "Chat server is shutting down...") - shutdownSessions + shutdownSessions() self.unlink(storage) storage.stop() } @@ -206,7 +209,7 @@ SessionManagement with ChatManagement with MemoryChatStorageFactory { - override def preStart = { + override def preStart() { remote.start("localhost", 2552); remote.register("chat:service", self) //Register the actor with the specified service id } @@ -217,9 +220,9 @@ */ object ServerRunner { - def main(args: Array[String]): Unit = ServerRunner.run + def main(args: Array[String]) { ServerRunner.run() } - def run = { + def run() { actorOf[ChatService].start() } } @@ -229,9 +232,9 @@ */ object ClientRunner { - def main(args: Array[String]): Unit = ClientRunner.run + def main(args: Array[String]) { ClientRunner.run() } - def run = { + def run() { val client1 = new ChatClient("jonas") client1.login diff --git a/akka-samples/akka-sample-remote/README b/akka-samples/akka-sample-remote/README index b20d1c7f4e..f19386e1e3 100644 --- a/akka-samples/akka-sample-remote/README +++ b/akka-samples/akka-sample-remote/README @@ 
-1,10 +1,5 @@ --------------------------------------------------------- == Akka Remote Sample Application == - -This sample has two different samples: - - Server Managed Remote Actors Sample - - Client Managed Remote Actors Sample - --------------------------------------------------------- = Server Managed Remote Actors Sample = @@ -29,31 +24,4 @@ To run the sample: Now you could test client reconnect by killing the console running the ServerManagedRemoteActorClient and start it up again. See the client reconnect take place in the REPL shell. -That’s it. Have fun. - ---------------------------------------------------------- -= Client Managed Remote Actors Sample = - -To run the sample: - -1. Fire up two shells. For each of them: - - Step down into to the root of the Akka distribution. - - Set 'export AKKA_HOME=. - - Run 'sbt' - - Run 'update' followed by 'compile' if you have not done that before. - - Run 'project akka-sample-remote' - - Run 'console' to start up a REPL (interpreter). -2. In the first REPL you get execute: - - scala> import sample.remote._ - - scala> ClientManagedRemoteActorServer.run - This starts up the RemoteNode and registers the remote actor -3. In the second REPL you get execute: - - scala> import sample.remote._ - - scala> ClientManagedRemoteActorClient.run -4. See the actor conversation. -5. Run it again to see full speed after first initialization. - -Now you could test client reconnect by killing the console running the ClientManagedRemoteActorClient and start it up again. See the client reconnect take place in the REPL shell. - -That’s it. Have fun. - +That’s it. Have fun. 
\ No newline at end of file diff --git a/akka-stm/src/main/scala/akka/agent/Agent.scala b/akka-stm/src/main/scala/akka/agent/Agent.scala index 80db8bff21..378ee2995e 100644 --- a/akka-stm/src/main/scala/akka/agent/Agent.scala +++ b/akka-stm/src/main/scala/akka/agent/Agent.scala @@ -7,7 +7,7 @@ package akka.agent import akka.stm._ import akka.actor.Actor import akka.japi.{Function => JFunc, Procedure => JProc} -import akka.dispatch.{Dispatchers, Future} +import akka.dispatch.{DefaultCompletableFuture, Dispatchers, Future} /** * Used internally to send functions. @@ -115,15 +115,32 @@ class Agent[T](initialValue: T) { else dispatch } + /** + * Dispatch a function to update the internal state, and return a Future where that new state can be obtained + * within the given timeout + */ + def alter(f: T => T)(timeout: Long): Future[T] = { + def dispatch = updater.!!!(Update(f),timeout) + if (Stm.activeTransaction) { + val result = new DefaultCompletableFuture[T](timeout) + get //Join xa + deferred { + result completeWith dispatch + } //Attach deferred-block to current transaction + result + } + else dispatch + } + /** * Dispatch a new value for the internal state. Behaves the same - * as sending a fuction (x => newValue). + * as sending a function (x => newValue). */ def send(newValue: T): Unit = send(x => newValue) /** * Dispatch a new value for the internal state. Behaves the same - * as sending a fuction (x => newValue). + * as sending a function (x => newValue). */ def update(newValue: T) = send(newValue) @@ -140,6 +157,24 @@ class Agent[T](initialValue: T) { value }) + /** + * Dispatch a function to update the internal state but on its own thread, + * and return a Future where that new state can be obtained within the given timeout. + * This does not use the reactive thread pool and can be used for long-running + * or blocking operations. Dispatches using either `alterOff` or `alter` will + * still be executed in order. 
+ */ + def alterOff(f: T => T)(timeout: Long): Future[T] = { + val result = new DefaultCompletableFuture[T](timeout) + send((value: T) => { + suspend + val threadBased = Actor.actorOf(new ThreadBasedAgentUpdater(this)).start() + result completeWith threadBased.!!!(Update(f), timeout) + value + }) + result + } + /** * A future to the current value that will be completed after any currently * queued updates. @@ -179,7 +214,7 @@ class Agent[T](initialValue: T) { def resume() = updater.dispatcher.resume(updater) /** - * Closes the agents and makes it eligable for garbage collection. + * Closes the agents and makes it eligible for garbage collection. * A closed agent cannot accept any `send` actions. */ def close() = updater.stop() @@ -194,6 +229,13 @@ class Agent[T](initialValue: T) { */ def send(f: JFunc[T, T]): Unit = send(x => f(x)) + /** + * Java API + * Dispatch a function to update the internal state, and return a Future where that new state can be obtained + * within the given timeout + */ + def alter(f: JFunc[T, T], timeout: Long): Future[T] = alter(x => f(x))(timeout) + /** * Java API: * Dispatch a function to update the internal state but on its own thread. @@ -203,6 +245,16 @@ class Agent[T](initialValue: T) { */ def sendOff(f: JFunc[T, T]): Unit = sendOff(x => f(x)) + /** + * Java API: + * Dispatch a function to update the internal state but on its own thread, + * and return a Future where that new state can be obtained within the given timeout. + * This does not use the reactive thread pool and can be used for long-running + * or blocking operations. Dispatches using either `alterOff` or `alter` will + * still be executed in order. + */ + def alterOff(f: JFunc[T, T], timeout: Long): Unit = alterOff(x => f(x))(timeout) + /** * Java API: * Map this agent to a new agent, applying the function to the internal state. 
@@ -232,7 +284,7 @@ class AgentUpdater[T](agent: Agent[T]) extends Actor { def receive = { case update: Update[T] => - atomic(txFactory) { agent.ref alter update.function } + self.reply_?(atomic(txFactory) { agent.ref alter update.function }) case Get => self reply agent.get case _ => () } @@ -247,8 +299,9 @@ class ThreadBasedAgentUpdater[T](agent: Agent[T]) extends Actor { val txFactory = TransactionFactory(familyName = "ThreadBasedAgentUpdater", readonly = false) def receive = { - case update: Update[T] => { - atomic(txFactory) { agent.ref alter update.function } + case update: Update[T] => try { + self.reply_?(atomic(txFactory) { agent.ref alter update.function }) + } finally { agent.resume self.stop() } diff --git a/akka-stm/src/main/scala/akka/stm/package.scala b/akka-stm/src/main/scala/akka/stm/package.scala index 055b1d3adf..c7587ac24a 100644 --- a/akka-stm/src/main/scala/akka/stm/package.scala +++ b/akka-stm/src/main/scala/akka/stm/package.scala @@ -5,7 +5,7 @@ package akka /** - * For easily importing everthing needed for STM. + * For easily importing everything needed for STM. 
*/ package object stm extends akka.stm.Stm with akka.stm.StmUtil { diff --git a/akka-stm/src/test/scala/agent/AgentSpec.scala b/akka-stm/src/test/scala/agent/AgentSpec.scala index ed07dea6bd..18233917c3 100644 --- a/akka-stm/src/test/scala/agent/AgentSpec.scala +++ b/akka-stm/src/test/scala/agent/AgentSpec.scala @@ -49,6 +49,23 @@ class AgentSpec extends WordSpec with MustMatchers { agent.close } + "maintain order between alter and alterOff" in { + + val agent = Agent("a") + + val r1 = agent.alter(_ + "b")(5000) + val r2 = agent.alterOff((s: String) => { Thread.sleep(2000); s + "c" })(5000) + val r3 = agent.alter(_ + "d")(5000) + + r1.await.resultOrException.get must be === "ab" + r2.await.resultOrException.get must be === "abc" + r3.await.resultOrException.get must be === "abcd" + + agent() must be ("abcd") + + agent.close + } + "be immediately readable" in { val countDown = new CountDownFunction[Int] val readLatch = new CountDownLatch(1) diff --git a/akka-testkit/src/main/scala/akka/testkit/CallingThreadDispatcher.scala b/akka-testkit/src/main/scala/akka/testkit/CallingThreadDispatcher.scala index 971ee2e89f..319b40b6f4 100644 --- a/akka-testkit/src/main/scala/akka/testkit/CallingThreadDispatcher.scala +++ b/akka-testkit/src/main/scala/akka/testkit/CallingThreadDispatcher.scala @@ -108,9 +108,9 @@ class CallingThreadDispatcher(val warnings: Boolean = true) extends MessageDispa private def getMailbox(actor: ActorRef) = actor.mailbox.asInstanceOf[CallingThreadMailbox] - private[akka] override def start {} + private[akka] override def start() {} - private[akka] override def shutdown {} + private[akka] override def shutdown() {} private[akka] override def timeoutMs = 100L @@ -156,7 +156,7 @@ class CallingThreadDispatcher(val warnings: Boolean = true) extends MessageDispa if (execute) runQueue(mbox, queue) } - private[akka] override def executeFuture(invocation: FutureInvocation) { invocation.run } + private[akka] override def executeFuture(invocation: 
FutureInvocation[_]) { invocation.run } /* * This method must be called with this thread's queue, which must already diff --git a/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala b/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala index 158931d987..f0e9ceab7b 100644 --- a/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala +++ b/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala @@ -83,12 +83,13 @@ object TestActorRef { def apply[T <: Actor : Manifest](address: String): TestActorRef[T] = new TestActorRef[T] ({ () => import ReflectiveAccess.{ createInstance, noParams, noArgs } - createInstance[T](manifest[T].erasure, noParams, noArgs).getOrElse( - throw new ActorInitializationException( + createInstance[T](manifest[T].erasure, noParams, noArgs) match { + case r: Right[_, T] => r.b + case l: Left[Exception, _] => throw new ActorInitializationException( "Could not instantiate Actor" + "\nMake sure Actor is NOT defined inside a class/trait," + "\nif so put it outside the class/trait, f.e. in a companion object," + - "\nOR try to change: 'actorOf[MyActor]' to 'actorOf(new MyActor)'.")) + "\nOR try to change: 'actorOf[MyActor]' to 'actorOf(new MyActor)'.", l.a) + } }, address) - } diff --git a/akka-testkit/src/test/scala/akka/testkit/TestActorRefSpec.scala b/akka-testkit/src/test/scala/akka/testkit/TestActorRefSpec.scala index 5a89cac4ab..0bf0c8df27 100644 --- a/akka-testkit/src/test/scala/akka/testkit/TestActorRefSpec.scala +++ b/akka-testkit/src/test/scala/akka/testkit/TestActorRefSpec.scala @@ -240,7 +240,7 @@ class TestActorRefSpec extends WordSpec with MustMatchers with BeforeAndAfterEac ref ! 
ch val f = ch.future f must be ('completed) - f() must be ("complexReply") + f.get must be ("complexReply") } } diff --git a/akka-tutorials/akka-tutorial-first/README b/akka-tutorials/akka-tutorial-first/README new file mode 100644 index 0000000000..f4b42f631f --- /dev/null +++ b/akka-tutorials/akka-tutorial-first/README @@ -0,0 +1,7 @@ +================ + First Tutorial +================ + +This is the source code for the first tutorial. + +See the Akka Documentation for information about this tutorial. diff --git a/akka-tutorials/akka-tutorial-first/pom.xml b/akka-tutorials/akka-tutorial-first/pom.xml index f3d9589815..b3e9f7319c 100644 --- a/akka-tutorials/akka-tutorial-first/pom.xml +++ b/akka-tutorials/akka-tutorial-first/pom.xml @@ -8,14 +8,14 @@ akka.tutorial.first.java akka-tutorial-first-java jar - 1.0-SNAPSHOT + 1.2-SNAPSHOT http://akka.io se.scalablesolutions.akka akka-actor - 1.1-SNAPSHOT + 1.2-SNAPSHOT @@ -23,7 +23,7 @@ Akka Akka Maven2 Repository - http://www.scalablesolutions.se/akka/repository/ + http://akka.io/repository/ diff --git a/akka-tutorials/akka-tutorial-first/project/build.properties b/akka-tutorials/akka-tutorial-first/project/build.properties new file mode 100644 index 0000000000..efe6111c40 --- /dev/null +++ b/akka-tutorials/akka-tutorial-first/project/build.properties @@ -0,0 +1,5 @@ +project.organization=se.scalablesolutions.akka +project.name=akka-tutorial-first +project.version=1.2-SNAPSHOT +build.scala.versions=2.9.0 +sbt.version=0.7.7 diff --git a/akka-tutorials/akka-tutorial-first/project/build/Project.scala b/akka-tutorials/akka-tutorial-first/project/build/Project.scala new file mode 100644 index 0000000000..975f2ce970 --- /dev/null +++ b/akka-tutorials/akka-tutorial-first/project/build/Project.scala @@ -0,0 +1,3 @@ +import sbt._ + +class TutorialOneProject(info: ProjectInfo) extends DefaultProject(info) with AkkaProject diff --git a/akka-tutorials/akka-tutorial-first/project/plugins/Plugins.scala 
b/akka-tutorials/akka-tutorial-first/project/plugins/Plugins.scala new file mode 100644 index 0000000000..aa263a988e --- /dev/null +++ b/akka-tutorials/akka-tutorial-first/project/plugins/Plugins.scala @@ -0,0 +1,6 @@ +import sbt._ + +class Plugins(info: ProjectInfo) extends PluginDefinition(info) { + val akkaRepo = "Akka Repo" at "http://akka.io/repository" + val akkaPlugin = "se.scalablesolutions.akka" % "akka-sbt-plugin" % "1.2-SNAPSHOT" +} diff --git a/akka-tutorials/akka-tutorial-first/src/main/java/akka/tutorial/first/java/Pi.java b/akka-tutorials/akka-tutorial-first/src/main/java/akka/tutorial/first/java/Pi.java index 9576338fdd..653800ca3e 100644 --- a/akka-tutorials/akka-tutorial-first/src/main/java/akka/tutorial/first/java/Pi.java +++ b/akka-tutorials/akka-tutorial-first/src/main/java/akka/tutorial/first/java/Pi.java @@ -18,31 +18,6 @@ import akka.routing.UntypedLoadBalancer; import java.util.concurrent.CountDownLatch; -/** - * First part in Akka tutorial for Java. - *

- * Calculates Pi. - *

- * Run on command line: - *

- *   $ cd akka-1.1
- *   $ export AKKA_HOME=`pwd`
- *   $ javac -cp dist/akka-actor-1.1-SNAPSHOT.jar:scala-library.jar akka/tutorial/first/java/Pi.java
- *   $ java -cp dist/akka-actor-1.1-SNAPSHOT.jar:scala-library.jar:. akka.tutorial.first.java.Pi
- *   $ ...
- * 
- *

- * Run it in Maven: - *

- *   $ mvn
- *   > scala:console
- *   > val pi = new akka.tutorial.first.java.Pi
- *   > pi.calculate(4, 10000, 10000)
- *   > ...
- * 
- * - * @author Jonas Bonér - */ public class Pi { public static void main(String[] args) throws Exception { diff --git a/akka-tutorials/akka-tutorial-first/src/main/scala/Pi.scala b/akka-tutorials/akka-tutorial-first/src/main/scala/Pi.scala index e6d8b87c14..51fcfac45d 100644 --- a/akka-tutorials/akka-tutorial-first/src/main/scala/Pi.scala +++ b/akka-tutorials/akka-tutorial-first/src/main/scala/Pi.scala @@ -12,32 +12,6 @@ import Routing._ import System.{currentTimeMillis => now} import java.util.concurrent.CountDownLatch -/** - * First part in Akka tutorial. - *

- * Calculates Pi. - *

- * Run on command line: - *

- *   $ cd akka-1.1
- *   $ export AKKA_HOME=`pwd`
- *   $ scalac -cp dist/akka-actor-1.1-SNAPSHOT.jar Pi.scala
- *   $ java -cp dist/akka-actor-1.1-SNAPSHOT.jar:scala-library.jar:. akka.tutorial.first.scala.Pi
- *   $ ...
- * 
- *

- * Run it in SBT: - *

- *   $ sbt
- *   > update
- *   > console
- *   > akka.tutorial.first.scala.Pi.calculate(nrOfWorkers = 4, nrOfElements = 10000, nrOfMessages = 10000)
- *   > ...
- *   > :quit
- * 
- * - * @author Jonas Bonér - */ object Pi extends App { calculate(nrOfWorkers = 4, nrOfElements = 10000, nrOfMessages = 10000) @@ -104,11 +78,11 @@ object Pi extends App { if (nrOfResults == nrOfMessages) self.stop() } - override def preStart { + override def preStart() { start = System.currentTimeMillis } - override def postStop { + override def postStop() { // tell the world that the calculation is complete println( "\n\tPi estimate: \t\t%s\n\tCalculation time: \t%s millis" diff --git a/akka-tutorials/akka-tutorial-second/README b/akka-tutorials/akka-tutorial-second/README new file mode 100644 index 0000000000..ed31bbca22 --- /dev/null +++ b/akka-tutorials/akka-tutorial-second/README @@ -0,0 +1,7 @@ +================= + Second Tutorial +================= + +This is the source code for the second tutorial. + +See the Akka Documentation for information about this tutorial. diff --git a/akka-tutorials/akka-tutorial-second/pom.xml b/akka-tutorials/akka-tutorial-second/pom.xml new file mode 100644 index 0000000000..cf7c46fd3b --- /dev/null +++ b/akka-tutorials/akka-tutorial-second/pom.xml @@ -0,0 +1,43 @@ + + + 4.0.0 + + akka-tutorial-second-java + akka.tutorial.second.java + akka-tutorial-second-java + jar + 1.2-SNAPSHOT + http://akka.io + + + + se.scalablesolutions.akka + akka-actor + 1.2-SNAPSHOT + + + + + + Akka + Akka Maven2 Repository + http://akka.io/repository/ + + + + + + + org.apache.maven.plugins + maven-compiler-plugin + 2.3.2 + + 1.6 + 1.6 + + + + + diff --git a/akka-tutorials/akka-tutorial-second/project/build.properties b/akka-tutorials/akka-tutorial-second/project/build.properties new file mode 100644 index 0000000000..7c84e8368b --- /dev/null +++ b/akka-tutorials/akka-tutorial-second/project/build.properties @@ -0,0 +1,5 @@ +project.organization=se.scalablesolutions.akka +project.name=akka-tutorial-second +project.version=1.2-SNAPSHOT +build.scala.versions=2.9.0 +sbt.version=0.7.7 diff --git 
a/akka-tutorials/akka-tutorial-second/project/build/Project.scala b/akka-tutorials/akka-tutorial-second/project/build/Project.scala new file mode 100644 index 0000000000..1d0b230149 --- /dev/null +++ b/akka-tutorials/akka-tutorial-second/project/build/Project.scala @@ -0,0 +1,3 @@ +import sbt._ + +class TutorialTwoProject(info: ProjectInfo) extends DefaultProject(info) with AkkaProject diff --git a/akka-tutorials/akka-tutorial-second/project/plugins/Plugins.scala b/akka-tutorials/akka-tutorial-second/project/plugins/Plugins.scala new file mode 100644 index 0000000000..aa263a988e --- /dev/null +++ b/akka-tutorials/akka-tutorial-second/project/plugins/Plugins.scala @@ -0,0 +1,6 @@ +import sbt._ + +class Plugins(info: ProjectInfo) extends PluginDefinition(info) { + val akkaRepo = "Akka Repo" at "http://akka.io/repository" + val akkaPlugin = "se.scalablesolutions.akka" % "akka-sbt-plugin" % "1.2-SNAPSHOT" +} diff --git a/akka-tutorials/akka-tutorial-second/src/main/java/akka/tutorial/java/second/Pi.java b/akka-tutorials/akka-tutorial-second/src/main/java/akka/tutorial/java/second/Pi.java index 6d01b752b1..5caae5e365 100644 --- a/akka-tutorials/akka-tutorial-second/src/main/java/akka/tutorial/java/second/Pi.java +++ b/akka-tutorials/akka-tutorial-second/src/main/java/akka/tutorial/java/second/Pi.java @@ -20,31 +20,6 @@ import akka.routing.InfiniteIterator; import akka.routing.Routing.Broadcast; import akka.routing.UntypedLoadBalancer; -/** - * Second part in Akka tutorial for Java. - *

- * Calculates Pi. - *

- * Run on command line: - *

- *   $ cd akka-1.1
- *   $ export AKKA_HOME=`pwd`
- *   $ javac -cp dist/akka-actor-1.1-SNAPSHOT.jar:scala-library.jar akka/tutorial/java/second/Pi.java
- *   $ java -cp dist/akka-actor-1.1-SNAPSHOT.jar:scala-library.jar:. akka.tutorial.java.second.Pi
- *   $ ...
- * 
- *

- * Run it in Maven: - *

- *   $ mvn
- *   > scala:console
- *   > val pi = new akka.tutorial.java.second.Pi
- *   > pi.calculate(4, 10000, 10000)
- *   > ...
- * 
- * - * @author Jonas Bonér - */ public class Pi { public static void main(String[] args) throws Exception { diff --git a/akka-tutorials/akka-tutorial-second/src/main/scala/Pi.scala b/akka-tutorials/akka-tutorial-second/src/main/scala/Pi.scala index e7e10f56ef..ff4b8d9c4a 100644 --- a/akka-tutorials/akka-tutorial-second/src/main/scala/Pi.scala +++ b/akka-tutorials/akka-tutorial-second/src/main/scala/Pi.scala @@ -13,32 +13,6 @@ import akka.dispatch.Future import System.{currentTimeMillis => now} -/** - * Second part in Akka tutorial. - *

- * Calculates Pi. - *

- * Run on command line: - *

- *   $ cd akka-1.1
- *   $ export AKKA_HOME=`pwd`
- *   $ scalac -cp dist/akka-actor-1.1-SNAPSHOT.jar Pi.scala
- *   $ java -cp dist/akka-actor-1.1-SNAPSHOT.jar:scala-library.jar:. akka.tutorial.second.Pi
- *   $ ...
- * 
- *

- * Run it in SBT: - *

- *   $ sbt
- *   > update
- *   > console
- *   > akka.tutorial.second.Pi.calculate(nrOfWorkers = 4, nrOfElements = 10000, nrOfMessages = 10000)
- *   > ...
- *   > :quit
- * 
- * - * @author Jonas Bonér - */ object Pi extends App { calculate(nrOfWorkers = 4, nrOfElements = 10000, nrOfMessages = 10000) @@ -111,7 +85,7 @@ object Pi extends App { def receive = scatter // when we are stopped, stop our team of workers and our router - override def postStop { + override def postStop() { // send a PoisonPill to all workers telling them to shut down themselves router ! Broadcast(PoisonPill) // send a PoisonPill to the router, telling him to shut himself down diff --git a/akka-typed-actor/src/main/scala/akka/actor/TypedActor.scala b/akka-typed-actor/src/main/scala/akka/actor/TypedActor.scala index 6972307919..93265a6379 100644 --- a/akka-typed-actor/src/main/scala/akka/actor/TypedActor.scala +++ b/akka-typed-actor/src/main/scala/akka/actor/TypedActor.scala @@ -83,11 +83,11 @@ import scala.reflect.BeanProperty * * def square(x: Int): Future[Integer] = future(x * x) * - * override def preStart = { + * override def preStart() = { * ... // optional initialization on start * } * - * override def postStop = { + * override def postStop() = { * ... // optional cleanup on stop * } * @@ -160,14 +160,14 @@ abstract class TypedActor extends Actor with Proxyable { *

* Is called when an Actor is started by invoking 'actor.start()'. */ - override def preStart {} + override def preStart() {} /** * User overridable callback. *

* Is called when 'actor.stop()' is invoked. */ - override def postStop {} + override def postStop() {} /** * User overridable callback. @@ -203,11 +203,10 @@ abstract class TypedActor extends Actor with Proxyable { * Integer result = future.get(); *

*/ - def future[T](value: T): Future[T] = - self.senderFuture - .map{f => f.completeWithResult(value); f } - .getOrElse(throw new IllegalActorStateException("No sender future in scope")) - .asInstanceOf[Future[T]] + def future[T](value: T): Future[T] = self.senderFuture match { + case None => throw new IllegalActorStateException("No sender future in scope") + case Some(f) => f.completeWithResult(value).asInstanceOf[Future[T]] + } def receive = { case joinPoint: JoinPoint => @@ -459,6 +458,24 @@ object TypedActor { newInstance(intfClass, factory, TypedActorConfiguration()) } + /** + * Factory method for typed actor. + * @param intfClass interface the typed actor implements + * @param factory factory method that constructs the typed actor + * @param config configuration object for the typed actor + */ + def newInstance[T](intfClass: Class[T], factory: => AnyRef, config: TypedActorConfiguration): T = + newInstance(intfClass, actorOf(newTypedActor(factory)), config) + + /** + * Factory method for typed actor. + * @param intfClass interface the typed actor implements + * @param targetClass implementation class of the typed actor + * @param config configuration object for the typed actor + */ + def newInstance[T](intfClass: Class[T], targetClass: Class[_], config: TypedActorConfiguration): T = + newInstance(intfClass, actorOf(newTypedActor(targetClass)), config) + /** * Factory method for typed actor. * @param intfClass interface the typed actor implements @@ -479,24 +496,6 @@ object TypedActor { newInstance(intfClass, factory, TypedActorConfiguration(timeout)) } - /** - * Factory method for typed actor. 
- * @param intfClass interface the typed actor implements - * @param factory factory method that constructs the typed actor - * @paramm config configuration object fo the typed actor - */ - def newInstance[T](intfClass: Class[T], factory: => AnyRef, config: TypedActorConfiguration): T = - newInstance(intfClass, actorOf(newTypedActor(factory)), config) - - /** - * Factory method for typed actor. - * @param intfClass interface the typed actor implements - * @param targetClass implementation class of the typed actor - * @paramm config configuration object fo the typed actor - */ - def newInstance[T](intfClass: Class[T], targetClass: Class[_], config: TypedActorConfiguration): T = - newInstance(intfClass, actorOf(newTypedActor(targetClass)), config) - private[akka] def newInstance[T](intfClass: Class[T], actorRef: ActorRef): T = { if (!actorRef.actorInstance.get.isInstanceOf[TypedActor]) throw new IllegalArgumentException("ActorRef is not a ref to a typed actor") newInstance(intfClass, actorRef, TypedActorConfiguration()) @@ -544,8 +543,8 @@ object TypedActor { /** * Java API. */ - def newInstance[T](intfClass: Class[T], factory: TypedActorFactory, timeout: Long) : T = - newInstance(intfClass, factory.create, timeout) +// def newInstance[T](intfClass: Class[T], factory: TypedActorFactory, timeout: Long) : T = +// newInstance(intfClass, factory.create, timeout) /** * Java API. 
diff --git a/akka-typed-actor/src/main/scala/akka/config/TypedActorGuiceConfigurator.scala b/akka-typed-actor/src/main/scala/akka/config/TypedActorGuiceConfigurator.scala index a7f6ffbfec..826ac95ed3 100644 --- a/akka-typed-actor/src/main/scala/akka/config/TypedActorGuiceConfigurator.scala +++ b/akka-typed-actor/src/main/scala/akka/config/TypedActorGuiceConfigurator.scala @@ -166,7 +166,7 @@ private[akka] class TypedActorGuiceConfigurator extends TypedActorConfiguratorBa } def stop = synchronized { - if (supervisor.isDefined) supervisor.get.shutdown + if (supervisor.isDefined) supervisor.get.shutdown() } } diff --git a/akka-typed-actor/src/main/scala/akka/transactor/Coordination.scala b/akka-typed-actor/src/main/scala/akka/transactor/Coordination.scala index 33a74fea79..1f72176eed 100644 --- a/akka-typed-actor/src/main/scala/akka/transactor/Coordination.scala +++ b/akka-typed-actor/src/main/scala/akka/transactor/Coordination.scala @@ -9,7 +9,7 @@ import akka.stm.Atomic import scala.util.DynamicVariable -class CoordinateException private[akka](message: String) extends AkkaException(message) +class CoordinateException private[akka](message: String, cause: Throwable = null) extends AkkaException(message, cause) /** * Coordinating transactions between typed actors. diff --git a/config/akka-reference.conf b/config/akka-reference.conf index 6d0338a8f9..8b64cda636 100644 --- a/config/akka-reference.conf +++ b/config/akka-reference.conf @@ -6,7 +6,7 @@ # Modify as needed. akka { - version = "1.1-SNAPSHOT" # Akka version, checked against the runtime version of Akka. + version = "1.2-SNAPSHOT" # Akka version, checked against the runtime version of Akka. enabled-modules = [] # Comma separated list of the enabled modules. 
Options: ["remote", "camel", "http"] @@ -147,19 +147,12 @@ akka { trace-level = "none" } - jta { - provider = "from-jndi" # Options: - "from-jndi" (means that Akka will try to detect a TransactionManager in the JNDI) - # - "atomikos" (means that Akka will use the Atomikos based JTA impl in 'akka-jta', - # e.g. you need the akka-jta JARs on classpath). - timeout = 60 - } - http { hostname = "localhost" port = 9998 #If you are using akka.http.AkkaRestServlet - filters = ["se.scalablesolutions.akka.security.AkkaSecurityFilterFactory"] # List with all jersey filters to use + filters = ["akka.security.AkkaSecurityFilterFactory"] # List with all jersey filters to use # resource-packages = ["sample.rest.scala", # "sample.rest.java", # "sample.security"] # List with all resource packages for your Jersey services @@ -197,7 +190,7 @@ akka { remote { - # secure-cookie = "050E0A0D0D06010A00000900040D060F0C09060B" # generate your own with '$AKKA_HOME/scripts/generate_secure_cookie.sh' or using 'Crypt.generateSecureCookie' + # secure-cookie = "050E0A0D0D06010A00000900040D060F0C09060B" # generate your own with '$AKKA_HOME/scripts/generate_config_with_secure_cookie.sh' or using 'Crypt.generateSecureCookie' secure-cookie = "" compression-scheme = "zlib" # Options: "zlib" (lzf to come), leave out for no compression diff --git a/project/build.properties b/project/build.properties index 8275b16e18..fbcb83ae2d 100644 --- a/project/build.properties +++ b/project/build.properties @@ -1,5 +1,6 @@ project.organization=se.scalablesolutions.akka project.name=akka -project.version=1.1-SNAPSHOT -build.scala.versions=2.9.0.RC1 -sbt.version=0.7.6.RC0 +project.version=1.2-SNAPSHOT +build.scala.versions=2.9.0 +sbt.version=0.7.7 + diff --git a/project/build/AkkaProject.scala b/project/build/AkkaProject.scala index 96b020d13c..bfaea36280 100644 --- a/project/build/AkkaProject.scala +++ b/project/build/AkkaProject.scala @@ -10,7 +10,7 @@ import sbt._ import sbt.CompileOrder._ import spde._ -class 
AkkaParentProject(info: ProjectInfo) extends DefaultProject(info) { +class AkkaParentProject(info: ProjectInfo) extends ParentProject(info) with ExecProject with DocParentProject { akkaParent => // ------------------------------------------------------------------------------------------------------------------- // Compile settings @@ -24,45 +24,6 @@ class AkkaParentProject(info: ProjectInfo) extends DefaultProject(info) { val javaCompileSettings = Seq("-Xlint:unchecked") - override def compileOptions = super.compileOptions ++ scalaCompileSettings.map(CompileOption) - override def javaCompileOptions = super.javaCompileOptions ++ javaCompileSettings.map(JavaCompileOption) - - // ------------------------------------------------------------------------------------------------------------------- - // Deploy/dist settings - // ------------------------------------------------------------------------------------------------------------------- - val distName = "%s-%s".format(name, version) - val distArchiveName = distName + ".zip" - val deployPath = info.projectPath / "deploy" - val distPath = info.projectPath / "dist" - val distArchive = (distPath ##) / distArchiveName - - lazy override val `package` = task { None } - - //The distribution task, packages Akka into a zipfile and places it into the projectPath/dist directory - lazy val dist = task { - - def transferFile(from: Path, to: Path) = - if ( from.asFile.renameTo(to.asFile) ) None - else Some("Couldn't transfer %s to %s".format(from,to)) - - //Creates a temporary directory where we can assemble the distribution - val genDistDir = Path.fromFile({ - val d = File.createTempFile("akka","dist") - d.delete //delete the file - d.mkdir //Recreate it as a dir - d - }).## //## is needed to make sure that the zipped archive has the correct root folder - - //Temporary directory to hold the dist currently being generated - val currentDist = genDistDir / distName - - FileUtilities.copy(allArtifacts.get, currentDist, 
log).left.toOption orElse //Copy all needed artifacts into the root archive - FileUtilities.zip(List(currentDist), distArchiveName, true, log) orElse //Compress the root archive into a zipfile - transferFile(info.projectPath / distArchiveName, distArchive) orElse //Move the archive into the dist folder - FileUtilities.clean(genDistDir,log) //Cleanup the generated jars - - } dependsOn (`package`) describedAs("Zips up the distribution.") - // ------------------------------------------------------------------------------------------------------------------- // All repositories *must* go here! See ModuleConigurations below. // ------------------------------------------------------------------------------------------------------------------- @@ -97,7 +58,7 @@ class AkkaParentProject(info: ProjectInfo) extends DefaultProject(info) { lazy val jerseyModuleConfig = ModuleConfiguration("com.sun.jersey", JavaNetRepo) lazy val multiverseModuleConfig = ModuleConfiguration("org.multiverse", CodehausRepo) lazy val nettyModuleConfig = ModuleConfiguration("org.jboss.netty", JBossRepo) - lazy val scalaTestModuleConfig = ModuleConfiguration("org.scalatest", ScalaToolsSnapshotRepo) + lazy val scalaTestModuleConfig = ModuleConfiguration("org.scalatest", ScalaToolsRelRepo) lazy val spdeModuleConfig = ModuleConfiguration("us.technically.spde", DatabinderRepo) lazy val processingModuleConfig = ModuleConfiguration("org.processing", DatabinderRepo) lazy val sjsonModuleConfig = ModuleConfiguration("net.debasishg", ScalaToolsRelRepo) @@ -112,10 +73,10 @@ class AkkaParentProject(info: ProjectInfo) extends DefaultProject(info) { // Versions // ------------------------------------------------------------------------------------------------------------------- - lazy val JACKSON_VERSION = "1.7.1" + lazy val JACKSON_VERSION = "1.8.0" lazy val JERSEY_VERSION = "1.3" lazy val MULTIVERSE_VERSION = "0.6.2" - lazy val SCALATEST_VERSION = "1.4-SNAPSHOT" + lazy val SCALATEST_VERSION = "1.4.1" lazy val 
JETTY_VERSION = "7.4.0.v20110414" lazy val JAVAX_SERVLET_VERSION = "3.0" lazy val SLF4J_VERSION = "1.6.0" @@ -157,14 +118,14 @@ class AkkaParentProject(info: ProjectInfo) extends DefaultProject(info) { lazy val multiverse = "org.multiverse" % "multiverse-alpha" % MULTIVERSE_VERSION % "compile" //ApacheV2 lazy val multiverse_test = "org.multiverse" % "multiverse-alpha" % MULTIVERSE_VERSION % "test" //ApacheV2 - lazy val netty = "org.jboss.netty" % "netty" % "3.2.3.Final" % "compile" //ApacheV2 + lazy val netty = "org.jboss.netty" % "netty" % "3.2.4.Final" % "compile" //ApacheV2 lazy val osgi_core = "org.osgi" % "org.osgi.core" % "4.2.0" //ApacheV2 lazy val protobuf = "com.google.protobuf" % "protobuf-java" % "2.3.0" % "compile" //New BSD - lazy val sjson = "net.debasishg" % "sjson_2.9.0.RC1" % "0.11" % "compile" //ApacheV2 - lazy val sjson_test = "net.debasishg" % "sjson_2.9.0.RC1" % "0.11" % "test" //ApacheV2 + lazy val sjson = "net.debasishg" %% "sjson" % "0.11" % "compile" //ApacheV2 + lazy val sjson_test = "net.debasishg" %% "sjson" % "0.11" % "test" //ApacheV2 lazy val slf4j = "org.slf4j" % "slf4j-api" % SLF4J_VERSION lazy val logback = "ch.qos.logback" % "logback-classic" % "0.9.28" % "runtime" @@ -184,7 +145,7 @@ class AkkaParentProject(info: ProjectInfo) extends DefaultProject(info) { lazy val junit = "junit" % "junit" % "4.5" % "test" //Common Public License 1.0 lazy val mockito = "org.mockito" % "mockito-all" % "1.8.1" % "test" //MIT - lazy val scalatest = "org.scalatest" % "scalatest" % SCALATEST_VERSION % "test" //ApacheV2 + lazy val scalatest = "org.scalatest" %% "scalatest" % SCALATEST_VERSION % "test" //ApacheV2 } // ------------------------------------------------------------------------------------------------------------------- @@ -211,23 +172,18 @@ class AkkaParentProject(info: ProjectInfo) extends DefaultProject(info) { override def disableCrossPaths = true - override def packageOptions = - manifestClassPath.map(cp => ManifestAttributes( - 
(Attributes.Name.CLASS_PATH, cp), - (IMPLEMENTATION_TITLE, "Akka"), - (IMPLEMENTATION_URL, "http://akka.io"), - (IMPLEMENTATION_VENDOR, "Scalable Solutions AB") - )).toList + // add the sh action since it doesn't exist in ParentProject + lazy val sh = task { args => execOut { Process("sh" :: "-c" :: args.mkString(" ") :: Nil) } } - //Exclude slf4j1.5.11 from the classpath, it's conflicting... - override def fullClasspath(config: Configuration): PathFinder = { - super.fullClasspath(config) --- - (super.fullClasspath(config) ** "slf4j*1.5.11.jar") - } + // ------------------------------------------------------------------------------------------------------------------- + // Scaladocs + // ------------------------------------------------------------------------------------------------------------------- - // ------------------------------------------------------------ + override def apiProjectDependencies = dependencies.toList - akka_samples + + // ------------------------------------------------------------------------------------------------------------------- // Publishing - // ------------------------------------------------------------ + // ------------------------------------------------------------------------------------------------------------------- override def managedStyle = ManagedStyle.Maven @@ -262,15 +218,17 @@ class AkkaParentProject(info: ProjectInfo) extends DefaultProject(info) { override def artifacts = Set(Artifact(artifactID, "pom", "pom")) - override def deliverProjectDependencies = super.deliverProjectDependencies.toList - akka_samples.projectID - akka_tutorials.projectID + override def deliverProjectDependencies = (super.deliverProjectDependencies.toList + - akka_samples.projectID + - akka_tutorials.projectID + - akkaDist.projectID) - // ------------------------------------------------------------ + // ------------------------------------------------------------------------------------------------------------------- // Build release - // 
------------------------------------------------------------ + // ------------------------------------------------------------------------------------------------------------------- val localReleasePath = outputPath / "release" / version.toString val localReleaseRepository = Resolver.file("Local Release", localReleasePath / "repository" asFile) - val localReleaseDownloads = localReleasePath / "downloads" override def otherRepositories = super.otherRepositories ++ Seq(localReleaseRepository) @@ -280,22 +238,54 @@ class AkkaParentProject(info: ProjectInfo) extends DefaultProject(info) { } lazy val buildRelease = task { - FileUtilities.copy(Seq(distArchive), localReleaseDownloads, log).left.toOption - } dependsOn (publishRelease, dist) + log.info("Built release.") + None + } dependsOn (publishRelease, releaseApi, releaseDocs, releaseDownloads, releaseDist) + + lazy val releaseApi = task { + val apiSources = ((apiOutputPath ##) ***) + val apiPath = localReleasePath / "api" / "akka" / version.toString + FileUtilities.copy(apiSources.get, apiPath, log).left.toOption + } dependsOn (api) + + lazy val releaseDocs = task { + val docsBuildPath = docsPath / "_build" + val docsHtmlSources = ((docsBuildPath / "html" ##) ***) + val docsPdfSources = (docsBuildPath / "latex" ##) ** "*.pdf" + val docsOutputPath = localReleasePath / "docs" / "akka" / version.toString + FileUtilities.copy(docsHtmlSources.get, docsOutputPath, log).left.toOption orElse + FileUtilities.copy(docsPdfSources.get, docsOutputPath, log).left.toOption + } dependsOn (docs) + + lazy val releaseDownloads = task { + val distArchives = akkaDist.akkaActorsDist.distArchive +++ akkaDist.akkaCoreDist.distArchive + val downloadsPath = localReleasePath / "downloads" + FileUtilities.copy(distArchives.get, downloadsPath, log).left.toOption + } dependsOn (dist) + + lazy val releaseDist = task { + val distArchives = akkaDist.akkaActorsDist.distExclusiveArchive +++ akkaDist.akkaCoreDist.distExclusiveArchive + val distPath = 
localReleasePath / "dist" + FileUtilities.copy(distArchives.get, distPath, log).left.toOption + } dependsOn (dist) + + lazy val dist = task { None } // dummy task // ------------------------------------------------------------------------------------------------------------------- // akka-actor subproject // ------------------------------------------------------------------------------------------------------------------- - class AkkaActorProject(info: ProjectInfo) extends AkkaDefaultProject(info, distPath) with OsgiProject { + class AkkaActorProject(info: ProjectInfo) extends AkkaDefaultProject(info) with OsgiProject with AutoCompilerPlugins { override def bndExportPackage = super.bndExportPackage ++ Seq("com.eaio.*;version=3.2") + val cont = compilerPlugin("org.scala-lang.plugins" % "continuations" % buildScalaVersion) + override def compileOptions = super.compileOptions ++ compileOptions("-P:continuations:enable") } // ------------------------------------------------------------------------------------------------------------------- // akka-stm subproject // ------------------------------------------------------------------------------------------------------------------- - class AkkaStmProject(info: ProjectInfo) extends AkkaDefaultProject(info, distPath) { + class AkkaStmProject(info: ProjectInfo) extends AkkaDefaultProject(info) { val multiverse = Dependencies.multiverse // testing @@ -307,7 +297,7 @@ class AkkaParentProject(info: ProjectInfo) extends DefaultProject(info) { // akka-typed-actor subproject // ------------------------------------------------------------------------------------------------------------------- - class AkkaTypedActorProject(info: ProjectInfo) extends AkkaDefaultProject(info, distPath) { + class AkkaTypedActorProject(info: ProjectInfo) extends AkkaDefaultProject(info) { val aopalliance = Dependencies.aopalliance val aspectwerkz = Dependencies.aspectwerkz val guicey = Dependencies.guicey @@ -324,7 +314,7 @@ class 
AkkaParentProject(info: ProjectInfo) extends DefaultProject(info) { // akka-remote subproject // ------------------------------------------------------------------------------------------------------------------- - class AkkaRemoteProject(info: ProjectInfo) extends AkkaDefaultProject(info, distPath) { + class AkkaRemoteProject(info: ProjectInfo) extends AkkaDefaultProject(info) { val commons_codec = Dependencies.commons_codec val commons_io = Dependencies.commons_io val guicey = Dependencies.guicey @@ -351,7 +341,7 @@ class AkkaParentProject(info: ProjectInfo) extends DefaultProject(info) { // akka-cluster sub project // ------------------------------------------------------------------------------------------------------------------- - class AkkaClusterProject(info: ProjectInfo) extends AkkaDefaultProject(info, distPath) with MultiJvmTests { + class AkkaClusterProject(info: ProjectInfo) extends AkkaDefaultProject(info) with MultiJvmTests { val bookkeeper = Dependencies.bookkeeper val zookeeper = Dependencies.zookeeper val zookeeperLock = Dependencies.zookeeperLock @@ -376,7 +366,7 @@ class AkkaParentProject(info: ProjectInfo) extends DefaultProject(info) { // akka-http subproject // ------------------------------------------------------------------------------------------------------------------- - class AkkaHttpProject(info: ProjectInfo) extends AkkaDefaultProject(info, distPath) { + class AkkaHttpProject(info: ProjectInfo) extends AkkaDefaultProject(info) { val jsr250 = Dependencies.jsr250 val javax_servlet30 = Dependencies.javax_servlet_30 val jetty = Dependencies.jetty @@ -391,7 +381,7 @@ class AkkaParentProject(info: ProjectInfo) extends DefaultProject(info) { } // ------------------------------------------------------------------------------------------------------------------- - // Examples + // Samples // ------------------------------------------------------------------------------------------------------------------- class AkkaSampleAntsProject(info: 
ProjectInfo) extends DefaultSpdeProject(info) { @@ -410,13 +400,13 @@ class AkkaParentProject(info: ProjectInfo) extends DefaultProject(info) { } } - class AkkaSampleRemoteProject(info: ProjectInfo) extends AkkaDefaultProject(info, deployPath) + class AkkaSampleRemoteProject(info: ProjectInfo) extends AkkaDefaultProject(info) - class AkkaSampleChatProject(info: ProjectInfo) extends AkkaDefaultProject(info, deployPath) + class AkkaSampleChatProject(info: ProjectInfo) extends AkkaDefaultProject(info) - class AkkaSampleFSMProject(info: ProjectInfo) extends AkkaDefaultProject(info, deployPath) + class AkkaSampleFSMProject(info: ProjectInfo) extends AkkaDefaultProject(info) - class AkkaSampleOsgiProject(info: ProjectInfo) extends AkkaDefaultProject(info, deployPath) with BNDPlugin { + class AkkaSampleOsgiProject(info: ProjectInfo) extends AkkaDefaultProject(info) with BNDPlugin { val osgiCore = Dependencies.osgi_core override protected def bndPrivatePackage = List("sample.osgi.*") override protected def bndBundleActivator = Some("sample.osgi.Activator") @@ -446,9 +436,18 @@ class AkkaParentProject(info: ProjectInfo) extends DefaultProject(info) { // Tutorials // ------------------------------------------------------------------------------------------------------------------- - class AkkaTutorialFirstProject(info: ProjectInfo) extends AkkaDefaultProject(info, deployPath) + class AkkaTutorialFirstProject(info: ProjectInfo) extends AkkaTutorialProject(info) - class AkkaTutorialSecondProject(info: ProjectInfo) extends AkkaDefaultProject(info, deployPath) + class AkkaTutorialSecondProject(info: ProjectInfo) extends AkkaTutorialProject(info) + + class AkkaTutorialProject(info: ProjectInfo) extends AkkaDefaultProject(info) { + def doNothing = task { None } + override def publishLocalAction = doNothing + override def deliverLocalAction = doNothing + override def publishAction = doNothing + override def deliverAction = doNothing + override lazy val publishRelease = doNothing + 
} class AkkaTutorialsParentProject(info: ProjectInfo) extends ParentProject(info) { override def disableCrossPaths = true @@ -459,17 +458,19 @@ class AkkaParentProject(info: ProjectInfo) extends DefaultProject(info) { lazy val akka_tutorial_second = project("akka-tutorial-second", "akka-tutorial-second", new AkkaTutorialSecondProject(_), akka_actor) - lazy val publishRelease = { - val releaseConfiguration = new DefaultPublishConfiguration(localReleaseRepository, "release", false) - publishTask(publishIvyModule, releaseConfiguration) dependsOn (deliver, publishLocal, makePom) - } + def doNothing = task { None } + override def publishLocalAction = doNothing + override def deliverLocalAction = doNothing + override def publishAction = doNothing + override def deliverAction = doNothing + lazy val publishRelease = doNothing } // ------------------------------------------------------------------------------------------------------------------- // akka-testkit subproject // ------------------------------------------------------------------------------------------------------------------- - class AkkaTestkitProject(info: ProjectInfo) extends AkkaDefaultProject(info, distPath) { + class AkkaTestkitProject(info: ProjectInfo) extends AkkaDefaultProject(info) { val scalatest = Dependencies.scalatest } @@ -477,52 +478,27 @@ class AkkaParentProject(info: ProjectInfo) extends DefaultProject(info) { // akka-actor-tests subproject // ------------------------------------------------------------------------------------------------------------------- - class AkkaActorTestsProject(info: ProjectInfo) extends AkkaDefaultProject(info, distPath) { + class AkkaActorTestsProject(info: ProjectInfo) extends AkkaDefaultProject(info) with AutoCompilerPlugins { // testing val junit = Dependencies.junit val scalatest = Dependencies.scalatest val multiverse_test = Dependencies.multiverse_test // StandardLatch + override def compileOptions = super.compileOptions ++ 
compileOptions("-P:continuations:enable") } // ------------------------------------------------------------------------------------------------------------------- // akka-slf4j subproject // ------------------------------------------------------------------------------------------------------------------- - class AkkaSlf4jProject(info: ProjectInfo) extends AkkaDefaultProject(info, distPath) { + class AkkaSlf4jProject(info: ProjectInfo) extends AkkaDefaultProject(info) { val slf4j = Dependencies.slf4j } // ------------------------------------------------------------------------------------------------------------------- - // Helpers + // Default project // ------------------------------------------------------------------------------------------------------------------- - def removeDupEntries(paths: PathFinder) = Path.lazyPathFinder { - val mapped = paths.get map { p => (p.relativePath, p) } - (Map() ++ mapped).values.toList - } - - def allArtifacts = { - Path.fromFile(buildScalaInstance.libraryJar) +++ - (removeDupEntries(runClasspath filter ClasspathUtilities.isArchive) +++ - ((outputPath ##) / defaultJarName) +++ - mainResources +++ - mainDependencies.scalaJars +++ - descendents(info.projectPath / "scripts", "run_akka.sh") +++ - descendents(info.projectPath / "scripts", "akka-init-script.sh") +++ - descendents(info.projectPath / "dist", "*.jar") +++ - descendents(info.projectPath / "deploy", "*.jar") +++ - descendents(path("lib") ##, "*.jar") +++ - descendents(configurationPath(Configurations.Compile) ##, "*.jar")) - .filter(jar => // remove redundant libs - !jar.toString.endsWith("stax-api-1.0.1.jar") || - !jar.toString.endsWith("scala-library-2.7.7.jar") - ) - } - - def akkaArtifacts = descendents(info.projectPath / "dist", "*-" + version + ".jar") - - // ------------------------------------------------------------ - class AkkaDefaultProject(info: ProjectInfo, val deployPath: Path) extends DefaultProject(info) with DeployProject with McPom { + class 
AkkaDefaultProject(info: ProjectInfo) extends DefaultProject(info) with McPom { override def disableCrossPaths = true @@ -552,26 +528,52 @@ class AkkaParentProject(info: ProjectInfo) extends DefaultProject(info) { publishTask(publishIvyModule, releaseConfiguration) dependsOn (deliver, publishLocal, makePom) } } -} -trait DeployProject { self: BasicScalaProject => - // defines where the deployTask copies jars to - def deployPath: Path + // ------------------------------------------------------------------------------------------------------------------- + // Distribution + // ------------------------------------------------------------------------------------------------------------------- - lazy val dist = deployTask(jarPath, packageDocsJar, packageSrcJar, deployPath, true, true, true) dependsOn( - `package`, packageDocs, packageSrc) describedAs("Deploying") + lazy val akkaDist = project("dist", "akka-dist", new AkkaDistParentProject(_)) - def deployTask(jar: Path, docs: Path, src: Path, toDir: Path, - genJar: Boolean, genDocs: Boolean, genSource: Boolean) = task { - def gen(jar: Path, toDir: Path, flag: Boolean, msg: String): Option[String] = - if (flag) { - log.info(msg + " " + jar) - FileUtilities.copyFile(jar, toDir / jar.name, log) - } else None + class AkkaDistParentProject(info: ProjectInfo) extends ParentProject(info) { + lazy val akkaActorsDist = project("actors", "akka-dist-actors", new AkkaActorsDistProject(_), akka_actor) - gen(jar, toDir, genJar, "Deploying bits") orElse - gen(docs, toDir, genDocs, "Deploying docs") orElse - gen(src, toDir, genSource, "Deploying sources") + lazy val akkaCoreDist = project("core", "akka-dist-core", new AkkaCoreDistProject(_), + akkaActorsDist, akka_remote, akka_http, akka_slf4j, akka_testkit, akka_actor_tests) + + def doNothing = task { None } + override def publishLocalAction = doNothing + override def deliverLocalAction = doNothing + override def publishAction = doNothing + override def deliverAction = doNothing + + 
class AkkaActorsDistProject(info: ProjectInfo) extends DefaultProject(info) with DistDocProject { + def distName = "akka-actors" + override def distDocName = "akka" + + override def distConfigSources = (akkaParent.info.projectPath / "config" ##) * "*" + + override def distAction = super.distAction dependsOn (distTutorials) + + val distTutorialsPath = distDocPath / "tutorials" + + lazy val distTutorials = task { + val tutorials = Set(akka_tutorials.akka_tutorial_first, + akka_tutorials.akka_tutorial_second) + + tutorials.map { tutorial => + val tutorialPath = (tutorial.info.projectPath ##) + val tutorialFilterOut = ((tutorial.outputPath ##) ***) + val tutorialSources = (tutorialPath ***) --- tutorialFilterOut + val tutorialOutputPath = distTutorialsPath / tutorial.name + copyPaths(tutorialSources, tutorialOutputPath) + }.foldLeft(None: Option[String])(_ orElse _) + } dependsOn (distBase) + } + + class AkkaCoreDistProject(info: ProjectInfo)extends DefaultProject(info) with DistProject { + def distName = "akka-core" + } } } diff --git a/project/build/DistProject.scala b/project/build/DistProject.scala new file mode 100644 index 0000000000..de1cb0cdf9 --- /dev/null +++ b/project/build/DistProject.scala @@ -0,0 +1,215 @@ +/** + * Copyright (C) 2009-2011 Scalable Solutions AB + */ + +import sbt._ + +trait DistBaseProject extends DefaultProject { + def distOutputPath: Path + def distLibPath: Path + def distSrcPath: Path + def distDocPath: Path + def dist: Task + + override def disableCrossPaths = true + + def doNothing = task { None } + override def compileAction = doNothing + override def testCompileAction = doNothing + override def testAction = doNothing + override def packageAction = doNothing + override def publishLocalAction = doNothing + override def deliverLocalAction = doNothing + override def publishAction = doNothing + override def deliverAction = doNothing +} + +trait DistProject extends DistBaseProject { + def distName: String + + val distFullName = distName + 
"-" + version + val distOutputBasePath = outputPath / "dist" + val distOutputPath = (distOutputBasePath ##) / distFullName + val distScalaLibPath = distOutputPath / "lib" + val distBinPath = distOutputPath / "bin" + val distConfigPath = distOutputPath / "config" + val distDeployPath = distOutputPath / "deploy" + val distLibPath = distOutputPath / "lib" / "akka" + val distSrcPath = distOutputPath / "src" / "akka" + val distDocPath = distOutputPath / "doc" / "akka" + val distDocJarsPath = distDocPath / "api" / "jars" + val distSharePath = Path.userHome / ".ivy2" / "dist" / distFullName + val distArchiveName = distFullName + ".zip" + val distArchive = (distOutputBasePath ##) / distArchiveName + + val distExclusiveOutputBasePath = distOutputBasePath / "exclusive" + val distExclusiveOutputPath = (distExclusiveOutputBasePath ##) / distFullName + val distExclusiveArchive = (distExclusiveOutputBasePath ##) / distArchiveName + + def distConfigSources = ((info.projectPath / "config" ##) ***) + def distScriptSources = ((info.projectPath / "scripts" ##) ***) + + def distAlwaysExclude(path: Path) = path.name == "scala-library.jar" + def distAlwaysInclude(path: Path) = distConfigSources.get.toList.map(_.name).contains(path.name) + + def scalaDependency = buildLibraryJar + + def allProjectDependencies = topologicalSort.dropRight(1) + + def distDependencies = { + allProjectDependencies.flatMap( p => p match { + case adp: DistBaseProject => Some(adp) + case _ => None + }) + } + + def distClasspath = runClasspath + + def dependencyJars(filter: Path => Boolean) = distClasspath.filter(filter) + + def isJar(path: Path) = path.name.endsWith(".jar") + + def isSrcJar(path: Path) = isJar(path) && path.name.contains("-sources") + + def isDocJar(path: Path) = isJar(path) && path.name.contains("-docs") + + def isClassJar(path: Path) = isJar(path) && !isSrcJar(path) && !isDocJar(path) + + def projectDependencies = allProjectDependencies -- distDependencies + + def projectDependencyJars(f: 
PackagePaths => Path) = { + Path.lazyPathFinder { + projectDependencies.flatMap( p => p match { + case pp: PackagePaths => Some(f(pp)) + case _ => None + }) + } + } + + def distLibs = dependencyJars(isClassJar) +++ projectDependencyJars(_.jarPath) + + def distSrcJars = dependencyJars(isSrcJar) +++ projectDependencyJars(_.packageSrcJar) + + def distDocJars = dependencyJars(isDocJar) +++ projectDependencyJars(_.packageDocsJar) + + def distShareSources = ((distOutputPath ##) ***) + + lazy val dist = (distAction dependsOn (distBase, `package`, packageSrc, packageDocs) + describedAs("Create a distribution.")) + + def distAction = task { + def exclusiveDist = { + val excludePaths = (distDependencies.map(p => ((p.distOutputPath ##) ***)) + .foldLeft(Path.emptyPathFinder)(_ +++ _)) + val excludeRelativePaths = excludePaths.get.toList.map(_.relativePath) + val allDistPaths = ((distOutputPath ##) ***) + val includePaths = allDistPaths.filter(path => { + distAlwaysInclude(path) || !(distAlwaysExclude(path) || excludeRelativePaths.contains(path.relativePath)) + }) + copyPaths(includePaths, distExclusiveOutputPath) orElse + FileUtilities.zip(List(distExclusiveOutputPath), distExclusiveArchive, true, log) + } + + copyFiles(scalaDependency, distScalaLibPath) orElse + copyFiles(distLibs, distLibPath) orElse + copyFiles(distSrcJars, distSrcPath) orElse + copyFiles(distDocJars, distDocJarsPath) orElse + copyPaths(distConfigSources, distConfigPath) orElse + copyScripts(distScriptSources, distBinPath) orElse + copyPaths(distShareSources, distSharePath) orElse + FileUtilities.zip(List(distOutputPath), distArchive, true, log) orElse + exclusiveDist + } + + lazy val distBase = distBaseAction dependsOn (distClean) describedAs "Create the dist base." 
+ + def distBaseAction = task { + distDependencies.map( dist => { + val allFiles = ((dist.distOutputPath ##) ***) + copyPaths(allFiles, distOutputPath) + }).foldLeft(None: Option[String])(_ orElse _) + } + + def distDependencyTasks: Seq[ManagedTask] = distDependencies.map(_.dist) + + lazy val distClean = (distCleanAction dependsOn (distDependencyTasks: _*) + describedAs "Clean the dist target dir.") + + def distCleanAction = task { + FileUtilities.clean(distOutputPath, log) orElse + FileUtilities.clean(distSharePath, log) + } + + def copyFiles(from: PathFinder, to: Path): Option[String] = { + if (from.get.isEmpty) None + else FileUtilities.copyFlat(from.get, to, log).left.toOption + } + + def copyPaths(from: PathFinder, to: Path): Option[String] = { + if (from.get.isEmpty) None + else FileUtilities.copy(from.get, to, true, log).left.toOption + } + + def copyScripts(from: PathFinder, to: Path): Option[String] = { + from.get.map { script => + val target = to / script.name + FileUtilities.copyFile(script, target, log) orElse + setExecutable(target, script.asFile.canExecute) + }.foldLeft(None: Option[String])(_ orElse _) + } + + def setExecutable(target: Path, executable: Boolean): Option[String] = { + val success = target.asFile.setExecutable(executable, false) + if (success) None else Some("Couldn't set permissions of " + target) + } +} + +trait DistDocProject extends DistProject { + def distDocName = distName + + def findDocParent(project: Project): DocParentProject = project.info.parent match { + case Some(dpp: DocParentProject) => dpp + case Some(p: Project) => findDocParent(p) + case _ => error("Parent project is not a DocParentProject") + } + + def docParent = findDocParent(this) + + override def distAction = super.distAction dependsOn (distApi, distDocs) + + val apiSources = ((docParent.apiOutputPath ##) ***) + val apiPath = distDocPath / "api" / "html" / distDocName + + lazy val distApi = task { + copyPaths(apiSources, apiPath) + } dependsOn (distBase, 
docParent.api) + + val docsBuildPath = docParent.docsPath / "_build" + val docsHtmlSources = ((docsBuildPath / "html" ##) ***) + val docsPdfSources = (docsBuildPath / "latex" ##) ** "*.pdf" + + val docsOutputPath = distDocPath / "docs" + val docsHtmlPath = docsOutputPath / "html" / distDocName + val docsPdfPath = docsOutputPath / "pdf" + + lazy val distDocs = task { + copyPaths(docsHtmlSources, docsHtmlPath) orElse + copyPaths(docsPdfSources, docsPdfPath) + } dependsOn (distBase, docParent.docs) +} + +/* + * For wiring together akka and akka-modules. + */ +trait DistSharedProject extends DistBaseProject { + def distName: String + + val distFullName = distName + "-" + version + val distOutputPath = Path.userHome / ".ivy2" / "dist" / distFullName + + val distLibPath = distOutputPath / "lib" / "akka" + val distSrcPath = distOutputPath / "src" / "akka" + val distDocPath = distOutputPath / "doc" / "akka" + + lazy val dist = task { None } +} diff --git a/project/build/DocParentProject.scala b/project/build/DocParentProject.scala new file mode 100644 index 0000000000..1e7bf0266c --- /dev/null +++ b/project/build/DocParentProject.scala @@ -0,0 +1,49 @@ +/** + * Copyright (C) 2009-2011 Scalable Solutions AB + */ + +import sbt._ + +trait DocParentProject extends ParentProject { + def apiOutputPath = outputPath / "doc" / "main" / "api" + + def apiProjectDependencies = topologicalSort.dropRight(1) + + def apiMainSources = + apiProjectDependencies.map { + case sp: ScalaPaths => sp.mainSources + case _ => Path.emptyPathFinder + }.foldLeft(Path.emptyPathFinder)(_ +++ _) + + def apiCompileClasspath = + apiProjectDependencies.map { + case bsp: BasicScalaProject => bsp.compileClasspath + case _ => Path.emptyPathFinder + }.foldLeft(Path.emptyPathFinder)(_ +++ _) + + def apiLabel = "main" + + def apiMaxErrors = 100 + + def apiOptions: Seq[String] = Seq.empty + + lazy val api = apiAction dependsOn (doc) describedAs ("Create combined scaladoc for all subprojects.") + + def apiAction = 
task { + val scaladoc = new Scaladoc(apiMaxErrors, buildCompiler) + scaladoc(apiLabel, apiMainSources.get, apiCompileClasspath.get, apiOutputPath, apiOptions, log) + } + + lazy val doc = task { None } // dummy task + + val docsPath = info.projectPath / "akka-docs" + + lazy val docs = docsAction describedAs ("Create the reStructuredText documentation.") + + def docsAction = task { + import Process._ + log.info("Building docs...") + val exitCode = ((new java.lang.ProcessBuilder("make", "clean", "html", "pdf")) directory docsPath.asFile) ! log + if (exitCode > 0) Some("Failed to build docs.") else None + } +} diff --git a/project/scripts/find-replace.sh b/project/scripts/find-replace.sh index e399ce3116..884ff53a2a 100644 --- a/project/scripts/find-replace.sh +++ b/project/scripts/find-replace.sh @@ -22,7 +22,7 @@ echo "Find and replace: $FIND --> $REPLACE" # Exclude directories from search -excludedirs=".git dist deploy embedded-repo lib_managed project/boot project/scripts src_managed target" +excludedirs=".git dist deploy embedded-repo lib_managed project/boot project/scripts src_managed target akka-docs" echo "Excluding directories: $excludedirs" @@ -37,7 +37,10 @@ excludeopts="${excludeopts} \) -prune -o" # Replace in files -search="find . -type f ${excludeopts} -print0 | xargs -0 grep -Il \"${FIND}\"" +search="find . ${excludeopts} -type f -print0 | xargs -0 grep -Il \"${FIND}\"" + +echo $search +echo files=$(eval "$search") @@ -61,7 +64,10 @@ echo # Replace in file names -search="find . -type f ${excludeopts} -name \"*${FIND}*\" -print0" +search="find . 
${excludeopts} -type f -name \"*${FIND}*\" -print" + +echo $search +echo files=$(eval "$search") diff --git a/project/scripts/release b/project/scripts/release index 7de585dff8..2fd97d8693 100644 --- a/project/scripts/release +++ b/project/scripts/release @@ -1,12 +1,8 @@ +sh git checkout -b releasing-{{release.arg1}} clean -sh rm -rf dist -sh rm -rf deploy script find-replace.sh {{project.version}} {{release.arg1}} script find-replace.sh //[[:space:]]*release:[[:space:]]* reload build-release sh git commit -am 'Update version for release {{project.version}}' sh git tag -m 'Version {{project.version}}' v{{project.version}} -sh git revert -n HEAD -sh git commit -am 'Restoring SNAPSHOT version' -reload diff --git a/project/scripts/test-release b/project/scripts/test-release index 6a5994065a..4e821f15f5 100644 --- a/project/scripts/test-release +++ b/project/scripts/test-release @@ -1,6 +1,4 @@ clean -sh rm -rf dist -sh rm -rf deploy clean-lib script find-replace.sh {{project.version}} {{test-release.arg1}} script find-replace.sh //[[:space:]]*release:[[:space:]]* diff --git a/scripts/run_akka.sh b/scripts/run_akka.sh index a58c021482..1d7ce97c21 100755 --- a/scripts/run_akka.sh +++ b/scripts/run_akka.sh @@ -1,6 +1,6 @@ #!/bin/bash cd $AKKA_HOME -VERSION=1.1-SNAPSHOT +VERSION=1.2-SNAPSHOT TARGET_DIR=dist/$VERSION/$1 shift 1 VMARGS=$@ @@ -13,4 +13,4 @@ else fi export AKKA_HOME=`pwd` -java -jar ${VMARGS} ${VERSION}.jar \ No newline at end of file +java -jar ${VMARGS} ${VERSION}.jar