Merge branch 'master' into wip-testkit

commit 85daa9f8e2

218 changed files with 18876 additions and 2364 deletions
3  .gitignore  (vendored)

@@ -45,3 +45,6 @@ run-codefellow
multiverse.log
.eprj
.*.swp
+akka-docs/_build/
+akka-tutorials/akka-tutorial-first/project/boot/
+akka-tutorials/akka-tutorial-first/project/plugins/project/
@@ -0,0 +1,40 @@
/**
 * Copyright (C) 2009-2011 Scalable Solutions AB <http://scalablesolutions.se>
 */

package akka.testing

import akka.util.Duration
import java.util.concurrent.{CyclicBarrier, TimeUnit, TimeoutException}


class TestBarrierTimeoutException(message: String) extends RuntimeException(message)

/**
 * A cyclic barrier wrapper for use in testing.
 * It always uses a timeout when waiting and timeouts are specified as durations.
 * Timeouts will always throw an exception. The default timeout is 5 seconds.
 * Timeouts are multiplied by the testing time factor for Jenkins builds.
 */
object TestBarrier {
  val DefaultTimeout = Duration(5, TimeUnit.SECONDS)

  def apply(count: Int) = new TestBarrier(count)
}

class TestBarrier(count: Int) {
  private val barrier = new CyclicBarrier(count)

  def await(): Unit = await(TestBarrier.DefaultTimeout)

  def await(timeout: Duration): Unit = {
    try {
      barrier.await(Testing.testTime(timeout.toNanos), TimeUnit.NANOSECONDS)
    } catch {
      case e: TimeoutException =>
        throw new TestBarrierTimeoutException("Timeout of %s and time factor of %s" format (timeout.toString, Testing.timeFactor))
    }
  }

  def reset = barrier.reset
}
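Note: the scaladoc for TestBarrier above describes the intended pattern: every participant (the test thread and the actors under test) calls await, and a hung participant surfaces as a TestBarrierTimeoutException instead of a silently blocked test. The sketch below is illustrative only and not part of this commit; the Worker actor and the "go" message are made up, while the TestBarrier and Akka 1.x actor APIs are the ones shown in this diff.

import akka.actor.Actor
import akka.actor.Actor.actorOf
import akka.testing.TestBarrier
import akka.util.duration._

object TestBarrierUsageSketch {
  // Two parties: the test's main thread and one actor.
  val barrier = TestBarrier(2)

  class Worker extends Actor {
    def receive = {
      case "go" => barrier.await() // default 5 s timeout, scaled by the time factor
    }
  }

  def main(args: Array[String]): Unit = {
    val worker = actorOf[Worker].start()
    worker ! "go"
    barrier.await(2 seconds) // throws TestBarrierTimeoutException if the actor never arrives
    worker.stop()
  }
}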
55  akka-actor-tests/src/main/scala/akka/testing/TestLatch.scala  (Normal file)

@@ -0,0 +1,55 @@
/**
 * Copyright (C) 2009-2011 Scalable Solutions AB <http://scalablesolutions.se>
 */

package akka.testing

import akka.util.Duration
import java.util.concurrent.{CountDownLatch, TimeUnit}


class TestLatchTimeoutException(message: String) extends RuntimeException(message)
class TestLatchNoTimeoutException(message: String) extends RuntimeException(message)

/**
 * A count down latch wrapper for use in testing.
 * It always uses a timeout when waiting and timeouts are specified as durations.
 * There's a default timeout of 5 seconds and the default count is 1.
 * Timeouts will always throw an exception (no need to wrap in assert in tests).
 * Timeouts are multiplied by the testing time factor for Jenkins builds.
 */
object TestLatch {
  val DefaultTimeout = Duration(5, TimeUnit.SECONDS)

  def apply(count: Int = 1) = new TestLatch(count)
}

class TestLatch(count: Int = 1) {
  private var latch = new CountDownLatch(count)

  def countDown() = latch.countDown()

  def open() = countDown()

  def await(): Boolean = await(TestLatch.DefaultTimeout)

  def await(timeout: Duration): Boolean = {
    val opened = latch.await(Testing.testTime(timeout.toNanos), TimeUnit.NANOSECONDS)
    if (!opened) throw new TestLatchTimeoutException(
      "Timeout of %s with time factor of %s" format (timeout.toString, Testing.timeFactor))
    opened
  }

  /**
   * Timeout is expected. Throws exception if latch is opened before timeout.
   */
  def awaitTimeout(timeout: Duration = TestLatch.DefaultTimeout) = {
    val opened = latch.await(Testing.testTime(timeout.toNanos), TimeUnit.NANOSECONDS)
    if (opened) throw new TestLatchNoTimeoutException(
      "Latch opened before timeout of %s with time factor of %s" format (timeout.toString, Testing.timeFactor))
    opened
  }

  def reset() = latch = new CountDownLatch(count)
}
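Note: as its scaladoc says, TestLatch replaces the bare CountDownLatch-plus-assert pattern that the specs below are migrated away from: a latch that never opens throws TestLatchTimeoutException, and awaitTimeout inverts the expectation for tests that want the timeout to happen. A minimal illustrative sketch, not part of the commit; the echo actor and its messages are made up:

import akka.actor.Actor
import akka.actor.Actor.actorOf
import akka.testing.TestLatch
import akka.util.duration._

object TestLatchUsageSketch {
  def main(args: Array[String]): Unit = {
    val opened = TestLatch()       // default count = 1
    val neverOpened = TestLatch()

    val echo = actorOf(new Actor {
      def receive = { case "open" => opened.open() }
    }).start()

    echo ! "open"
    opened.await(1 second)             // throws TestLatchTimeoutException if it stays closed
    neverOpened.awaitTimeout(1 second) // passes only because nothing opens this latch

    echo.stop()
  }
}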
33  akka-actor-tests/src/main/scala/akka/testing/Testing.scala  (Normal file)

@@ -0,0 +1,33 @@
/**
 * Copyright (C) 2009-2011 Scalable Solutions AB <http://scalablesolutions.se>
 */

package akka.testing

import akka.util.Duration

/**
 * Multiplying numbers used in test timeouts by a factor, set by system property.
 * Useful for Jenkins builds (where the machine may need more time).
 */
object Testing {
  val timeFactor: Double = {
    val factor = System.getProperty("akka.test.timefactor", "1.0")
    try {
      factor.toDouble
    } catch {
      case e: java.lang.NumberFormatException => 1.0
    }
  }

  def testTime(t: Int): Int = (timeFactor * t).toInt
  def testTime(t: Long): Long = (timeFactor * t).toLong
  def testTime(t: Float): Float = (timeFactor * t).toFloat
  def testTime(t: Double): Double = timeFactor * t

  def testSeconds(duration: Duration) = testTime(duration.toSeconds)
  def testMillis(duration: Duration) = testTime(duration.toMillis)
  def testNanos(duration: Duration) = testTime(duration.toNanos)

  def sleepFor(duration: Duration) = Thread.sleep(testTime(duration.toMillis))
}
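Note: the time factor is read once from the akka.test.timefactor system property, so a slow CI machine can stretch every timeout with a single JVM flag instead of editing individual tests. The sketch below is illustrative only (the factor value 2.0 and the 3000 ms base timeout are examples, not from the diff): started with -Dakka.test.timefactor=2.0, it prints 6000.

import akka.testing.Testing
import akka.util.duration._

object TimeFactorSketch {
  def main(args: Array[String]): Unit = {
    // Int/Long/Float/Double overloads all multiply by the factor.
    println(Testing.testTime(3000))

    // Duration-based helpers apply the same factor to seconds/millis/nanos.
    println(Testing.testMillis(3 seconds))

    // sleepFor scales the pause as well, so "1 second" becomes 2 real seconds at factor 2.0.
    Testing.sleepFor(1 second)
  }
}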
@@ -9,13 +9,15 @@ import java.util.List;
import static akka.config.Supervision.*;

public class SupervisionConfig {
  /*Just some sample code to demonstrate the declarative supervision configuration for Java */
  @SuppressWarnings("unchecked")
  public SupervisorConfig createSupervisorConfig(List<ActorRef> toSupervise) {
    ArrayList<Server> targets = new ArrayList<Server>(toSupervise.size());
    for(ActorRef ref : toSupervise) {
      targets.add(new Supervise(ref, permanent(), true));
    }

-   return new SupervisorConfig(new AllForOneStrategy(new Class[] { Exception.class },50,1000), targets.toArray(new Server[0]));
+   return new SupervisorConfig(new AllForOneStrategy(new Class[] { Exception.class }, 50, 1000), targets.toArray(new Server[targets.size()]));
  }
}
@@ -0,0 +1,97 @@
/**
 * Copyright (C) 2009-2011 Scalable Solutions AB <http://scalablesolutions.se>
 */

package akka.actor

import org.scalatest.WordSpec
import org.scalatest.matchers.MustMatchers
import org.scalatest.BeforeAndAfterEach

import akka.testing._
import akka.testing.Testing.sleepFor
import akka.util.duration._

import Actor._
import akka.config.Supervision._
import akka.dispatch.Dispatchers


object ActorFireForgetRequestReplySpec {

  class ReplyActor extends Actor {
    def receive = {
      case "Send" =>
        self.reply("Reply")
      case "SendImplicit" =>
        self.sender.get ! "ReplyImplicit"
    }
  }

  class CrashingTemporaryActor extends Actor {
    self.lifeCycle = Temporary

    def receive = {
      case "Die" =>
        state.finished.await
        throw new Exception("Expected exception")
    }
  }

  class SenderActor(replyActor: ActorRef) extends Actor {
    def receive = {
      case "Init" =>
        replyActor ! "Send"
      case "Reply" => {
        state.s = "Reply"
        state.finished.await
      }
      case "InitImplicit" => replyActor ! "SendImplicit"
      case "ReplyImplicit" => {
        state.s = "ReplyImplicit"
        state.finished.await
      }
    }
  }

  object state {
    var s = "NIL"
    val finished = TestBarrier(2)
  }
}

class ActorFireForgetRequestReplySpec extends WordSpec with MustMatchers with BeforeAndAfterEach {
  import ActorFireForgetRequestReplySpec._

  override def beforeEach() = {
    state.finished.reset
  }

  "An Actor" must {

    "reply to bang message using reply" in {
      val replyActor = actorOf[ReplyActor].start()
      val senderActor = actorOf(new SenderActor(replyActor)).start()
      senderActor ! "Init"
      state.finished.await
      state.s must be ("Reply")
    }

    "reply to bang message using implicit sender" in {
      val replyActor = actorOf[ReplyActor].start()
      val senderActor = actorOf(new SenderActor(replyActor)).start()
      senderActor ! "InitImplicit"
      state.finished.await
      state.s must be ("ReplyImplicit")
    }

    "should shutdown crashed temporary actor" in {
      val actor = actorOf[CrashingTemporaryActor].start()
      actor.isRunning must be (true)
      actor ! "Die"
      state.finished.await
      sleepFor(1 second)
      actor.isShutdown must be (true)
    }
  }
}
@@ -0,0 +1,177 @@
/**
 * Copyright (C) 2009-2011 Scalable Solutions AB <http://scalablesolutions.se>
 */

package akka.actor

import org.scalatest.WordSpec
import org.scalatest.matchers.MustMatchers

import akka.testing._
import akka.util.duration._
import akka.testing.Testing.sleepFor
import akka.config.Supervision.{OneForOneStrategy}
import akka.actor._
import akka.dispatch.Future
import java.util.concurrent.{TimeUnit, CountDownLatch}

object ActorRefSpec {

  val latch = TestLatch(4)

  class ReplyActor extends Actor {
    var replyTo: Channel[Any] = null

    def receive = {
      case "complexRequest" => {
        replyTo = self.channel
        val worker = Actor.actorOf[WorkerActor].start()
        worker ! "work"
      }
      case "complexRequest2" =>
        val worker = Actor.actorOf[WorkerActor].start()
        worker ! self.channel
      case "workDone" => replyTo ! "complexReply"
      case "simpleRequest" => self.reply("simpleReply")
    }
  }

  class WorkerActor() extends Actor {
    def receive = {
      case "work" => {
        work
        self.reply("workDone")
        self.stop()
      }
      case replyTo: Channel[Any] => {
        work
        replyTo ! "complexReply"
      }
    }

    private def work {
      sleepFor(1 second)
    }
  }

  class SenderActor(replyActor: ActorRef) extends Actor {

    def receive = {
      case "complex" => replyActor ! "complexRequest"
      case "complex2" => replyActor ! "complexRequest2"
      case "simple" => replyActor ! "simpleRequest"
      case "complexReply" => {
        latch.countDown()
      }
      case "simpleReply" => {
        latch.countDown()
      }
    }
  }
}

class ActorRefSpec extends WordSpec with MustMatchers {
  import ActorRefSpec._

  "An ActorRef" must {

    "not allow Actors to be created outside of an actorOf" in {
      intercept[akka.actor.ActorInitializationException] {
        new Actor { def receive = { case _ => } }
        fail("shouldn't get here")
      }

      intercept[akka.actor.ActorInitializationException] {
        val a = Actor.actorOf(new Actor {
          val nested = new Actor { def receive = { case _ => } }
          def receive = { case _ => }
        }).start()
        fail("shouldn't get here")
      }
    }

    "support nested actorOfs" in {
      val a = Actor.actorOf(new Actor {
        val nested = Actor.actorOf(new Actor { def receive = { case _ => } }).start()
        def receive = { case _ => self reply nested }
      }).start()

      val nested = (a !! "any").get.asInstanceOf[ActorRef]
      a must not be null
      nested must not be null
      (a ne nested) must be === true
    }

    "support reply via channel" in {
      val serverRef = Actor.actorOf[ReplyActor].start()
      val clientRef = Actor.actorOf(new SenderActor(serverRef)).start()

      clientRef ! "complex"
      clientRef ! "simple"
      clientRef ! "simple"
      clientRef ! "simple"

      latch.await

      latch.reset

      clientRef ! "complex2"
      clientRef ! "simple"
      clientRef ! "simple"
      clientRef ! "simple"

      latch.await

      clientRef.stop()
      serverRef.stop()
    }

    "stop when sent a poison pill" in {
      val ref = Actor.actorOf(
        new Actor {
          def receive = {
            case 5 => self reply_? "five"
            case null => self reply_? "null"
          }
        }
      ).start()

      val ffive: Future[String] = ref !!! 5
      val fnull: Future[String] = ref !!! null

      intercept[ActorKilledException] {
        ref !! PoisonPill
        fail("shouldn't get here")
      }

      ffive.resultOrException.get must be ("five")
      fnull.resultOrException.get must be ("null")

      ref.isRunning must be (false)
      ref.isShutdown must be (true)
    }

    "restart when Kill:ed" in {
      val latch = new CountDownLatch(2)

      val boss = Actor.actorOf(new Actor{
        self.faultHandler = OneForOneStrategy(List(classOf[Throwable]), scala.Some(2), scala.Some(1000))

        val ref = Actor.actorOf(
          new Actor {
            def receive = { case _ => }
            override def preRestart(reason: Throwable) = latch.countDown()
            override def postRestart(reason: Throwable) = latch.countDown()
          }
        ).start()

        self link ref

        protected def receive = { case "sendKill" => ref ! Kill }
      }).start()

      boss ! "sendKill"
      latch.await(5, TimeUnit.SECONDS) must be === true
    }
  }
}
@@ -28,7 +28,7 @@ object Chameneos {

  class Chameneo(var mall: ActorRef, var colour: Colour, cid: Int) extends Actor {
    var meetings = 0
-   self.start
+   self.start()
    mall ! Meet(self, colour)

    def receive = {

@@ -88,7 +88,7 @@ object Chameneos {
        sumMeetings += i
        if (numFaded == numChameneos) {
          Chameneos.end = System.currentTimeMillis
-         self.stop
+         self.stop()
        }

      case msg @ Meet(a, c) =>

@@ -110,7 +110,7 @@ object Chameneos {
    def run {
      // System.setProperty("akka.config", "akka.conf")
      Chameneos.start = System.currentTimeMillis
-     actorOf(new Mall(1000000, 4)).start
+     actorOf(new Mall(1000000, 4)).start()
      Thread.sleep(10000)
      println("Elapsed: " + (end - start))
    }
@@ -0,0 +1,148 @@
/**
 * Copyright (C) 2009-2011 Scalable Solutions AB <http://scalablesolutions.se>
 */

package akka.actor

import org.scalatest.WordSpec
import org.scalatest.matchers.MustMatchers

import akka.testing._

import FSM._
import akka.util.Duration
import akka.util.duration._


object FSMActorSpec {

  val unlockedLatch = TestLatch()
  val lockedLatch = TestLatch()
  val unhandledLatch = TestLatch()
  val terminatedLatch = TestLatch()
  val transitionLatch = TestLatch()
  val initialStateLatch = TestLatch()
  val transitionCallBackLatch = TestLatch()

  sealed trait LockState
  case object Locked extends LockState
  case object Open extends LockState

  class Lock(code: String, timeout: Duration) extends Actor with FSM[LockState, CodeState] {

    startWith(Locked, CodeState("", code))

    when(Locked) {
      case Event(digit: Char, CodeState(soFar, code)) => {
        soFar + digit match {
          case incomplete if incomplete.length < code.length =>
            stay using CodeState(incomplete, code)
          case codeTry if (codeTry == code) => {
            doUnlock
            goto(Open) using CodeState("", code) forMax timeout
          }
          case wrong => {
            stay using CodeState("", code)
          }
        }
      }
      case Event("hello", _) => stay replying "world"
      case Event("bye", _) => stop(Shutdown)
    }

    when(Open) {
      case Event(StateTimeout, _) => {
        doLock
        goto(Locked)
      }
    }

    whenUnhandled {
      case Event(_, stateData) => {
        unhandledLatch.open
        stay
      }
    }

    onTransition {
      case Locked -> Open => transitionLatch.open
    }

    // verify that old-style does still compile
    onTransition (transitionHandler _)

    def transitionHandler(from: LockState, to: LockState) = {
      // dummy
    }

    onTermination {
      case StopEvent(Shutdown, Locked, _) =>
        // stop is called from lockstate with shutdown as reason...
        terminatedLatch.open
    }

    // initialize the lock
    initialize

    private def doLock() {
      lockedLatch.open
    }

    private def doUnlock = {
      unlockedLatch.open
    }
  }

  case class CodeState(soFar: String, code: String)
}

class FSMActorSpec extends WordSpec with MustMatchers {
  import FSMActorSpec._

  "An FSM Actor" must {

    "unlock the lock" in {

      // lock that locked after being open for 1 sec
      val lock = Actor.actorOf(new Lock("33221", 1 second)).start()

      val transitionTester = Actor.actorOf(new Actor { def receive = {
        case Transition(_, _, _) => transitionCallBackLatch.open
        case CurrentState(_, Locked) => initialStateLatch.open
      }}).start()

      lock ! SubscribeTransitionCallBack(transitionTester)
      initialStateLatch.await

      lock ! '3'
      lock ! '3'
      lock ! '2'
      lock ! '2'
      lock ! '1'

      unlockedLatch.await
      transitionLatch.await
      transitionCallBackLatch.await
      lockedLatch.await

      lock ! "not_handled"
      unhandledLatch.await

      val answerLatch = TestLatch()
      object Hello
      object Bye
      val tester = Actor.actorOf(new Actor {
        protected def receive = {
          case Hello => lock ! "hello"
          case "world" => answerLatch.open
          case Bye => lock ! "bye"
        }
      }).start()
      tester ! Hello
      answerLatch.await

      tester ! Bye
      terminatedLatch.await
    }
  }
}
@@ -1,20 +1,17 @@
package akka.actor

-import akka.testkit.TestKit
-import akka.util.duration._

import org.scalatest.WordSpec
import org.scalatest.matchers.MustMatchers

-class FSMTimingSpec
-  extends WordSpec
-  with MustMatchers
-  with TestKit {
+import akka.testkit.TestKit
+import akka.util.duration._
+
+class FSMTimingSpec extends WordSpec with MustMatchers with TestKit {
  import FSMTimingSpec._
  import FSM._

-  val fsm = Actor.actorOf(new StateMachine(testActor)).start
+  val fsm = Actor.actorOf(new StateMachine(testActor)).start()
  fsm ! SubscribeTransitionCallBack(testActor)
  expectMsg(200 millis, CurrentState(fsm, Initial))


@@ -140,4 +137,3 @@ object FSMTimingSpec {

}

-// vim: set ts=2 sw=2 et:
@@ -1,22 +1,29 @@
/**
 * Copyright (C) 2009-2011 Scalable Solutions AB <http://scalablesolutions.se>
 */

package akka.actor

import java.util.concurrent.{TimeUnit, CountDownLatch}
import org.scalatest.junit.JUnitSuite
import org.junit.Test
import org.scalatest.WordSpec
import org.scalatest.matchers.MustMatchers

import akka.testing._
import akka.util.duration._

import Actor._


object ForwardActorSpec {
  object ForwardState {
    var sender: Option[ActorRef] = None
  }

  class ReceiverActor extends Actor {
-   val latch = new CountDownLatch(1)
+   val latch = TestLatch()
    def receive = {
      case "SendBang" => {
        ForwardState.sender = self.sender
-       latch.countDown
+       latch.countDown()
      }
      case "SendBangBang" => self.reply("SendBangBang")
    }

@@ -25,7 +32,7 @@ object ForwardActorSpec {

  class ForwardActor extends Actor {
    val receiverActor = actorOf[ReceiverActor]
-   receiverActor.start
+   receiverActor.start()
    def receive = {
      case "SendBang" => receiverActor.forward("SendBang")
      case "SendBangBang" => receiverActor.forward("SendBangBang")

@@ -34,7 +41,7 @@ object ForwardActorSpec {

  class BangSenderActor extends Actor {
    val forwardActor = actorOf[ForwardActor]
-   forwardActor.start
+   forwardActor.start()
    forwardActor ! "SendBang"
    def receive = {
      case _ => {}

@@ -42,11 +49,11 @@ object ForwardActorSpec {
  }

  class BangBangSenderActor extends Actor {
-   val latch = new CountDownLatch(1)
+   val latch = TestLatch()
    val forwardActor = actorOf[ForwardActor]
-   forwardActor.start
+   forwardActor.start()
    (forwardActor !! "SendBangBang") match {
-     case Some(_) => latch.countDown
+     case Some(_) => latch.countDown()
      case None => {}
    }
    def receive = {

@@ -55,27 +62,27 @@ object ForwardActorSpec {
  }
}

-class ForwardActorSpec extends JUnitSuite {
+class ForwardActorSpec extends WordSpec with MustMatchers {
  import ForwardActorSpec._

- @Test
- def shouldForwardActorReferenceWhenInvokingForwardOnBang {
-   val senderActor = actorOf[BangSenderActor]
-   val latch = senderActor.actor.asInstanceOf[BangSenderActor]
+ "A Forward Actor" must {
+   "forward actor reference when invoking forward on bang" in {
+     val senderActor = actorOf[BangSenderActor]
+     val latch = senderActor.actor.asInstanceOf[BangSenderActor]
        .forwardActor.actor.asInstanceOf[ForwardActor]
        .receiverActor.actor.asInstanceOf[ReceiverActor]
        .latch
-   senderActor.start
-   assert(latch.await(1L, TimeUnit.SECONDS))
-   assert(ForwardState.sender ne null)
-   assert(senderActor.toString === ForwardState.sender.get.toString)
- }
+     senderActor.start()
+     latch.await
+     ForwardState.sender must not be (null)
+     senderActor.toString must be (ForwardState.sender.get.toString)
+   }

- @Test
- def shouldForwardActorReferenceWhenInvokingForwardOnBangBang {
-   val senderActor = actorOf[BangBangSenderActor]
-   senderActor.start
-   val latch = senderActor.actor.asInstanceOf[BangBangSenderActor].latch
-   assert(latch.await(1L, TimeUnit.SECONDS))
+   "forward actor reference when invoking forward on bang bang" in {
+     val senderActor = actorOf[BangBangSenderActor]
+     senderActor.start()
+     val latch = senderActor.actor.asInstanceOf[BangBangSenderActor].latch
+     latch.await
+   }
  }
}
@@ -1,21 +1,27 @@
/**
 * Copyright (C) 2009-2011 Scalable Solutions AB <http://scalablesolutions.se>
 */

package akka.actor

import org.scalatest.WordSpec
import org.scalatest.matchers.MustMatchers

import akka.testing._

import Actor._

import java.util.concurrent.CyclicBarrier

class HotSwapSpec extends WordSpec with MustMatchers {

-  "An Actor" should {
+  "An Actor" must {

    "be able to hotswap its behavior with HotSwap(..)" in {
-     val barrier = new CyclicBarrier(2)
+     val barrier = TestBarrier(2)
      @volatile var _log = ""
      val a = actorOf( new Actor {
        def receive = { case _ => _log += "default" }
-     }).start
+     }).start()
      a ! HotSwap( self => {
        case _ =>
          _log += "swapped"

@@ -27,7 +33,7 @@ class HotSwapSpec extends WordSpec with MustMatchers {
    }

    "be able to hotswap its behavior with become(..)" in {
-     val barrier = new CyclicBarrier(2)
+     val barrier = TestBarrier(2)
      @volatile var _log = ""
      val a = actorOf(new Actor {
        def receive = {

@@ -40,7 +46,7 @@ class HotSwapSpec extends WordSpec with MustMatchers {
          barrier.await
        })
      }
-   }).start
+   }).start()

    a ! "init"
    barrier.await

@@ -55,7 +61,7 @@ class HotSwapSpec extends WordSpec with MustMatchers {
    }

    "be able to revert hotswap its behavior with RevertHotSwap(..)" in {
-     val barrier = new CyclicBarrier(2)
+     val barrier = TestBarrier(2)
      @volatile var _log = ""
      val a = actorOf( new Actor {
        def receive = {

@@ -63,7 +69,7 @@ class HotSwapSpec extends WordSpec with MustMatchers {
        _log += "init"
        barrier.await
      }
-   }).start
+   }).start()

    a ! "init"
    barrier.await

@@ -100,7 +106,7 @@ class HotSwapSpec extends WordSpec with MustMatchers {
    }

    "be able to revert hotswap its behavior with unbecome" in {
-     val barrier = new CyclicBarrier(2)
+     val barrier = TestBarrier(2)
      @volatile var _log = ""
      val a = actorOf(new Actor {
        def receive = {

@@ -113,11 +119,11 @@ class HotSwapSpec extends WordSpec with MustMatchers {
          _log += "swapped"
          barrier.await
        case "revert" =>
-         unbecome
+         unbecome()
      })
      barrier.await
    }
-   }).start
+   }).start()

    a ! "init"
    barrier.await
@@ -0,0 +1,120 @@
/**
 * Copyright (C) 2009-2011 Scalable Solutions AB <http://scalablesolutions.se>
 */

package akka.actor

import org.scalatest.WordSpec
import org.scalatest.matchers.MustMatchers

import akka.testing._
import akka.util.duration._

import Actor._
import java.util.concurrent.atomic.AtomicInteger


class ReceiveTimeoutSpec extends WordSpec with MustMatchers {
  import Actor._

  "An actor with receive timeout" must {

    "get timeout" in {
      val timeoutLatch = TestLatch()

      val timeoutActor = actorOf(new Actor {
        self.receiveTimeout = Some(500L)

        protected def receive = {
          case ReceiveTimeout => timeoutLatch.open
        }
      }).start()

      timeoutLatch.await
      timeoutActor.stop()
    }

    "get timeout when swapped" in {
      val timeoutLatch = TestLatch()

      val timeoutActor = actorOf(new Actor {
        self.receiveTimeout = Some(500L)

        protected def receive = {
          case ReceiveTimeout => timeoutLatch.open
        }
      }).start()

      timeoutLatch.await

      val swappedLatch = TestLatch()

      timeoutActor ! HotSwap(self => {
        case ReceiveTimeout => swappedLatch.open
      })

      swappedLatch.await
      timeoutActor.stop()
    }

    "reschedule timeout after regular receive" in {
      val timeoutLatch = TestLatch()
      case object Tick

      val timeoutActor = actorOf(new Actor {
        self.receiveTimeout = Some(500L)

        protected def receive = {
          case Tick => ()
          case ReceiveTimeout => timeoutLatch.open
        }
      }).start()

      timeoutActor ! Tick

      timeoutLatch.await
      timeoutActor.stop()
    }

    "be able to turn off timeout if desired" in {
      val count = new AtomicInteger(0)
      val timeoutLatch = TestLatch()
      case object Tick

      val timeoutActor = actorOf(new Actor {
        self.receiveTimeout = Some(500L)

        protected def receive = {
          case Tick => ()
          case ReceiveTimeout =>
            count.incrementAndGet
            timeoutLatch.open
            self.receiveTimeout = None
        }
      }).start()

      timeoutActor ! Tick

      timeoutLatch.await
      count.get must be (1)
      timeoutActor.stop()
    }

    "not receive timeout message when not specified" in {
      val timeoutLatch = TestLatch()

      val timeoutActor = actorOf(new Actor {
        protected def receive = {
          case ReceiveTimeout => timeoutLatch.open
        }
      }).start()

      timeoutLatch.awaitTimeout(1 second) // timeout expected
      timeoutActor.stop()
    }

    "have ReceiveTimeout eq to Actors ReceiveTimeout" in {
      akka.actor.Actors.receiveTimeout() must be theSameInstanceAs (ReceiveTimeout)
    }
  }
}
@@ -25,7 +25,7 @@ class RestartStrategySpec extends JUnitSuite {
    val boss = actorOf(new Actor{
      self.faultHandler = OneForOneStrategy(List(classOf[Throwable]), Some(2), Some(1000))
      protected def receive = { case _ => () }
-   }).start
+   }).start()

    val restartLatch = new StandardLatch
    val secondRestartLatch = new StandardLatch

@@ -36,7 +36,7 @@ class RestartStrategySpec extends JUnitSuite {
    val slave = actorOf(new Actor{

      protected def receive = {
-       case Ping => countDownLatch.countDown
+       case Ping => countDownLatch.countDown()
        case Crash => throw new Exception("Crashing...")
      }
      override def postRestart(reason: Throwable) = {

@@ -80,7 +80,7 @@ class RestartStrategySpec extends JUnitSuite {
    val boss = actorOf(new Actor{
      self.faultHandler = OneForOneStrategy(List(classOf[Throwable]), None, None)
      protected def receive = { case _ => () }
-   }).start
+   }).start()

    val countDownLatch = new CountDownLatch(100)


@@ -91,7 +91,7 @@ class RestartStrategySpec extends JUnitSuite {
      }

      override def postRestart(reason: Throwable) = {
-       countDownLatch.countDown
+       countDownLatch.countDown()
      }
    })


@@ -107,7 +107,7 @@ class RestartStrategySpec extends JUnitSuite {
    val boss = actorOf(new Actor{
      self.faultHandler = OneForOneStrategy(List(classOf[Throwable]), Some(2), Some(500))
      protected def receive = { case _ => () }
-   }).start
+   }).start()

    val restartLatch = new StandardLatch
    val secondRestartLatch = new StandardLatch

@@ -168,7 +168,7 @@ class RestartStrategySpec extends JUnitSuite {
    val boss = actorOf(new Actor{
      self.faultHandler = OneForOneStrategy(List(classOf[Throwable]), Some(2), None)
      protected def receive = { case _ => () }
-   }).start
+   }).start()

    val restartLatch = new StandardLatch
    val secondRestartLatch = new StandardLatch

@@ -179,7 +179,7 @@ class RestartStrategySpec extends JUnitSuite {
    val slave = actorOf(new Actor{

      protected def receive = {
-       case Ping => countDownLatch.countDown
+       case Ping => countDownLatch.countDown()
        case Crash => throw new Exception("Crashing...")
      }
      override def postRestart(reason: Throwable) = {

@@ -230,12 +230,12 @@ class RestartStrategySpec extends JUnitSuite {
      protected def receive = {
        case m:MaximumNumberOfRestartsWithinTimeRangeReached => maxNoOfRestartsLatch.open
      }
-   }).start
+   }).start()

    val slave = actorOf(new Actor{

      protected def receive = {
-       case Ping => countDownLatch.countDown
+       case Ping => countDownLatch.countDown()
        case Crash => throw new Exception("Crashing...")
      }
@@ -17,7 +17,7 @@ object SupervisorHierarchySpec {

  class CountDownActor(countDown: CountDownLatch) extends Actor {
    protected def receive = { case _ => () }
-   override def postRestart(reason: Throwable) = countDown.countDown
+   override def postRestart(reason: Throwable) = countDown.countDown()
  }

  class CrasherActor extends Actor {

@@ -40,7 +40,7 @@ class SupervisorHierarchySpec extends JUnitSuite {
      self.faultHandler = OneForOneStrategy(List(classOf[Throwable]), 5, 1000)

      protected def receive = { case _ => () }
-   }).start
+   }).start()

    val manager = actorOf(new CountDownActor(countDown))
    boss.startLink(manager)

@@ -65,9 +65,9 @@ class SupervisorHierarchySpec extends JUnitSuite {
      self.faultHandler = OneForOneStrategy(List(classOf[Throwable]), 1, 5000)
      protected def receive = {
        case MaximumNumberOfRestartsWithinTimeRangeReached(_, _, _, _) =>
-         countDown.countDown
+         countDown.countDown()
      }
-   }).start
+   }).start()
    boss.startLink(crasher)

    crasher ! Exit(crasher, new FireWorkerException("Fire the worker!"))
@@ -17,43 +17,43 @@ class SupervisorMiscSpec extends WordSpec with MustMatchers {

      val actor1 = Actor.actorOf(new Actor {
        self.dispatcher = Dispatchers.newThreadBasedDispatcher(self)
-       override def postRestart(cause: Throwable) {countDownLatch.countDown}
+       override def postRestart(cause: Throwable) {countDownLatch.countDown()}

        protected def receive = {
          case "kill" => throw new Exception("killed")
          case _ => println("received unknown message")
        }
-     }).start
+     }).start()

      val actor2 = Actor.actorOf(new Actor {
        self.dispatcher = Dispatchers.newThreadBasedDispatcher(self)
-       override def postRestart(cause: Throwable) {countDownLatch.countDown}
+       override def postRestart(cause: Throwable) {countDownLatch.countDown()}

        protected def receive = {
          case "kill" => throw new Exception("killed")
          case _ => println("received unknown message")
        }
-     }).start
+     }).start()

      val actor3 = Actor.actorOf(new Actor {
        self.dispatcher = Dispatchers.newExecutorBasedEventDrivenDispatcher("test").build
-       override def postRestart(cause: Throwable) {countDownLatch.countDown}
+       override def postRestart(cause: Throwable) {countDownLatch.countDown()}

        protected def receive = {
          case "kill" => throw new Exception("killed")
          case _ => println("received unknown message")
        }
-     }).start
+     }).start()

      val actor4 = Actor.actorOf(new Actor {
        self.dispatcher = Dispatchers.newThreadBasedDispatcher(self)
-       override def postRestart(cause: Throwable) {countDownLatch.countDown}
+       override def postRestart(cause: Throwable) {countDownLatch.countDown()}

        protected def receive = {
          case "kill" => throw new Exception("killed")
          case _ => println("received unknown message")
        }
-     }).start
+     }).start()

      val sup = Supervisor(
        SupervisorConfig(
@@ -0,0 +1,387 @@
/**
 * Copyright (C) 2009-2011 Scalable Solutions AB <http://scalablesolutions.se>
 */

package akka.actor

import org.scalatest.WordSpec
import org.scalatest.matchers.MustMatchers
import org.scalatest.BeforeAndAfterEach

import akka.testing._
import akka.testing.Testing.{testMillis, sleepFor}
import akka.util.duration._
import akka.config.Supervision._
import akka.{Die, Ping}
import Actor._

import java.util.concurrent.atomic.AtomicInteger
import java.util.concurrent.LinkedBlockingQueue


object SupervisorSpec {
  val Timeout = 5 seconds
  val TimeoutMillis = testMillis(Timeout).toInt

  // =====================================================
  // Message logs
  // =====================================================

  val PingMessage = "ping"
  val PongMessage = "pong"
  val ExceptionMessage = "Expected exception; to test fault-tolerance"

  var messageLog = new LinkedBlockingQueue[String]

  def messageLogPoll = messageLog.poll(Timeout.length, Timeout.unit)

  // =====================================================
  // Actors
  // =====================================================

  class PingPongActor extends Actor {
    def receive = {
      case Ping =>
        messageLog.put(PingMessage)
        self.reply_?(PongMessage)
      case Die =>
        throw new RuntimeException(ExceptionMessage)
    }

    override def postRestart(reason: Throwable) {
      messageLog.put(reason.getMessage)
    }
  }

  class TemporaryActor extends PingPongActor {
    self.lifeCycle = Temporary
  }

  class Master extends Actor {
    self.faultHandler = OneForOneStrategy(List(classOf[Exception]), 5, testMillis(1 second).toInt)

    val temp = self.spawnLink[TemporaryActor]

    override def receive = {
      case Die => temp !! (Die, TimeoutMillis)
    }
  }

  // =====================================================
  // Creating actors and supervisors
  // =====================================================

  def temporaryActorAllForOne = {
    val temporaryActor = actorOf[TemporaryActor].start()

    val supervisor = Supervisor(
      SupervisorConfig(
        AllForOneStrategy(List(classOf[Exception]), 3, TimeoutMillis),
        Supervise(
          temporaryActor,
          Temporary)
        :: Nil))

    (temporaryActor, supervisor)
  }

  def singleActorAllForOne = {
    val pingpong = actorOf[PingPongActor].start()

    val supervisor = Supervisor(
      SupervisorConfig(
        AllForOneStrategy(List(classOf[Exception]), 3, TimeoutMillis),
        Supervise(
          pingpong,
          Permanent)
        :: Nil))

    (pingpong, supervisor)
  }

  def singleActorOneForOne = {
    val pingpong = actorOf[PingPongActor].start()

    val supervisor = Supervisor(
      SupervisorConfig(
        OneForOneStrategy(List(classOf[Exception]), 3, TimeoutMillis),
        Supervise(
          pingpong,
          Permanent)
        :: Nil))

    (pingpong, supervisor)
  }

  def multipleActorsAllForOne = {
    val pingpong1 = actorOf[PingPongActor].start()
    val pingpong2 = actorOf[PingPongActor].start()
    val pingpong3 = actorOf[PingPongActor].start()

    val supervisor = Supervisor(
      SupervisorConfig(
        AllForOneStrategy(List(classOf[Exception]), 3, TimeoutMillis),
        Supervise(
          pingpong1,
          Permanent)
        ::
        Supervise(
          pingpong2,
          Permanent)
        ::
        Supervise(
          pingpong3,
          Permanent)
        :: Nil))

    (pingpong1, pingpong2, pingpong3, supervisor)
  }

  def multipleActorsOneForOne = {
    val pingpong1 = actorOf[PingPongActor].start()
    val pingpong2 = actorOf[PingPongActor].start()
    val pingpong3 = actorOf[PingPongActor].start()

    val supervisor = Supervisor(
      SupervisorConfig(
        OneForOneStrategy(List(classOf[Exception]), 3, TimeoutMillis),
        Supervise(
          pingpong1,
          Permanent)
        ::
        Supervise(
          pingpong2,
          Permanent)
        ::
        Supervise(
          pingpong3,
          Permanent)
        :: Nil))

    (pingpong1, pingpong2, pingpong3, supervisor)
  }

  def nestedSupervisorsAllForOne = {
    val pingpong1 = actorOf[PingPongActor]
    val pingpong2 = actorOf[PingPongActor]
    val pingpong3 = actorOf[PingPongActor]

    val supervisor = Supervisor(
      SupervisorConfig(
        AllForOneStrategy(List(classOf[Exception]), 3, TimeoutMillis),
        Supervise(
          pingpong1,
          Permanent)
        ::
        SupervisorConfig(
          AllForOneStrategy(Nil, 3, TimeoutMillis),
          Supervise(
            pingpong2,
            Permanent)
          ::
          Supervise(
            pingpong3,
            Permanent)
          :: Nil)
        :: Nil))

    (pingpong1, pingpong2, pingpong3, supervisor)
  }
}

class SupervisorSpec extends WordSpec with MustMatchers with BeforeAndAfterEach {
  import SupervisorSpec._

  override def beforeEach() = {
    messageLog.clear
  }

  def ping(pingPongActor: ActorRef) = {
    (pingPongActor !! (Ping, TimeoutMillis)).getOrElse("nil") must be (PongMessage)
    messageLogPoll must be (PingMessage)
  }

  def kill(pingPongActor: ActorRef) = {
    intercept[RuntimeException] { pingPongActor !! (Die, TimeoutMillis) }
    messageLogPoll must be (ExceptionMessage)
  }

  "A supervisor" must {

    "not restart programmatically linked temporary actor" in {
      val master = actorOf[Master].start()

      intercept[RuntimeException] {
        master !! (Die, TimeoutMillis)
      }

      sleepFor(1 second)
      messageLog.size must be (0)
    }

    "not restart temporary actor" in {
      val (temporaryActor, supervisor) = temporaryActorAllForOne

      intercept[RuntimeException] {
        temporaryActor !! (Die, TimeoutMillis)
      }

      sleepFor(1 second)
      messageLog.size must be (0)
    }

    "start server for nested supervisor hierarchy" in {
      val (actor1, actor2, actor3, supervisor) = nestedSupervisorsAllForOne
      ping(actor1)
    }

    "kill single actor OneForOne" in {
      val (actor, supervisor) = singleActorOneForOne
      kill(actor)
    }

    "call-kill-call single actor OneForOne" in {
      val (actor, supervisor) = singleActorOneForOne
      ping(actor)
      kill(actor)
      ping(actor)
    }

    "kill single actor AllForOne" in {
      val (actor, supervisor) = singleActorAllForOne
      kill(actor)
    }

    "call-kill-call single actor AllForOne" in {
      val (actor, supervisor) = singleActorAllForOne
      ping(actor)
      kill(actor)
      ping(actor)
    }

    "kill multiple actors OneForOne 1" in {
      val (actor1, actor2, actor3, supervisor) = multipleActorsOneForOne
      kill(actor1)
    }

    "kill multiple actors OneForOne 2" in {
      val (actor1, actor2, actor3, supervisor) = multipleActorsOneForOne
      kill(actor3)
    }

    "call-kill-call multiple actors OneForOne" in {
      val (actor1, actor2, actor3, supervisor) = multipleActorsOneForOne

      ping(actor1)
      ping(actor2)
      ping(actor3)

      kill(actor2)

      ping(actor1)
      ping(actor2)
      ping(actor3)
    }

    "kill multiple actors AllForOne" in {
      val (actor1, actor2, actor3, supervisor) = multipleActorsAllForOne

      kill(actor2)

      // and two more exception messages
      messageLogPoll must be (ExceptionMessage)
      messageLogPoll must be (ExceptionMessage)
    }

    "call-kill-call multiple actors AllForOne" in {
      val (actor1, actor2, actor3, supervisor) = multipleActorsAllForOne

      ping(actor1)
      ping(actor2)
      ping(actor3)

      kill(actor2)

      // and two more exception messages
      messageLogPoll must be (ExceptionMessage)
      messageLogPoll must be (ExceptionMessage)

      ping(actor1)
      ping(actor2)
      ping(actor3)
    }

    "one-way kill single actor OneForOne" in {
      val (actor, supervisor) = singleActorOneForOne

      actor ! Die
      messageLogPoll must be (ExceptionMessage)
    }

    "one-way call-kill-call single actor OneForOne" in {
      val (actor, supervisor) = singleActorOneForOne

      actor ! Ping
      messageLogPoll must be (PingMessage)

      actor ! Die
      messageLogPoll must be (ExceptionMessage)

      actor ! Ping
      messageLogPoll must be (PingMessage)
    }

    "restart killed actors in nested superviser hierarchy" in {
      val (actor1, actor2, actor3, supervisor) = nestedSupervisorsAllForOne

      ping(actor1)
      ping(actor2)
      ping(actor3)

      kill(actor2)

      // and two more exception messages
      messageLogPoll must be (ExceptionMessage)
      messageLogPoll must be (ExceptionMessage)

      ping(actor1)
      ping(actor2)
      ping(actor3)
    }

    "must attempt restart when exception during restart" in {
      val inits = new AtomicInteger(0)

      val dyingActor = actorOf(new Actor {
        self.lifeCycle = Permanent
        inits.incrementAndGet

        if (inits.get % 2 == 0) throw new IllegalStateException("Don't wanna!")

        def receive = {
          case Ping => self.reply_?(PongMessage)
          case Die => throw new Exception("expected")
        }
      })

      val supervisor =
        Supervisor(
          SupervisorConfig(
            OneForOneStrategy(classOf[Exception] :: Nil, 3, 10000),
            Supervise(dyingActor, Permanent) :: Nil))

      intercept[Exception] {
        dyingActor !! (Die, TimeoutMillis)
      }

      // give time for restart
      sleepFor(3 seconds)

      (dyingActor !! (Ping, TimeoutMillis)).getOrElse("nil") must be (PongMessage)

      inits.get must be (3)

      supervisor.shutdown
    }
  }
}
@@ -6,14 +6,16 @@ package akka.actor
import org.scalatest.WordSpec
import org.scalatest.matchers.MustMatchers

import akka.util.duration._
import akka.testing.Testing.sleepFor
import akka.dispatch.Dispatchers
import akka.config.Supervision.{SupervisorConfig, OneForOneStrategy, Supervise, Permanent}
import Actor._

class SupervisorTreeSpec extends WordSpec with MustMatchers {
  var log = ""
  case object Die

  class Chainer(myId: String, a: Option[ActorRef] = None) extends Actor {
    self.id = myId
    self.lifeCycle = Permanent

@@ -29,17 +31,17 @@ class SupervisorTreeSpec extends WordSpec with MustMatchers {
    }
  }

-  "In a 3 levels deep supervisor tree (linked in the constructor) we" should {
+  "In a 3 levels deep supervisor tree (linked in the constructor) we" must {

    "be able to kill the middle actor and see itself and its child restarted" in {
      log = "INIT"

-     val lastActor = actorOf(new Chainer("lastActor")).start
-     val middleActor = actorOf(new Chainer("middleActor", Some(lastActor))).start
-     val headActor = actorOf(new Chainer("headActor", Some(middleActor))).start
+     val lastActor = actorOf(new Chainer("lastActor")).start()
+     val middleActor = actorOf(new Chainer("middleActor", Some(lastActor))).start()
+     val headActor = actorOf(new Chainer("headActor", Some(middleActor))).start()

      middleActor ! Die
-     Thread.sleep(100)
+     sleepFor(500 millis)
      log must equal ("INITmiddleActorlastActor")
    }
  }
@@ -14,12 +14,12 @@ import org.scalatest.matchers.MustMatchers
class Ticket669Spec extends WordSpec with MustMatchers with BeforeAndAfterAll {
  import Ticket669Spec._

- override def afterAll = Actor.registry.shutdownAll
+ override def afterAll = Actor.registry.shutdownAll()

  "A supervised actor with lifecycle PERMANENT" should {
    "be able to reply on failure during preRestart" in {
      val latch = new CountDownLatch(1)
-     val sender = Actor.actorOf(new Sender(latch)).start
+     val sender = Actor.actorOf(new Sender(latch)).start()

      val supervised = Actor.actorOf[Supervised]
      val supervisor = Supervisor(SupervisorConfig(

@@ -33,7 +33,7 @@ class Ticket669Spec extends WordSpec with MustMatchers with BeforeAndAfterAll {

    "be able to reply on failure during postStop" in {
      val latch = new CountDownLatch(1)
-     val sender = Actor.actorOf(new Sender(latch)).start
+     val sender = Actor.actorOf(new Sender(latch)).start()

      val supervised = Actor.actorOf[Supervised]
      val supervisor = Supervisor(SupervisorConfig(

@@ -50,8 +50,8 @@ class Ticket669Spec extends WordSpec with MustMatchers with BeforeAndAfterAll {
object Ticket669Spec {
  class Sender(latch: CountDownLatch) extends Actor {
    def receive = {
-     case "failure1" => latch.countDown
-     case "failure2" => latch.countDown
+     case "failure1" => latch.countDown()
+     case "failure2" => latch.countDown()
      case _ => { }
    }
  }
@@ -29,7 +29,7 @@ class DataFlowTest extends Spec with ShouldMatchers with BeforeAndAfterAll {
      thread {
        z << x() + y()
        result.set(z())
-       latch.countDown
+       latch.countDown()
      }
      thread { x << 40 }
      thread { y << 2 }

@@ -62,7 +62,7 @@ class DataFlowTest extends Spec with ShouldMatchers with BeforeAndAfterAll {

      thread { z << y()
        result.set(z())
-       latch.countDown
+       latch.countDown()
      }

      latch.await(10,TimeUnit.SECONDS) should equal (true)

@@ -72,7 +72,7 @@ class DataFlowTest extends Spec with ShouldMatchers with BeforeAndAfterAll {
    /*
    it("should be able to join streams") {
      import DataFlow._
-     Actor.registry.shutdownAll
+     Actor.registry.shutdownAll()

      def ints(n: Int, max: Int, stream: DataFlowStream[Int]): Unit = if (n != max) {
        stream <<< n

@@ -93,7 +93,7 @@ class DataFlowTest extends Spec with ShouldMatchers with BeforeAndAfterAll {
      val t2 = thread {
        Thread.sleep(1000)
        result.set(producer.map(x => x * x).foldLeft(0)(_ + _))
-       latch.countDown
+       latch.countDown()
      }

      latch.await(3,TimeUnit.SECONDS) should equal (true)

@@ -123,7 +123,7 @@ class DataFlowTest extends Spec with ShouldMatchers with BeforeAndAfterAll {
        val x = stream()

        if(result.addAndGet(x) == 166666500)
-         latch.countDown
+         latch.countDown()

        recurseSum(stream)
      }

@@ -139,7 +139,7 @@ class DataFlowTest extends Spec with ShouldMatchers with BeforeAndAfterAll {
    /* it("should be able to conditionally set variables") {

      import DataFlow._
-     Actor.registry.shutdownAll
+     Actor.registry.shutdownAll()

      val latch = new CountDownLatch(1)
      val x, y, z, v = new DataFlowVariable[Int]

@@ -147,7 +147,7 @@ class DataFlowTest extends Spec with ShouldMatchers with BeforeAndAfterAll {
      val main = thread {
        x << 1
        z << Math.max(x(),y())
-       latch.countDown
+       latch.countDown()
      }

      val setY = thread {
@@ -6,6 +6,7 @@ package akka.actor.dispatch
import org.scalatest.junit.JUnitSuite
import org.junit.Test
import org.scalatest.Assertions._
+import akka.testing._
import akka.dispatch._
import akka.actor.{ActorRef, Actor}
import akka.actor.Actor._

@@ -53,13 +54,13 @@ object ActorModelSpec {
      case Await(latch) => ack; latch.await(); busy.switchOff()
      case Meet(sign, wait) => ack; sign.countDown(); wait.await(); busy.switchOff()
      case Wait(time) => ack; Thread.sleep(time); busy.switchOff()
-     case WaitAck(time, l) => ack; Thread.sleep(time); l.countDown; busy.switchOff()
+     case WaitAck(time, l) => ack; Thread.sleep(time); l.countDown(); busy.switchOff()
      case Reply(msg) => ack; self.reply(msg); busy.switchOff()
      case Reply_?(msg) => ack; self.reply_?(msg); busy.switchOff()
      case Forward(to,msg) => ack; to.forward(msg); busy.switchOff()
      case CountDown(latch) => ack; latch.countDown(); busy.switchOff()
      case Increment(count) => ack; count.incrementAndGet(); busy.switchOff()
-     case CountDownNStop(l)=> ack; l.countDown; self.stop; busy.switchOff()
+     case CountDownNStop(l)=> ack; l.countDown(); self.stop(); busy.switchOff()
      case Restart => ack; busy.switchOff(); throw new Exception("Restart requested")
    }
  }

@@ -201,9 +202,9 @@ abstract class ActorModelSpec extends JUnitSuite {
    implicit val dispatcher = newInterceptedDispatcher
    val a = newTestActor
    assertDispatcher(dispatcher)(starts = 0, stops = 0)
-   a.start
+   a.start()
    assertDispatcher(dispatcher)(starts = 1, stops = 0)
-   a.stop
+   a.stop()
    await(dispatcher.stops.get == 1)(withinMs = dispatcher.timeoutMs * 5)
    assertDispatcher(dispatcher)(starts = 1, stops = 1)
    assertRef(a,dispatcher)(

@@ -221,19 +222,19 @@ abstract class ActorModelSpec extends JUnitSuite {
    implicit val dispatcher = newInterceptedDispatcher
    val a = newTestActor
    val start,oneAtATime = new CountDownLatch(1)
-   a.start
+   a.start()

    a ! CountDown(start)
-   assertCountDown(start,3000, "Should process first message within 3 seconds")
+   assertCountDown(start, Testing.testTime(3000), "Should process first message within 3 seconds")
    assertRefDefaultZero(a)(registers = 1, msgsReceived = 1, msgsProcessed = 1)

    a ! Wait(1000)
    a ! CountDown(oneAtATime)
    // in case of serialization violation, restart would happen instead of count down
-   assertCountDown(oneAtATime,1500,"Processed message when allowed")
+   assertCountDown(oneAtATime, Testing.testTime(1500) ,"Processed message when allowed")
    assertRefDefaultZero(a)(registers = 1, msgsReceived = 3, msgsProcessed = 3)

-   a.stop
+   a.stop()
    assertRefDefaultZero(a)(registers = 1, unregisters = 1, msgsReceived = 3, msgsProcessed = 3)
  }

@@ -241,55 +242,55 @@ abstract class ActorModelSpec extends JUnitSuite {
    implicit val dispatcher = newInterceptedDispatcher
    val a = newTestActor
    val counter = new CountDownLatch(200)
-   a.start
+   a.start()

    def start = spawn { for (i <- 1 to 20) { a ! WaitAck(1, counter) } }
    for (i <- 1 to 10) { start }
-   assertCountDown(counter, 3000, "Should process 200 messages")
+   assertCountDown(counter, Testing.testTime(3000), "Should process 200 messages")
    assertRefDefaultZero(a)(registers = 1, msgsReceived = 200, msgsProcessed = 200)

-   a.stop
+   a.stop()
  }

  def spawn(f : => Unit) = {
    val thread = new Thread { override def run { f } }
-   thread.start
+   thread.start()
    thread
  }

  @Test def dispatcherShouldProcessMessagesInParallel: Unit = {
    implicit val dispatcher = newInterceptedDispatcher
-   val a, b = newTestActor.start
+   val a, b = newTestActor.start()
    val aStart,aStop,bParallel = new CountDownLatch(1)

    a ! Meet(aStart,aStop)
-   assertCountDown(aStart,3000, "Should process first message within 3 seconds")
+   assertCountDown(aStart, Testing.testTime(3000), "Should process first message within 3 seconds")

    b ! CountDown(bParallel)
-   assertCountDown(bParallel, 3000, "Should process other actors in parallel")
+   assertCountDown(bParallel, Testing.testTime(3000), "Should process other actors in parallel")

    aStop.countDown()
-   a.stop
-   b.stop
+   a.stop()
+   b.stop()
    assertRefDefaultZero(a)(registers = 1, unregisters = 1, msgsReceived = 1, msgsProcessed = 1)
    assertRefDefaultZero(b)(registers = 1, unregisters = 1, msgsReceived = 1, msgsProcessed = 1)
  }

  @Test def dispatcherShouldSuspendAndResumeAFailingNonSupervisedPermanentActor {
    implicit val dispatcher = newInterceptedDispatcher
-   val a = newTestActor.start
+   val a = newTestActor.start()
    val done = new CountDownLatch(1)
    a ! Restart
    a ! CountDown(done)
-   assertCountDown(done, 3000, "Should be suspended+resumed and done with next message within 3 seconds")
-   a.stop
+   assertCountDown(done, Testing.testTime(3000), "Should be suspended+resumed and done with next message within 3 seconds")
+   a.stop()
    assertRefDefaultZero(a)(registers = 1,unregisters = 1, msgsReceived = 2,
      msgsProcessed = 2, suspensions = 1, resumes = 1)
  }

  @Test def dispatcherShouldNotProcessMessagesForASuspendedActor {
    implicit val dispatcher = newInterceptedDispatcher
-   val a = newTestActor.start
+   val a = newTestActor.start()
    val done = new CountDownLatch(1)
    dispatcher.suspend(a)
    a ! CountDown(done)

@@ -297,11 +298,11 @@ abstract class ActorModelSpec extends JUnitSuite {
    assertRefDefaultZero(a)(registers = 1, msgsReceived = 1, suspensions = 1)

    dispatcher.resume(a)
-   assertCountDown(done, 3000, "Should resume processing of messages when resumed")
+   assertCountDown(done, Testing.testTime(3000), "Should resume processing of messages when resumed")
    assertRefDefaultZero(a)(registers = 1, msgsReceived = 1, msgsProcessed = 1,
      suspensions = 1, resumes = 1)

-   a.stop
+   a.stop()
    assertRefDefaultZero(a)(registers = 1,unregisters = 1, msgsReceived = 1, msgsProcessed = 1,
      suspensions = 1, resumes = 1)
  }

@@ -312,9 +313,9 @@ abstract class ActorModelSpec extends JUnitSuite {
    def flood(num: Int) {
      val cachedMessage = CountDownNStop(new CountDownLatch(num))
      (1 to num) foreach {
-       _ => newTestActor.start ! cachedMessage
+       _ => newTestActor.start() ! cachedMessage
      }
-     assertCountDown(cachedMessage.latch,10000, "Should process " + num + " countdowns")
+     assertCountDown(cachedMessage.latch, Testing.testTime(10000), "Should process " + num + " countdowns")
    }
    for(run <- 1 to 3) {
      flood(10000)
@ -25,7 +25,7 @@ object ExecutorBasedEventDrivenDispatcherActorSpec {
|
|||
class OneWayTestActor extends Actor {
|
||||
self.dispatcher = Dispatchers.newExecutorBasedEventDrivenDispatcher(self.uuid.toString).build
|
||||
def receive = {
|
||||
case "OneWay" => OneWayTestActor.oneWay.countDown
|
||||
case "OneWay" => OneWayTestActor.oneWay.countDown()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -35,28 +35,28 @@ class ExecutorBasedEventDrivenDispatcherActorSpec extends JUnitSuite {
|
|||
private val unit = TimeUnit.MILLISECONDS
|
||||
|
||||
@Test def shouldSendOneWay = {
|
||||
val actor = actorOf[OneWayTestActor].start
|
||||
val actor = actorOf[OneWayTestActor].start()
|
||||
val result = actor ! "OneWay"
|
||||
assert(OneWayTestActor.oneWay.await(1, TimeUnit.SECONDS))
|
||||
actor.stop
|
||||
actor.stop()
|
||||
}
|
||||
|
||||
@Test def shouldSendReplySync = {
|
||||
val actor = actorOf[TestActor].start
|
||||
val actor = actorOf[TestActor].start()
|
||||
val result = (actor !! ("Hello", 10000)).as[String]
|
||||
assert("World" === result.get)
|
||||
actor.stop
|
||||
actor.stop()
|
||||
}
|
||||
|
||||
@Test def shouldSendReplyAsync = {
|
||||
val actor = actorOf[TestActor].start
|
||||
val actor = actorOf[TestActor].start()
|
||||
val result = actor !! "Hello"
|
||||
assert("World" === result.get.asInstanceOf[String])
|
||||
actor.stop
|
||||
actor.stop()
|
||||
}
|
||||
|
||||
@Test def shouldSendReceiveException = {
|
||||
val actor = actorOf[TestActor].start
|
||||
val actor = actorOf[TestActor].start()
|
||||
try {
|
||||
actor !! "Failure"
|
||||
fail("Should have thrown an exception")
|
||||
|
|
@ -64,7 +64,7 @@ class ExecutorBasedEventDrivenDispatcherActorSpec extends JUnitSuite {
|
|||
case e =>
|
||||
assert("Expected exception; to test fault-tolerance" === e.getMessage())
|
||||
}
|
||||
actor.stop
|
||||
actor.stop()
|
||||
}
|
||||
|
||||
@Test def shouldRespectThroughput {
|
||||
|
|
@ -80,24 +80,24 @@ class ExecutorBasedEventDrivenDispatcherActorSpec extends JUnitSuite {
|
|||
new Actor {
|
||||
self.dispatcher = throughputDispatcher
|
||||
def receive = { case "sabotage" => works.set(false) }
|
||||
}).start
|
||||
}).start()
|
||||
|
||||
val slowOne = actorOf(
|
||||
new Actor {
|
||||
self.dispatcher = throughputDispatcher
|
||||
def receive = {
|
||||
case "hogexecutor" => start.await
|
||||
case "ping" => if (works.get) latch.countDown
|
||||
case "ping" => if (works.get) latch.countDown()
|
||||
}
|
||||
}).start
|
||||
}).start()
|
||||
|
||||
slowOne ! "hogexecutor"
|
||||
(1 to 100) foreach { _ => slowOne ! "ping"}
|
||||
fastOne ! "sabotage"
|
||||
start.countDown
|
||||
start.countDown()
|
||||
val result = latch.await(3,TimeUnit.SECONDS)
|
||||
fastOne.stop
|
||||
slowOne.stop
|
||||
fastOne.stop()
|
||||
slowOne.stop()
|
||||
assert(result === true)
|
||||
}
|
||||
|
||||
|
|
@ -115,24 +115,24 @@ class ExecutorBasedEventDrivenDispatcherActorSpec extends JUnitSuite {
|
|||
val fastOne = actorOf(
|
||||
new Actor {
|
||||
self.dispatcher = throughputDispatcher
|
||||
def receive = { case "ping" => if(works.get) latch.countDown; self.stop }
|
||||
}).start
|
||||
def receive = { case "ping" => if(works.get) latch.countDown(); self.stop() }
|
||||
}).start()
|
||||
|
||||
val slowOne = actorOf(
|
||||
new Actor {
|
||||
self.dispatcher = throughputDispatcher
|
||||
def receive = {
|
||||
case "hogexecutor" => ready.countDown; start.await
|
||||
case "ping" => works.set(false); self.stop
|
||||
case "hogexecutor" => ready.countDown(); start.await
|
||||
case "ping" => works.set(false); self.stop()
|
||||
}
|
||||
}).start
|
||||
}).start()
|
||||
|
||||
slowOne ! "hogexecutor"
|
||||
slowOne ! "ping"
|
||||
fastOne ! "ping"
|
||||
assert(ready.await(2,TimeUnit.SECONDS) === true)
|
||||
Thread.sleep(deadlineMs+10) // wait just a bit more than the deadline
|
||||
start.countDown
|
||||
start.countDown()
|
||||
assert(latch.await(2,TimeUnit.SECONDS) === true)
|
||||
}
|
||||
}
|
||||
|
|
@ -19,7 +19,7 @@ class ExecutorBasedEventDrivenDispatcherActorsSpec extends JUnitSuite with MustM
|
|||
def receive = {
|
||||
case x: Int => {
|
||||
Thread.sleep(50) // slow actor
|
||||
finishedCounter.countDown
|
||||
finishedCounter.countDown()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -29,7 +29,7 @@ class ExecutorBasedEventDrivenDispatcherActorsSpec extends JUnitSuite with MustM
|
|||
|
||||
def receive = {
|
||||
case x: Int => {
|
||||
finishedCounter.countDown
|
||||
finishedCounter.countDown()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -37,8 +37,8 @@ class ExecutorBasedEventDrivenDispatcherActorsSpec extends JUnitSuite with MustM
|
|||
@Test def slowActorShouldntBlockFastActor {
|
||||
val sFinished = new CountDownLatch(50)
|
||||
val fFinished = new CountDownLatch(10)
|
||||
val s = actorOf(new SlowActor(sFinished)).start
|
||||
val f = actorOf(new FastActor(fFinished)).start
|
||||
val s = actorOf(new SlowActor(sFinished)).start()
|
||||
val f = actorOf(new FastActor(fFinished)).start()
|
||||
|
||||
// send a lot of stuff to s
|
||||
for (i <- 1 to 50) {
|
||||
|
|
@ -55,7 +55,7 @@ class ExecutorBasedEventDrivenDispatcherActorsSpec extends JUnitSuite with MustM
|
|||
assert(sFinished.getCount > 0)
|
||||
sFinished.await
|
||||
assert(sFinished.getCount === 0)
|
||||
f.stop
|
||||
s.stop
|
||||
f.stop()
|
||||
s.stop()
|
||||
}
|
||||
}
|
||||
|
|
@ -25,7 +25,7 @@ object ExecutorBasedEventDrivenWorkStealingDispatcherSpec {
|
|||
case x: Int => {
|
||||
Thread.sleep(delay)
|
||||
invocationCount += 1
|
||||
finishedCounter.countDown
|
||||
finishedCounter.countDown()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -58,8 +58,8 @@ class ExecutorBasedEventDrivenWorkStealingDispatcherSpec extends JUnitSuite with
|
|||
@Test def fastActorShouldStealWorkFromSlowActor {
|
||||
val finishedCounter = new CountDownLatch(110)
|
||||
|
||||
val slow = actorOf(new DelayableActor("slow", 50, finishedCounter)).start
|
||||
val fast = actorOf(new DelayableActor("fast", 10, finishedCounter)).start
|
||||
val slow = actorOf(new DelayableActor("slow", 50, finishedCounter)).start()
|
||||
val fast = actorOf(new DelayableActor("fast", 10, finishedCounter)).start()
|
||||
|
||||
var sentToFast = 0
|
||||
|
||||
|
|
@ -90,17 +90,17 @@ class ExecutorBasedEventDrivenWorkStealingDispatcherSpec extends JUnitSuite with
|
|||
fast.actor.asInstanceOf[DelayableActor].invocationCount must be > sentToFast
|
||||
fast.actor.asInstanceOf[DelayableActor].invocationCount must be >
|
||||
(slow.actor.asInstanceOf[DelayableActor].invocationCount)
|
||||
slow.stop
|
||||
fast.stop
|
||||
slow.stop()
|
||||
fast.stop()
|
||||
}
|
||||
|
||||
@Test def canNotUseActorsOfDifferentTypesInSameDispatcher(): Unit = {
|
||||
val first = actorOf[FirstActor]
|
||||
val second = actorOf[SecondActor]
|
||||
|
||||
first.start
|
||||
first.start()
|
||||
intercept[IllegalActorStateException] {
|
||||
second.start
|
||||
second.start()
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -108,9 +108,9 @@ class ExecutorBasedEventDrivenWorkStealingDispatcherSpec extends JUnitSuite with
|
|||
val parent = actorOf[ParentActor]
|
||||
val child = actorOf[ChildActor]
|
||||
|
||||
parent.start
|
||||
parent.start()
|
||||
intercept[IllegalActorStateException] {
|
||||
child.start
|
||||
child.start()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -38,46 +38,46 @@ class FutureSpec extends JUnitSuite {
|
|||
|
||||
@Test def shouldActorReplyResultThroughExplicitFuture {
|
||||
val actor = actorOf[TestActor]
|
||||
actor.start
|
||||
actor.start()
|
||||
val future = actor !!! "Hello"
|
||||
future.await
|
||||
assert(future.result.isDefined)
|
||||
assert("World" === future.result.get)
|
||||
actor.stop
|
||||
actor.stop()
|
||||
}
|
||||
|
||||
@Test def shouldActorReplyExceptionThroughExplicitFuture {
|
||||
val actor = actorOf[TestActor]
|
||||
actor.start
|
||||
actor.start()
|
||||
val future = actor !!! "Failure"
|
||||
future.await
|
||||
assert(future.exception.isDefined)
|
||||
assert("Expected exception; to test fault-tolerance" === future.exception.get.getMessage)
|
||||
actor.stop
|
||||
actor.stop()
|
||||
}
|
||||
|
||||
@Test def shouldFutureCompose {
|
||||
val actor1 = actorOf[TestActor].start
|
||||
val actor2 = actorOf(new Actor { def receive = { case s: String => self reply s.toUpperCase } } ).start
|
||||
val actor1 = actorOf[TestActor].start()
|
||||
val actor2 = actorOf(new Actor { def receive = { case s: String => self reply s.toUpperCase } } ).start()
|
||||
val future1 = actor1 !!! "Hello" flatMap ((s: String) => actor2 !!! s)
|
||||
val future2 = actor1 !!! "Hello" flatMap (actor2 !!! (_: String))
|
||||
val future3 = actor1 !!! "Hello" flatMap (actor2 !!! (_: Int))
|
||||
assert(Some(Right("WORLD")) === future1.await.value)
|
||||
assert(Some(Right("WORLD")) === future2.await.value)
|
||||
intercept[ClassCastException] { future3.await.resultOrException }
|
||||
actor1.stop
|
||||
actor2.stop
|
||||
actor1.stop()
|
||||
actor2.stop()
|
||||
}
|
||||
|
||||
@Test def shouldFutureComposePatternMatch {
|
||||
val actor1 = actorOf[TestActor].start
|
||||
val actor2 = actorOf(new Actor { def receive = { case s: String => self reply s.toUpperCase } } ).start
|
||||
val actor1 = actorOf[TestActor].start()
|
||||
val actor2 = actorOf(new Actor { def receive = { case s: String => self reply s.toUpperCase } } ).start()
|
||||
val future1 = actor1 !!! "Hello" collect { case (s: String) => s } flatMap (actor2 !!! _)
|
||||
val future2 = actor1 !!! "Hello" collect { case (n: Int) => n } flatMap (actor2 !!! _)
|
||||
assert(Some(Right("WORLD")) === future1.await.value)
|
||||
intercept[MatchError] { future2.await.resultOrException }
|
||||
actor1.stop
|
||||
actor2.stop
|
||||
actor1.stop()
|
||||
actor2.stop()
|
||||
}
|
||||
|
||||
@Test def shouldFutureForComprehension {
|
||||
|
|
@ -86,23 +86,25 @@ class FutureSpec extends JUnitSuite {
|
|||
case s: String => self reply s.length
|
||||
case i: Int => self reply (i * 2).toString
|
||||
}
|
||||
}).start
|
||||
}).start()
|
||||
|
||||
val future0 = actor !!! "Hello"
|
||||
|
||||
val future1 = for {
|
||||
a: Int <- actor !!! "Hello" // returns 5
|
||||
a: Int <- future0 // returns 5
|
||||
b: String <- actor !!! a // returns "10"
|
||||
c: String <- actor !!! 7 // returns "14"
|
||||
} yield b + "-" + c
|
||||
|
||||
val future2 = for {
|
||||
a: Int <- actor !!! "Hello"
|
||||
a: Int <- future0
|
||||
b: Int <- actor !!! a
|
||||
c: String <- actor !!! 7
|
||||
} yield b + "-" + c
|
||||
|
||||
assert(Some(Right("10-14")) === future1.await.value)
|
||||
intercept[ClassCastException] { future2.await.resultOrException }
|
||||
actor.stop
|
||||
actor.stop()
|
||||
}
|
||||
|
||||
@Test def shouldFutureForComprehensionPatternMatch {
|
||||
|
|
@ -113,7 +115,7 @@ class FutureSpec extends JUnitSuite {
|
|||
case Req(s: String) => self reply Res(s.length)
|
||||
case Req(i: Int) => self reply Res((i * 2).toString)
|
||||
}
|
||||
}).start
|
||||
}).start()
|
||||
|
||||
val future1 = for {
|
||||
a <- actor !!! Req("Hello") collect { case Res(x: Int) => x }
|
||||
|
|
@ -129,61 +131,60 @@ class FutureSpec extends JUnitSuite {
|
|||
|
||||
assert(Some(Right("10-14")) === future1.await.value)
|
||||
intercept[MatchError] { future2.await.resultOrException }
|
||||
actor.stop
|
||||
actor.stop()
|
||||
}
|
||||
|
||||
// FIXME: implement Futures.awaitEither, and uncomment these two tests
|
||||
@Test def shouldFutureAwaitEitherLeft = {
|
||||
val actor1 = actorOf[TestActor].start
|
||||
val actor2 = actorOf[TestActor].start
|
||||
val actor1 = actorOf[TestActor].start()
|
||||
val actor2 = actorOf[TestActor].start()
|
||||
val future1 = actor1 !!! "Hello"
|
||||
val future2 = actor2 !!! "NoReply"
|
||||
val result = Futures.awaitEither(future1, future2)
|
||||
assert(result.isDefined)
|
||||
assert("World" === result.get)
|
||||
actor1.stop
|
||||
actor2.stop
|
||||
actor1.stop()
|
||||
actor2.stop()
|
||||
}
|
||||
|
||||
@Test def shouldFutureAwaitEitherRight = {
|
||||
val actor1 = actorOf[TestActor].start
|
||||
val actor2 = actorOf[TestActor].start
|
||||
val actor1 = actorOf[TestActor].start()
|
||||
val actor2 = actorOf[TestActor].start()
|
||||
val future1 = actor1 !!! "NoReply"
|
||||
val future2 = actor2 !!! "Hello"
|
||||
val result = Futures.awaitEither(future1, future2)
|
||||
assert(result.isDefined)
|
||||
assert("World" === result.get)
|
||||
actor1.stop
|
||||
actor2.stop
|
||||
actor1.stop()
|
||||
actor2.stop()
|
||||
}
|
||||
|
||||
@Test def shouldFutureAwaitOneLeft = {
|
||||
val actor1 = actorOf[TestActor].start
|
||||
val actor2 = actorOf[TestActor].start
|
||||
val actor1 = actorOf[TestActor].start()
|
||||
val actor2 = actorOf[TestActor].start()
|
||||
val future1 = actor1 !!! "NoReply"
|
||||
val future2 = actor2 !!! "Hello"
|
||||
val result = Futures.awaitOne(List(future1, future2))
|
||||
assert(result.result.isDefined)
|
||||
assert("World" === result.result.get)
|
||||
actor1.stop
|
||||
actor2.stop
|
||||
actor1.stop()
|
||||
actor2.stop()
|
||||
}
|
||||
|
||||
@Test def shouldFutureAwaitOneRight = {
|
||||
val actor1 = actorOf[TestActor].start
|
||||
val actor2 = actorOf[TestActor].start
|
||||
val actor1 = actorOf[TestActor].start()
|
||||
val actor2 = actorOf[TestActor].start()
|
||||
val future1 = actor1 !!! "Hello"
|
||||
val future2 = actor2 !!! "NoReply"
|
||||
val result = Futures.awaitOne(List(future1, future2))
|
||||
assert(result.result.isDefined)
|
||||
assert("World" === result.result.get)
|
||||
actor1.stop
|
||||
actor2.stop
|
||||
actor1.stop()
|
||||
actor2.stop()
|
||||
}
|
||||
|
||||
@Test def shouldFutureAwaitAll = {
|
||||
val actor1 = actorOf[TestActor].start
|
||||
val actor2 = actorOf[TestActor].start
|
||||
val actor1 = actorOf[TestActor].start()
|
||||
val actor2 = actorOf[TestActor].start()
|
||||
val future1 = actor1 !!! "Hello"
|
||||
val future2 = actor2 !!! "Hello"
|
||||
Futures.awaitAll(List(future1, future2))
|
||||
|
|
@ -191,8 +192,8 @@ class FutureSpec extends JUnitSuite {
|
|||
assert("World" === future1.result.get)
|
||||
assert(future2.result.isDefined)
|
||||
assert("World" === future2.result.get)
|
||||
actor1.stop
|
||||
actor2.stop
|
||||
actor1.stop()
|
||||
actor2.stop()
|
||||
}
|
||||
|
||||
@Test def shouldFuturesAwaitMapHandleEmptySequence {
|
||||
|
|
@ -201,7 +202,7 @@ class FutureSpec extends JUnitSuite {
|
|||
|
||||
@Test def shouldFuturesAwaitMapHandleNonEmptySequence {
|
||||
val latches = (1 to 3) map (_ => new StandardLatch)
|
||||
val actors = latches map (latch => actorOf(new TestDelayActor(latch)).start)
|
||||
val actors = latches map (latch => actorOf(new TestDelayActor(latch)).start())
|
||||
val futures = actors map (actor => (actor.!!))
|
||||
latches foreach { _.open }
|
||||
|
||||
|
|
@ -212,7 +213,7 @@ class FutureSpec extends JUnitSuite {
|
|||
val actors = (1 to 10).toList map { _ =>
|
||||
actorOf(new Actor {
|
||||
def receive = { case (add: Int, wait: Int) => Thread.sleep(wait); self reply_? add }
|
||||
}).start
|
||||
}).start()
|
||||
}
|
||||
def futures = actors.zipWithIndex map { case (actor: ActorRef, idx: Int) => actor.!!) }
|
||||
assert(Futures.fold(0)(futures)(_ + _).awaitBlocking.result.get === 45)
|
||||
|
|
@ -222,7 +223,7 @@ class FutureSpec extends JUnitSuite {
|
|||
val actors = (1 to 10).toList map { _ =>
|
||||
actorOf(new Actor {
|
||||
def receive = { case (add: Int, wait: Int) => Thread.sleep(wait); self reply_? add }
|
||||
}).start
|
||||
}).start()
|
||||
}
|
||||
def futures = actors.zipWithIndex map { case (actor: ActorRef, idx: Int) => actor.!!) }
|
||||
assert(futures.foldLeft(Future(0))((fr, fa) => for (r <- fr; a <- fa) yield (r + a)).awaitBlocking.result.get === 45)
|
||||
|
|
@ -237,7 +238,7 @@ class FutureSpec extends JUnitSuite {
|
|||
if (add == 6) throw new IllegalArgumentException("shouldFoldResultsWithException: expected")
|
||||
self reply_? add
|
||||
}
|
||||
}).start
|
||||
}).start()
|
||||
}
|
||||
def futures = actors.zipWithIndex map { case (actor: ActorRef, idx: Int) => actor.!!) }
|
||||
assert(Futures.fold(0)(futures)(_ + _).awaitBlocking.exception.get.getMessage === "shouldFoldResultsWithException: expected")
|
||||
|
|
@ -251,7 +252,7 @@ class FutureSpec extends JUnitSuite {
|
|||
val actors = (1 to 10).toList map { _ =>
|
||||
actorOf(new Actor {
|
||||
def receive = { case (add: Int, wait: Int) => Thread.sleep(wait); self reply_? add }
|
||||
}).start
|
||||
}).start()
|
||||
}
|
||||
def futures = actors.zipWithIndex map { case (actor: ActorRef, idx: Int) => actor.!!) }
|
||||
assert(Futures.reduce(futures)(_ + _).awaitBlocking.result.get === 45)
|
||||
|
|
@ -266,7 +267,7 @@ class FutureSpec extends JUnitSuite {
|
|||
if (add == 6) throw new IllegalArgumentException("shouldFoldResultsWithException: expected")
|
||||
self reply_? add
|
||||
}
|
||||
}).start
|
||||
}).start()
|
||||
}
|
||||
def futures = actors.zipWithIndex map { case (actor: ActorRef, idx: Int) => actor.!!) }
|
||||
assert(Futures.reduce(futures)(_ + _).awaitBlocking.exception.get.getMessage === "shouldFoldResultsWithException: expected")
|
||||
|
|
@ -282,7 +283,7 @@ class FutureSpec extends JUnitSuite {
|
|||
val actors = (1 to 10).toList map { _ =>
|
||||
actorOf(new Actor {
|
||||
def receive = { case (add: Int, wait: Boolean, latch: StandardLatch) => if (wait) latch.await; self reply_? add }
|
||||
}).start
|
||||
}).start()
|
||||
}
|
||||
|
||||
def futures = actors.zipWithIndex map { case (actor: ActorRef, idx: Int) => actor.!!) }
|
||||
|
|
@ -298,10 +299,28 @@ class FutureSpec extends JUnitSuite {
|
|||
|
||||
@Test def receiveShouldExecuteOnComplete {
|
||||
val latch = new StandardLatch
|
||||
val actor = actorOf[TestActor].start
|
||||
val actor = actorOf[TestActor].start()
|
||||
actor !!! "Hello" receive { case "World" => latch.open }
|
||||
assert(latch.tryAwait(5, TimeUnit.SECONDS))
|
||||
actor.stop
|
||||
actor.stop()
|
||||
}
|
||||
|
||||
@Test def shouldTraverseFutures {
|
||||
val oddActor = actorOf(new Actor {
|
||||
var counter = 1
|
||||
def receive = {
|
||||
case 'GetNext =>
|
||||
self reply counter
|
||||
counter += 2
|
||||
}
|
||||
}).start()
|
||||
|
||||
val oddFutures: List[Future[Int]] = List.fill(100)(oddActor !!! 'GetNext)
|
||||
assert(Futures.sequence(oddFutures).get.sum === 10000)
|
||||
oddActor.stop()
|
||||
|
||||
val list = (1 to 100).toList
|
||||
assert(Futures.traverse(list)(x => Future(x * 2 - 1)).get.sum === 10000)
|
||||
}
|
||||
|
||||
@Test def shouldHandleThrowables {
|
||||
|
|
@ -95,7 +95,7 @@ abstract class MailboxSpec extends
|
|||
case e: Throwable => result.completeWithException(e)
|
||||
}
|
||||
})
|
||||
t.start
|
||||
t.start()
|
||||
result
|
||||
}
|
||||
|
||||
|
|
@ -173,11 +173,7 @@ class DefaultMailboxSpec extends MailboxSpec {
|
|||
}
|
||||
|
||||
class PriorityMailboxSpec extends MailboxSpec {
|
||||
val comparator = new java.util.Comparator[MessageInvocation] {
|
||||
def compare(a: MessageInvocation, b: MessageInvocation): Int = {
|
||||
a.## - b.##
|
||||
}
|
||||
}
|
||||
val comparator = PriorityGenerator(_.##)
|
||||
lazy val name = "The priority mailbox implementation"
|
||||
def factory = {
|
||||
case UnboundedMailbox(blockDequeue) =>

@@ -0,0 +1,51 @@
package akka.dispatch

import akka.actor.Actor._
import akka.actor.Actor
import org.scalatest.WordSpec
import org.scalatest.matchers.MustMatchers
import java.util.concurrent.CountDownLatch

class PriorityDispatcherSpec extends WordSpec with MustMatchers {

  "A PriorityExecutorBasedEventDrivenDispatcher" must {
    "Order its messages according to the specified comparator using an unbounded mailbox" in {
      testOrdering(UnboundedMailbox(false))
    }

    "Order its messages according to the specified comparator using a bounded mailbox" in {
      testOrdering(BoundedMailbox(false,1000))
    }
  }

  def testOrdering(mboxType: MailboxType) {
    val dispatcher = new PriorityExecutorBasedEventDrivenDispatcher("Test",
      PriorityGenerator({
        case i: Int => i //Reverse order
        case 'Result => Int.MaxValue
      }: Any => Int),
      throughput = 1,
      mailboxType = mboxType
    )

    val actor = actorOf(new Actor {
      self.dispatcher = dispatcher
      var acc: List[Int] = Nil

      def receive = {
        case i: Int => acc = i :: acc
        case 'Result => self reply_? acc
      }
    }).start()

    dispatcher.suspend(actor) // Make sure the actor isn't processing any messages; let it buffer the incoming ones

    val msgs = (1 to 100).toList
    for(m <- msgs) actor ! m

    dispatcher.resume(actor) // Signal the actor to start working through its message backlog

    actor.!!![List[Int]]('Result).await.result.get must be === (msgs.reverse)
  }

}

@ -31,30 +31,30 @@ class ThreadBasedActorSpec extends JUnitSuite {
|
|||
val actor = actorOf(new Actor {
|
||||
self.dispatcher = Dispatchers.newThreadBasedDispatcher(self)
|
||||
def receive = {
|
||||
case "OneWay" => oneWay.countDown
|
||||
case "OneWay" => oneWay.countDown()
|
||||
}
|
||||
}).start
|
||||
}).start()
|
||||
val result = actor ! "OneWay"
|
||||
assert(oneWay.await(1, TimeUnit.SECONDS))
|
||||
actor.stop
|
||||
actor.stop()
|
||||
}
|
||||
|
||||
@Test def shouldSendReplySync = {
|
||||
val actor = actorOf[TestActor].start
|
||||
val actor = actorOf[TestActor].start()
|
||||
val result = (actor !! ("Hello", 10000)).as[String]
|
||||
assert("World" === result.get)
|
||||
actor.stop
|
||||
actor.stop()
|
||||
}
|
||||
|
||||
@Test def shouldSendReplyAsync = {
|
||||
val actor = actorOf[TestActor].start
|
||||
val actor = actorOf[TestActor].start()
|
||||
val result = actor !! "Hello"
|
||||
assert("World" === result.get.asInstanceOf[String])
|
||||
actor.stop
|
||||
actor.stop()
|
||||
}
|
||||
|
||||
@Test def shouldSendReceiveException = {
|
||||
val actor = actorOf[TestActor].start
|
||||
val actor = actorOf[TestActor].start()
|
||||
try {
|
||||
actor !! "Failure"
|
||||
fail("Should have thrown an exception")
|
||||
|
|
@ -62,6 +62,6 @@ class ThreadBasedActorSpec extends JUnitSuite {
|
|||
case e =>
|
||||
assert("Expected exception; to test fault-tolerance" === e.getMessage())
|
||||
}
|
||||
actor.stop
|
||||
actor.stop()
|
||||
}
|
||||
}
|
||||
|
|
@ -33,115 +33,115 @@ class ActorRegistrySpec extends JUnitSuite {
|
|||
import ActorRegistrySpec._
|
||||
|
||||
@Test def shouldGetActorByIdFromActorRegistry {
|
||||
Actor.registry.shutdownAll
|
||||
Actor.registry.shutdownAll()
|
||||
val actor = actorOf[TestActor]
|
||||
actor.start
|
||||
actor.start()
|
||||
val actors = Actor.registry.actorsFor("MyID")
|
||||
assert(actors.size === 1)
|
||||
assert(actors.head.actor.isInstanceOf[TestActor])
|
||||
assert(actors.head.id === "MyID")
|
||||
actor.stop
|
||||
actor.stop()
|
||||
}
|
||||
|
||||
@Test def shouldGetActorByUUIDFromActorRegistry {
|
||||
Actor.registry.shutdownAll
|
||||
Actor.registry.shutdownAll()
|
||||
val actor = actorOf[TestActor]
|
||||
val uuid = actor.uuid
|
||||
actor.start
|
||||
actor.start()
|
||||
val actorOrNone = Actor.registry.actorFor(uuid)
|
||||
assert(actorOrNone.isDefined)
|
||||
assert(actorOrNone.get.uuid === uuid)
|
||||
actor.stop
|
||||
actor.stop()
|
||||
}
|
||||
|
||||
@Test def shouldGetActorByClassFromActorRegistry {
|
||||
Actor.registry.shutdownAll
|
||||
Actor.registry.shutdownAll()
|
||||
val actor = actorOf[TestActor]
|
||||
actor.start
|
||||
actor.start()
|
||||
val actors = Actor.registry.actorsFor(classOf[TestActor])
|
||||
assert(actors.size === 1)
|
||||
assert(actors.head.actor.isInstanceOf[TestActor])
|
||||
assert(actors.head.id === "MyID")
|
||||
actor.stop
|
||||
actor.stop()
|
||||
}
|
||||
|
||||
@Test def shouldGetActorByManifestFromActorRegistry {
|
||||
Actor.registry.shutdownAll
|
||||
Actor.registry.shutdownAll()
|
||||
val actor = actorOf[TestActor]
|
||||
actor.start
|
||||
actor.start()
|
||||
val actors = Actor.registry.actorsFor[TestActor]
|
||||
assert(actors.size === 1)
|
||||
assert(actors.head.actor.isInstanceOf[TestActor])
|
||||
assert(actors.head.id === "MyID")
|
||||
actor.stop
|
||||
actor.stop()
|
||||
}
|
||||
|
||||
@Test def shouldFindThingsFromActorRegistry {
|
||||
Actor.registry.shutdownAll
|
||||
Actor.registry.shutdownAll()
|
||||
val actor = actorOf[TestActor]
|
||||
actor.start
|
||||
actor.start()
|
||||
val found = Actor.registry.find({ case a: ActorRef if a.actor.isInstanceOf[TestActor] => a })
|
||||
assert(found.isDefined)
|
||||
assert(found.get.actor.isInstanceOf[TestActor])
|
||||
assert(found.get.id === "MyID")
|
||||
actor.stop
|
||||
actor.stop()
|
||||
}
|
||||
|
||||
@Test def shouldGetActorsByIdFromActorRegistry {
|
||||
Actor.registry.shutdownAll
|
||||
Actor.registry.shutdownAll()
|
||||
val actor1 = actorOf[TestActor]
|
||||
actor1.start
|
||||
actor1.start()
|
||||
val actor2 = actorOf[TestActor]
|
||||
actor2.start
|
||||
actor2.start()
|
||||
val actors = Actor.registry.actorsFor("MyID")
|
||||
assert(actors.size === 2)
|
||||
assert(actors.head.actor.isInstanceOf[TestActor])
|
||||
assert(actors.head.id === "MyID")
|
||||
assert(actors.last.actor.isInstanceOf[TestActor])
|
||||
assert(actors.last.id === "MyID")
|
||||
actor1.stop
|
||||
actor2.stop
|
||||
actor1.stop()
|
||||
actor2.stop()
|
||||
}
|
||||
|
||||
@Test def shouldGetActorsByClassFromActorRegistry {
|
||||
Actor.registry.shutdownAll
|
||||
Actor.registry.shutdownAll()
|
||||
val actor1 = actorOf[TestActor]
|
||||
actor1.start
|
||||
actor1.start()
|
||||
val actor2 = actorOf[TestActor]
|
||||
actor2.start
|
||||
actor2.start()
|
||||
val actors = Actor.registry.actorsFor(classOf[TestActor])
|
||||
assert(actors.size === 2)
|
||||
assert(actors.head.actor.isInstanceOf[TestActor])
|
||||
assert(actors.head.id === "MyID")
|
||||
assert(actors.last.actor.isInstanceOf[TestActor])
|
||||
assert(actors.last.id === "MyID")
|
||||
actor1.stop
|
||||
actor2.stop
|
||||
actor1.stop()
|
||||
actor2.stop()
|
||||
}
|
||||
|
||||
@Test def shouldGetActorsByManifestFromActorRegistry {
|
||||
Actor.registry.shutdownAll
|
||||
Actor.registry.shutdownAll()
|
||||
val actor1 = actorOf[TestActor]
|
||||
actor1.start
|
||||
actor1.start()
|
||||
val actor2 = actorOf[TestActor]
|
||||
actor2.start
|
||||
actor2.start()
|
||||
val actors = Actor.registry.actorsFor[TestActor]
|
||||
assert(actors.size === 2)
|
||||
assert(actors.head.actor.isInstanceOf[TestActor])
|
||||
assert(actors.head.id === "MyID")
|
||||
assert(actors.last.actor.isInstanceOf[TestActor])
|
||||
assert(actors.last.id === "MyID")
|
||||
actor1.stop
|
||||
actor2.stop
|
||||
actor1.stop()
|
||||
actor2.stop()
|
||||
}
|
||||
|
||||
@Test def shouldGetActorsByMessageFromActorRegistry {
|
||||
|
||||
Actor.registry.shutdownAll
|
||||
Actor.registry.shutdownAll()
|
||||
val actor1 = actorOf[TestActor]
|
||||
actor1.start
|
||||
actor1.start()
|
||||
val actor2 = actorOf[TestActor2]
|
||||
actor2.start
|
||||
actor2.start()
|
||||
|
||||
val actorsForAcotrTestActor = Actor.registry.actorsFor[TestActor]
|
||||
assert(actorsForAcotrTestActor.size === 1)
|
||||
|
|
@ -159,55 +159,55 @@ class ActorRegistrySpec extends JUnitSuite {
|
|||
val actorsForMessagePing = Actor.registry.actorsFor[Actor]("ping")
|
||||
assert(actorsForMessagePing.size === 2)
|
||||
|
||||
actor1.stop
|
||||
actor2.stop
|
||||
actor1.stop()
|
||||
actor2.stop()
|
||||
}
|
||||
|
||||
@Test def shouldGetAllActorsFromActorRegistry {
|
||||
Actor.registry.shutdownAll
|
||||
Actor.registry.shutdownAll()
|
||||
val actor1 = actorOf[TestActor]
|
||||
actor1.start
|
||||
actor1.start()
|
||||
val actor2 = actorOf[TestActor]
|
||||
actor2.start
|
||||
actor2.start()
|
||||
val actors = Actor.registry.actors
|
||||
assert(actors.size === 2)
|
||||
assert(actors.head.actor.isInstanceOf[TestActor])
|
||||
assert(actors.head.id === "MyID")
|
||||
assert(actors.last.actor.isInstanceOf[TestActor])
|
||||
assert(actors.last.id === "MyID")
|
||||
actor1.stop
|
||||
actor2.stop
|
||||
actor1.stop()
|
||||
actor2.stop()
|
||||
}
|
||||
|
||||
@Test def shouldGetResponseByAllActorsInActorRegistryWhenInvokingForeach {
|
||||
Actor.registry.shutdownAll
|
||||
Actor.registry.shutdownAll()
|
||||
val actor1 = actorOf[TestActor]
|
||||
actor1.start
|
||||
actor1.start()
|
||||
val actor2 = actorOf[TestActor]
|
||||
actor2.start
|
||||
actor2.start()
|
||||
record = ""
|
||||
Actor.registry.foreach(actor => actor !! "ping")
|
||||
assert(record === "pongpong")
|
||||
actor1.stop
|
||||
actor2.stop
|
||||
actor1.stop()
|
||||
actor2.stop()
|
||||
}
|
||||
|
||||
@Test def shouldShutdownAllActorsInActorRegistry {
|
||||
Actor.registry.shutdownAll
|
||||
Actor.registry.shutdownAll()
|
||||
val actor1 = actorOf[TestActor]
|
||||
actor1.start
|
||||
actor1.start()
|
||||
val actor2 = actorOf[TestActor]
|
||||
actor2.start
|
||||
Actor.registry.shutdownAll
|
||||
actor2.start()
|
||||
Actor.registry.shutdownAll()
|
||||
assert(Actor.registry.actors.size === 0)
|
||||
}
|
||||
|
||||
@Test def shouldRemoveUnregisterActorInActorRegistry {
|
||||
Actor.registry.shutdownAll
|
||||
Actor.registry.shutdownAll()
|
||||
val actor1 = actorOf[TestActor]
|
||||
actor1.start
|
||||
actor1.start()
|
||||
val actor2 = actorOf[TestActor]
|
||||
actor2.start
|
||||
actor2.start()
|
||||
assert(Actor.registry.actors.size === 2)
|
||||
Actor.registry.unregister(actor1)
|
||||
assert(Actor.registry.actors.size === 1)
|
||||
|
|
@ -216,7 +216,7 @@ class ActorRegistrySpec extends JUnitSuite {
|
|||
}
|
||||
|
||||
@Test def shouldBeAbleToRegisterActorsConcurrently {
|
||||
Actor.registry.shutdownAll
|
||||
Actor.registry.shutdownAll()
|
||||
|
||||
def mkTestActors = for(i <- (1 to 10).toList;j <- 1 to 3000) yield actorOf( new Actor {
|
||||
self.id = i.toString
|
||||
|
|
@ -227,11 +227,11 @@ class ActorRegistrySpec extends JUnitSuite {
|
|||
val barrier = new CyclicBarrier(3)
|
||||
|
||||
def mkThread(actors: Iterable[ActorRef]) = new Thread {
|
||||
this.start
|
||||
this.start()
|
||||
override def run {
|
||||
barrier.await
|
||||
actors foreach { _.start }
|
||||
latch.countDown
|
||||
actors foreach { _.start() }
|
||||
latch.countDown()
|
||||
}
|
||||
}
|
||||
val a1,a2,a3 = mkTestActors
|
||||
|
|
@ -12,7 +12,7 @@ class SchedulerSpec extends JUnitSuite {
|
|||
def withCleanEndState(action: => Unit) {
|
||||
action
|
||||
Scheduler.restart
|
||||
Actor.registry.shutdownAll
|
||||
Actor.registry.shutdownAll()
|
||||
}
|
||||
|
||||
|
||||
|
|
@ -21,8 +21,8 @@ class SchedulerSpec extends JUnitSuite {
|
|||
case object Tick
|
||||
val countDownLatch = new CountDownLatch(3)
|
||||
val tickActor = actorOf(new Actor {
|
||||
def receive = { case Tick => countDownLatch.countDown }
|
||||
}).start
|
||||
def receive = { case Tick => countDownLatch.countDown() }
|
||||
}).start()
|
||||
// run every 50 millisec
|
||||
Scheduler.schedule(tickActor, Tick, 0, 50, TimeUnit.MILLISECONDS)
|
||||
|
||||
|
|
@ -31,7 +31,7 @@ class SchedulerSpec extends JUnitSuite {
|
|||
|
||||
val countDownLatch2 = new CountDownLatch(3)
|
||||
|
||||
Scheduler.schedule( () => countDownLatch2.countDown, 0, 50, TimeUnit.MILLISECONDS)
|
||||
Scheduler.schedule( () => countDownLatch2.countDown(), 0, 50, TimeUnit.MILLISECONDS)
|
||||
|
||||
// after max 1 second it should be executed at least the 3 times already
|
||||
assert(countDownLatch2.await(1, TimeUnit.SECONDS))
|
||||
|
|
@ -41,11 +41,11 @@ class SchedulerSpec extends JUnitSuite {
|
|||
case object Tick
|
||||
val countDownLatch = new CountDownLatch(3)
|
||||
val tickActor = actorOf(new Actor {
|
||||
def receive = { case Tick => countDownLatch.countDown }
|
||||
}).start
|
||||
def receive = { case Tick => countDownLatch.countDown() }
|
||||
}).start()
|
||||
// run every 50 millisec
|
||||
Scheduler.scheduleOnce(tickActor, Tick, 50, TimeUnit.MILLISECONDS)
|
||||
Scheduler.scheduleOnce( () => countDownLatch.countDown, 50, TimeUnit.MILLISECONDS)
|
||||
Scheduler.scheduleOnce( () => countDownLatch.countDown(), 50, TimeUnit.MILLISECONDS)
|
||||
|
||||
// after 1 second the wait should fail
|
||||
assert(countDownLatch.await(1, TimeUnit.SECONDS) == false)
|
||||
|
|
@ -60,8 +60,8 @@ class SchedulerSpec extends JUnitSuite {
|
|||
object Ping
|
||||
val ticks = new CountDownLatch(1000)
|
||||
val actor = actorOf(new Actor {
|
||||
def receive = { case Ping => ticks.countDown }
|
||||
}).start
|
||||
def receive = { case Ping => ticks.countDown() }
|
||||
}).start()
|
||||
val numActors = Actor.registry.actors.length
|
||||
(1 to 1000).foreach( _ => Scheduler.scheduleOnce(actor,Ping,1,TimeUnit.MILLISECONDS) )
|
||||
assert(ticks.await(10,TimeUnit.SECONDS))
|
||||
|
|
@ -76,8 +76,8 @@ class SchedulerSpec extends JUnitSuite {
|
|||
val ticks = new CountDownLatch(1)
|
||||
|
||||
val actor = actorOf(new Actor {
|
||||
def receive = { case Ping => ticks.countDown }
|
||||
}).start
|
||||
def receive = { case Ping => ticks.countDown() }
|
||||
}).start()
|
||||
|
||||
(1 to 10).foreach { i =>
|
||||
val future = Scheduler.scheduleOnce(actor,Ping,1,TimeUnit.SECONDS)
|
||||
|
|
@ -101,7 +101,7 @@ class SchedulerSpec extends JUnitSuite {
|
|||
self.lifeCycle = Permanent
|
||||
|
||||
def receive = {
|
||||
case Ping => pingLatch.countDown
|
||||
case Ping => pingLatch.countDown()
|
||||
case Crash => throw new Exception("CRASH")
|
||||
}
|
||||
|
||||
|
|
@ -25,18 +25,18 @@ class RoutingSpec extends junit.framework.TestCase with Suite with MustMatchers
|
|||
case `testMsg1` => self.reply(3)
|
||||
case `testMsg2` => self.reply(7)
|
||||
}
|
||||
} ).start
|
||||
} ).start()
|
||||
|
||||
val t2 = actorOf( new Actor() {
|
||||
def receive = {
|
||||
case `testMsg3` => self.reply(11)
|
||||
}
|
||||
}).start
|
||||
}).start()
|
||||
|
||||
val d = dispatcherActor {
|
||||
case `testMsg1`|`testMsg2` => t1
|
||||
case `testMsg3` => t2
|
||||
}.start
|
||||
}.start()
|
||||
|
||||
val result = for {
|
||||
a <- (d !! (testMsg1, 5000)).as[Int]
|
||||
|
|
@ -47,14 +47,14 @@ class RoutingSpec extends junit.framework.TestCase with Suite with MustMatchers
|
|||
result.isDefined must be (true)
|
||||
result.get must be(21)
|
||||
|
||||
for(a <- List(t1,t2,d)) a.stop
|
||||
for(a <- List(t1,t2,d)) a.stop()
|
||||
}
|
||||
|
||||
@Test def testLogger = {
|
||||
val msgs = new java.util.concurrent.ConcurrentSkipListSet[Any]
|
||||
val latch = new CountDownLatch(2)
|
||||
val t1 = actorOf(new Actor { def receive = { case _ => } }).start
|
||||
val l = loggerActor(t1,(x) => { msgs.add(x); latch.countDown }).start
|
||||
val t1 = actorOf(new Actor { def receive = { case _ => } }).start()
|
||||
val l = loggerActor(t1,(x) => { msgs.add(x); latch.countDown() }).start()
|
||||
val foo : Any = "foo"
|
||||
val bar : Any = "bar"
|
||||
l ! foo
|
||||
|
|
@ -62,8 +62,8 @@ class RoutingSpec extends junit.framework.TestCase with Suite with MustMatchers
|
|||
val done = latch.await(5,TimeUnit.SECONDS)
|
||||
done must be (true)
|
||||
msgs must ( have size (2) and contain (foo) and contain (bar) )
|
||||
t1.stop
|
||||
l.stop
|
||||
t1.stop()
|
||||
l.stop()
|
||||
}
|
||||
|
||||
@Test def testSmallestMailboxFirstDispatcher = {
|
||||
|
|
@ -74,23 +74,23 @@ class RoutingSpec extends junit.framework.TestCase with Suite with MustMatchers
|
|||
case x =>
|
||||
Thread.sleep(50) // slow actor
|
||||
t1ProcessedCount.incrementAndGet
|
||||
latch.countDown
|
||||
latch.countDown()
|
||||
}
|
||||
}).start
|
||||
}).start()
|
||||
|
||||
val t2ProcessedCount = new AtomicInteger(0)
|
||||
val t2 = actorOf(new Actor {
|
||||
def receive = {
|
||||
case x => t2ProcessedCount.incrementAndGet
|
||||
latch.countDown
|
||||
latch.countDown()
|
||||
}
|
||||
}).start
|
||||
}).start()
|
||||
val d = loadBalancerActor(new SmallestMailboxFirstIterator(t1 :: t2 :: Nil))
|
||||
for (i <- 1 to 500) d ! i
|
||||
val done = latch.await(10,TimeUnit.SECONDS)
|
||||
done must be (true)
|
||||
t1ProcessedCount.get must be < (t2ProcessedCount.get) // because t1 is much slower and thus has a bigger mailbox all the time
|
||||
for(a <- List(t1,t2,d)) a.stop
|
||||
for(a <- List(t1,t2,d)) a.stop()
|
||||
}
|
||||
|
||||
@Test def testListener = {
|
||||
|
|
@ -102,16 +102,16 @@ class RoutingSpec extends junit.framework.TestCase with Suite with MustMatchers
|
|||
case "foo" => gossip("bar")
|
||||
}
|
||||
})
|
||||
i.start
|
||||
i.start()
|
||||
|
||||
def newListener = actorOf(new Actor {
|
||||
def receive = {
|
||||
case "bar" =>
|
||||
num.incrementAndGet
|
||||
latch.countDown
|
||||
case "foo" => foreachListener.countDown
|
||||
latch.countDown()
|
||||
case "foo" => foreachListener.countDown()
|
||||
}
|
||||
}).start
|
||||
}).start()
|
||||
|
||||
val a1 = newListener
|
||||
val a2 = newListener
|
||||
|
|
@ -129,7 +129,7 @@ class RoutingSpec extends junit.framework.TestCase with Suite with MustMatchers
|
|||
num.get must be (2)
|
||||
val withListeners = foreachListener.await(5,TimeUnit.SECONDS)
|
||||
withListeners must be (true)
|
||||
for(a <- List(i,a1,a2,a3)) a.stop
|
||||
for(a <- List(i,a1,a2,a3)) a.stop()
|
||||
}
|
||||
|
||||
@Test def testIsDefinedAt = {
|
||||
|
|
@ -142,28 +142,28 @@ class RoutingSpec extends junit.framework.TestCase with Suite with MustMatchers
|
|||
case `testMsg1` => self.reply(3)
|
||||
case `testMsg2` => self.reply(7)
|
||||
}
|
||||
} ).start
|
||||
} ).start()
|
||||
|
||||
val t2 = actorOf( new Actor() {
|
||||
def receive = {
|
||||
case `testMsg1` => self.reply(3)
|
||||
case `testMsg2` => self.reply(7)
|
||||
}
|
||||
} ).start
|
||||
} ).start()
|
||||
|
||||
val t3 = actorOf( new Actor() {
|
||||
def receive = {
|
||||
case `testMsg1` => self.reply(3)
|
||||
case `testMsg2` => self.reply(7)
|
||||
}
|
||||
} ).start
|
||||
} ).start()
|
||||
|
||||
val t4 = actorOf( new Actor() {
|
||||
def receive = {
|
||||
case `testMsg1` => self.reply(3)
|
||||
case `testMsg2` => self.reply(7)
|
||||
}
|
||||
} ).start
|
||||
} ).start()
|
||||
|
||||
val d1 = loadBalancerActor(new SmallestMailboxFirstIterator(t1 :: t2 :: Nil))
|
||||
val d2 = loadBalancerActor(new CyclicIterator[ActorRef](t3 :: t4 :: Nil))
|
||||
|
|
@ -177,7 +177,7 @@ class RoutingSpec extends junit.framework.TestCase with Suite with MustMatchers
|
|||
d2.isDefinedAt(testMsg1) must be (true)
|
||||
d2.isDefinedAt(testMsg3) must be (false)
|
||||
|
||||
for(a <- List(t1,t2,d1,d2)) a.stop
|
||||
for(a <- List(t1,t2,d1,d2)) a.stop()
|
||||
}
|
||||
|
||||
// Actor Pool Capacity Tests
|
||||
|
|
@ -196,7 +196,7 @@ class RoutingSpec extends junit.framework.TestCase with Suite with MustMatchers
|
|||
def receive = {
|
||||
case _ =>
|
||||
counter.incrementAndGet
|
||||
latch.countDown
|
||||
latch.countDown()
|
||||
self reply_? "success"
|
||||
}
|
||||
})
|
||||
|
|
@ -211,11 +211,11 @@ class RoutingSpec extends junit.framework.TestCase with Suite with MustMatchers
|
|||
val successes = new CountDownLatch(2)
|
||||
implicit val successCounterActor = Some(actorOf(new Actor {
|
||||
def receive = {
|
||||
case "success" => successes.countDown
|
||||
case "success" => successes.countDown()
|
||||
}
|
||||
}).start)
|
||||
}).start())
|
||||
|
||||
val pool = actorOf(new TestPool).start
|
||||
val pool = actorOf(new TestPool).start()
|
||||
pool ! "a"
|
||||
pool ! "b"
|
||||
|
||||
|
|
@ -253,14 +253,14 @@ class RoutingSpec extends junit.framework.TestCase with Suite with MustMatchers
|
|||
}
|
||||
}
|
||||
})
|
||||
}).start
|
||||
}).start()
|
||||
|
||||
try {
|
||||
(for(count <- 1 to 500) yield actorPool.!!) foreach {
|
||||
_.await.resultOrException.get must be ("Response")
|
||||
}
|
||||
} finally {
|
||||
actorPool.stop
|
||||
actorPool.stop()
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -283,7 +283,7 @@ class RoutingSpec extends junit.framework.TestCase with Suite with MustMatchers
|
|||
case n:Int =>
|
||||
Thread.sleep(n)
|
||||
counter.incrementAndGet
|
||||
latch.countDown
|
||||
latch.countDown()
|
||||
}
|
||||
})
|
||||
|
||||
|
|
@ -299,7 +299,7 @@ class RoutingSpec extends junit.framework.TestCase with Suite with MustMatchers
|
|||
//
|
||||
// first message should create the minimum number of delegates
|
||||
//
|
||||
val pool = actorOf(new TestPool).start
|
||||
val pool = actorOf(new TestPool).start()
|
||||
pool ! 1
|
||||
(pool !! ActorPool.Stat).asInstanceOf[Option[ActorPool.Stats]].get.size must be (2)
|
||||
|
||||
|
|
@ -356,7 +356,7 @@ class RoutingSpec extends junit.framework.TestCase with Suite with MustMatchers
|
|||
case n:Int =>
|
||||
Thread.sleep(n)
|
||||
counter.incrementAndGet
|
||||
latch.countDown
|
||||
latch.countDown()
|
||||
}
|
||||
})
|
||||
|
||||
|
|
@ -370,7 +370,7 @@ class RoutingSpec extends junit.framework.TestCase with Suite with MustMatchers
|
|||
def receive = _route
|
||||
}
|
||||
|
||||
val pool = actorOf(new TestPool).start
|
||||
val pool = actorOf(new TestPool).start()
|
||||
|
||||
var loops = 0
|
||||
def loop(t:Int) = {
|
||||
|
|
@ -421,7 +421,7 @@ class RoutingSpec extends junit.framework.TestCase with Suite with MustMatchers
|
|||
def receive = {
|
||||
case _ =>
|
||||
delegates put(self.uuid.toString, "")
|
||||
latch.countDown
|
||||
latch.countDown()
|
||||
}
|
||||
})
|
||||
|
||||
|
|
@ -433,7 +433,7 @@ class RoutingSpec extends junit.framework.TestCase with Suite with MustMatchers
|
|||
def receive = _route
|
||||
}
|
||||
|
||||
val pool1 = actorOf(new TestPool1).start
|
||||
val pool1 = actorOf(new TestPool1).start()
|
||||
pool1 ! "a"
|
||||
pool1 ! "b"
|
||||
var done = latch.await(1,TimeUnit.SECONDS)
|
||||
|
|
@ -450,7 +450,7 @@ class RoutingSpec extends junit.framework.TestCase with Suite with MustMatchers
|
|||
def receive = {
|
||||
case _ =>
|
||||
delegates put(self.uuid.toString, "")
|
||||
latch.countDown
|
||||
latch.countDown()
|
||||
}
|
||||
})
|
||||
|
||||
|
|
@ -465,10 +465,10 @@ class RoutingSpec extends junit.framework.TestCase with Suite with MustMatchers
|
|||
latch = new CountDownLatch(2)
|
||||
delegates clear
|
||||
|
||||
val pool2 = actorOf(new TestPool2).start
|
||||
val pool2 = actorOf(new TestPool2).start()
|
||||
pool2 ! "a"
|
||||
pool2 ! "b"
|
||||
done = latch.await(1,TimeUnit.SECONDS)
|
||||
done = latch.await(1, TimeUnit.SECONDS)
|
||||
done must be (true)
|
||||
delegates.size must be (2)
|
||||
pool2 stop
|
||||
|
|
@ -494,7 +494,7 @@ class RoutingSpec extends junit.framework.TestCase with Suite with MustMatchers
|
|||
def receive = {
|
||||
case n:Int =>
|
||||
Thread.sleep(n)
|
||||
latch.countDown
|
||||
latch.countDown()
|
||||
}
|
||||
})
|
||||
|
||||
|
|
@ -514,7 +514,7 @@ class RoutingSpec extends junit.framework.TestCase with Suite with MustMatchers
|
|||
//
|
||||
// put some pressure on the pool
|
||||
//
|
||||
val pool = actorOf(new TestPool).start
|
||||
val pool = actorOf(new TestPool).start()
|
||||
for (m <- 0 to 10) pool ! 250
|
||||
Thread.sleep(5)
|
||||
val z = (pool !! ActorPool.Stat).asInstanceOf[Option[ActorPool.Stats]].get.size
|
||||
|
|
@ -21,11 +21,11 @@ class CallingThreadDispatcherModelSpec extends ActorModelSpec {
|
|||
|
||||
def flood(num: Int) {
|
||||
val cachedMessage = CountDownNStop(new CountDownLatch(num))
|
||||
val keeper = newTestActor.start
|
||||
val keeper = newTestActor.start()
|
||||
(1 to num) foreach {
|
||||
_ => newTestActor.start ! cachedMessage
|
||||
_ => newTestActor.start() ! cachedMessage
|
||||
}
|
||||
keeper.stop
|
||||
keeper.stop()
|
||||
assertCountDown(cachedMessage.latch,10000, "Should process " + num + " countdowns")
|
||||
}
|
||||
for(run <- 1 to 3) {
|
||||
|
|
@@ -5,7 +5,7 @@ import org.scalatest.matchers.MustMatchers
 
 class Ticket001Spec extends WordSpec with MustMatchers {
 
-  "An XXX" should {
+  "An XXX" must {
     "do YYY" in {
       1 must be (1)
     }

@@ -27,7 +27,7 @@ class Ticket703Spec extends WordSpec with MustMatchers {
           self.reply_?("Response")
         }
       })
-    }).start
+    }).start()
     (actorPool.!!).await.result must be === Some("Response")
   }
 }

@@ -70,7 +70,6 @@ public class Actors {
     return Actor$.MODULE$.actorOf(type);
   }
 
-
   /**
    * The message that is sent when an Actor gets a receive timeout.
    * <pre>

@@ -83,4 +82,27 @@ public class Actors {
   public final static ReceiveTimeout$ receiveTimeout() {
     return ReceiveTimeout$.MODULE$;
   }
-}
+
+  /**
+   * The message that when sent to an Actor kills it by throwing an exception.
+   * <pre>
+   * actor.sendOneWay(kill());
+   * </pre>
+   * @return the single instance of Kill
+   */
+  public final static Kill$ kill() {
+    return Kill$.MODULE$;
+  }
+
+
+  /**
+   * The message that when sent to an Actor shuts it down by calling 'stop'.
+   * <pre>
+   * actor.sendOneWay(poisonPill());
+   * </pre>
+   * @return the single instance of PoisonPill
+   */
+  public final static PoisonPill$ poisonPill() {
+    return PoisonPill$.MODULE$;
+  }
+}

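The Javadoc above documents the Java helpers for the new Kill and PoisonPill messages. For reference, a minimal Scala sketch of sending the same messages (assuming the usual akka.actor imports shown elsewhere in this commit; the example object name is made up):

import akka.actor.{Actor, Kill, PoisonPill}
import akka.actor.Actor._

object LifeCycleMessageExample {
  def main(args: Array[String]) {
    val worker = actorOf(new Actor {
      def receive = { case msg => println("got: " + msg) }
    }).start()

    worker ! PoisonPill // stops the actor, the Scala counterpart of actor.sendOneWay(poisonPill())
    // worker ! Kill    // would instead crash it with an ActorKilledException
  }
}
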
@@ -5,31 +5,33 @@
package akka

import akka.actor.newUuid

import java.io.{StringWriter, PrintWriter}
import java.net.{InetAddress, UnknownHostException}

/**
 * Akka base Exception. Each Exception gets:
 * <ul>
 * <li>a UUID for tracking purposes</li>
 * <li>a message including exception name, uuid, original message and the stacktrace</li>
 * <li>a method 'log' that will log the exception once and only once</li>
 * <li>a uuid for tracking purposes</li>
 * <li>toString that includes exception name, message, uuid, and the stacktrace</li>
 * </ul>
 *
 * @author <a href="http://jonasboner.com">Jonas Bonér</a>
 */
@serializable abstract class AkkaException(message: String = "") extends RuntimeException(message) {
  import AkkaException._
  val exceptionName = getClass.getName
class AkkaException(message: String = "") extends RuntimeException(message) with Serializable {
  val uuid = "%s_%s".format(AkkaException.hostname, newUuid)

  val uuid = "%s_%s".format(hostname, newUuid)
  override lazy val toString = {
    val name = getClass.getName
    val trace = stackTraceToString
    "%s: %s\n[%s]\n%s".format(name, message, uuid, trace)
  }

  override val toString = "%s\n\t[%s]\n\t%s\n\t%s".format(exceptionName, uuid, message, {
    val sw = new StringWriter
    printStackTrace(new PrintWriter(sw))
    sw.toString
  })
  def stackTraceToString = {
    val trace = getStackTrace
    val sb = new StringBuffer
    for (i <- 0 until trace.length)
      sb.append("\tat %s\n" format trace(i))
    sb.toString
  }
}

object AkkaException {

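The reworked AkkaException above carries a host-qualified uuid and a lazily built toString. Below is a small illustrative sketch of a subclass, following the pattern of the exception classes declared later in this commit (the StorageException name and the example object are made up for illustration):

import akka.AkkaException

class StorageException(message: String) extends AkkaException(message)

object StorageExceptionExample {
  def main(args: Array[String]) {
    val e = new StorageException("disk full")
    println(e.uuid) // "<hostname>_<uuid>", handy for correlating log entries
    println(e)      // exception name, message, uuid and stack trace via the lazy toString
  }
}
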
@ -6,15 +6,10 @@ package akka.actor
|
|||
|
||||
import akka.dispatch._
|
||||
import akka.config.Config._
|
||||
import akka.config.Supervision._
|
||||
import akka.config.ConfigurationException
|
||||
import akka.util.Helpers.{narrow, narrowSilently}
|
||||
import akka.util.ListenerManagement
|
||||
import akka.AkkaException
|
||||
|
||||
import java.util.concurrent.TimeUnit
|
||||
import java.net.InetSocketAddress
|
||||
|
||||
import scala.reflect.BeanProperty
|
||||
import akka.util. {ReflectiveAccess, Duration}
|
||||
import akka.remoteinterface.RemoteSupport
|
||||
|
|
@ -23,14 +18,14 @@ import akka.japi. {Creator, Procedure}
|
|||
/**
|
||||
* Life-cycle messages for the Actors
|
||||
*/
|
||||
@serializable sealed trait LifeCycleMessage
|
||||
sealed trait LifeCycleMessage extends Serializable
|
||||
|
||||
/* Marker trait to show which Messages are automatically handled by Akka */
|
||||
sealed trait AutoReceivedMessage { self: LifeCycleMessage => }
|
||||
|
||||
case class HotSwap(code: ActorRef => Actor.Receive, discardOld: Boolean = true)
|
||||
case class HotSwap(code: ActorRef => Actor.Receive, discardOld: Boolean = true)
|
||||
extends AutoReceivedMessage with LifeCycleMessage {
|
||||
|
||||
|
||||
/**
|
||||
* Java API
|
||||
*/
|
||||
|
|
@ -61,6 +56,8 @@ case class UnlinkAndStop(child: ActorRef) extends AutoReceivedMessage with LifeC
|
|||
|
||||
case object PoisonPill extends AutoReceivedMessage with LifeCycleMessage
|
||||
|
||||
case object Kill extends AutoReceivedMessage with LifeCycleMessage
|
||||
|
||||
case object ReceiveTimeout extends LifeCycleMessage
|
||||
|
||||
case class MaximumNumberOfRestartsWithinTimeRangeReached(
|
||||
|
|
@ -75,6 +72,7 @@ class IllegalActorStateException private[akka](message: String) extends AkkaEx
|
|||
class ActorKilledException private[akka](message: String) extends AkkaException(message)
|
||||
class ActorInitializationException private[akka](message: String) extends AkkaException(message)
|
||||
class ActorTimeoutException private[akka](message: String) extends AkkaException(message)
|
||||
class InvalidMessageException private[akka](message: String) extends AkkaException(message)
|
||||
|
||||
/**
|
||||
* This message is thrown by default when an Actor's behavior doesn't match a message
|
||||
|
|
@ -90,7 +88,7 @@ case class UnhandledMessageException(msg: Any, ref: ActorRef) extends Exception
|
|||
* @author <a href="http://jonasboner.com">Jonas Bonér</a>
|
||||
*/
|
||||
object Actor extends ListenerManagement {
|
||||
|
||||
|
||||
/**
|
||||
* Add shutdown cleanups
|
||||
*/
|
||||
|
|
@ -128,19 +126,19 @@ object Actor extends ListenerManagement {
|
|||
type Receive = PartialFunction[Any, Unit]
|
||||
|
||||
private[actor] val actorRefInCreation = new scala.util.DynamicVariable[Option[ActorRef]](None)
|
||||
|
||||
|
||||
/**
|
||||
* Creates an ActorRef out of the Actor with type T.
|
||||
* <pre>
|
||||
* import Actor._
|
||||
* val actor = actorOf[MyActor]
|
||||
* actor.start
|
||||
* actor.start()
|
||||
* actor ! message
|
||||
* actor.stop
|
||||
* actor.stop()
|
||||
* </pre>
|
||||
* You can create and start the actor in one statement like this:
|
||||
* <pre>
|
||||
* val actor = actorOf[MyActor].start
|
||||
* val actor = actorOf[MyActor].start()
|
||||
* </pre>
|
||||
*/
|
||||
def actorOf[T <: Actor : Manifest]: ActorRef = actorOf(manifest[T].erasure.asInstanceOf[Class[_ <: Actor]])
|
||||
|
|
@ -150,13 +148,13 @@ object Actor extends ListenerManagement {
|
|||
* <pre>
|
||||
* import Actor._
|
||||
* val actor = actorOf(classOf[MyActor])
|
||||
* actor.start
|
||||
* actor.start()
|
||||
* actor ! message
|
||||
* actor.stop
|
||||
* actor.stop()
|
||||
* </pre>
|
||||
* You can create and start the actor in one statement like this:
|
||||
* <pre>
|
||||
* val actor = actorOf(classOf[MyActor]).start
|
||||
* val actor = actorOf(classOf[MyActor]).start()
|
||||
* </pre>
|
||||
*/
|
||||
def actorOf(clazz: Class[_ <: Actor]): ActorRef = new LocalActorRef(() => {
|
||||
|
|
@ -178,13 +176,13 @@ object Actor extends ListenerManagement {
|
|||
* <pre>
|
||||
* import Actor._
|
||||
* val actor = actorOf(new MyActor)
|
||||
* actor.start
|
||||
* actor.start()
|
||||
* actor ! message
|
||||
* actor.stop
|
||||
* actor.stop()
|
||||
* </pre>
|
||||
* You can create and start the actor in one statement like this:
|
||||
* <pre>
|
||||
* val actor = actorOf(new MyActor).start
|
||||
* val actor = actorOf(new MyActor).start()
|
||||
* </pre>
|
||||
*/
|
||||
def actorOf(factory: => Actor): ActorRef = new LocalActorRef(() => factory, None)
|
||||
|
|
@ -219,9 +217,9 @@ object Actor extends ListenerManagement {
|
|||
actorOf(new Actor() {
|
||||
self.dispatcher = dispatcher
|
||||
def receive = {
|
||||
case Spawn => try { body } finally { self.stop }
|
||||
case Spawn => try { body } finally { self.stop() }
|
||||
}
|
||||
}).start ! Spawn
|
||||
}).start() ! Spawn
|
||||
}
|
||||
/**
|
||||
* Implicitly converts the given Option[Any] to a AnyOptionAsTypedOption which offers the method <code>as[T]</code>
|
||||
|
|
@ -276,9 +274,6 @@ object Actor extends ListenerManagement {
|
|||
* }
|
||||
* </pre>
|
||||
*
|
||||
* <p/>
|
||||
* The Actor trait also has a 'log' member field that can be used for logging within the Actor.
|
||||
*
|
||||
* @author <a href="http://jonasboner.com">Jonas Bonér</a>
|
||||
*/
|
||||
trait Actor {
|
||||
|
|
@ -303,6 +298,7 @@ trait Actor {
|
|||
"\n\tEither use:" +
|
||||
"\n\t\t'val actor = Actor.actorOf[MyActor]', or" +
|
||||
"\n\t\t'val actor = Actor.actorOf(new MyActor(..))'")
|
||||
Actor.actorRefInCreation.value = None
|
||||
optRef.asInstanceOf[Some[ActorRef]].get.id = getClass.getName //FIXME: Is this needed?
|
||||
optRef.asInstanceOf[Some[ActorRef]]
|
||||
}
|
||||
|
|
@ -352,7 +348,7 @@ trait Actor {
|
|||
* <p/>
|
||||
* Example code:
|
||||
* <pre>
|
||||
* def receive = {
|
||||
* def receive = {
|
||||
* case Ping =>
|
||||
* println("got a 'Ping' message")
|
||||
* self.reply("pong")
|
||||
|
|
@ -370,14 +366,14 @@ trait Actor {
|
|||
/**
|
||||
* User overridable callback.
|
||||
* <p/>
|
||||
* Is called when an Actor is started by invoking 'actor.start'.
|
||||
* Is called when an Actor is started by invoking 'actor.start()'.
|
||||
*/
|
||||
def preStart {}
|
||||
|
||||
/**
|
||||
* User overridable callback.
|
||||
* <p/>
|
||||
* Is called when 'actor.stop' is invoked.
|
||||
* Is called when 'actor.stop()' is invoked.
|
||||
*/
|
||||
def postStop {}
|
||||
|
||||
|
|
@ -426,7 +422,7 @@ trait Actor {
|
|||
* If "discardOld" is true, an unbecome will be issued prior to pushing the new behavior to the stack
|
||||
*/
|
||||
def become(behavior: Receive, discardOld: Boolean = true) {
|
||||
if (discardOld) unbecome
|
||||
if (discardOld) unbecome()
|
||||
self.hotswap = self.hotswap.push(behavior)
|
||||
}
|
||||
|
||||
|
|
@ -443,8 +439,10 @@ trait Actor {
|
|||
// =========================================
|
||||
|
||||
private[akka] final def apply(msg: Any) = {
|
||||
if (msg.isInstanceOf[AnyRef] && (msg.asInstanceOf[AnyRef] eq null))
|
||||
throw new InvalidMessageException("Message from [" + self.sender + "] to [" + self.toString + "] is null")
|
||||
val behaviorStack = self.hotswap
|
||||
msg match { //FIXME Add check for currentMessage eq null throw new BadUSerException?
|
||||
msg match {
|
||||
case l: AutoReceivedMessage => autoReceiveMessage(l)
|
||||
case msg if behaviorStack.nonEmpty &&
|
||||
behaviorStack.head.isDefinedAt(msg) => behaviorStack.head.apply(msg)
|
||||
|
|
@ -456,15 +454,16 @@ trait Actor {
|
|||
|
||||
private final def autoReceiveMessage(msg: AutoReceivedMessage): Unit = msg match {
|
||||
case HotSwap(code, discardOld) => become(code(self), discardOld)
|
||||
case RevertHotSwap => unbecome
|
||||
case RevertHotSwap => unbecome()
|
||||
case Exit(dead, reason) => self.handleTrapExit(dead, reason)
|
||||
case Link(child) => self.link(child)
|
||||
case Unlink(child) => self.unlink(child)
|
||||
case UnlinkAndStop(child) => self.unlink(child); child.stop
|
||||
case UnlinkAndStop(child) => self.unlink(child); child.stop()
|
||||
case Restart(reason) => throw reason
|
||||
case Kill => throw new ActorKilledException("Kill")
|
||||
case PoisonPill =>
|
||||
val f = self.senderFuture
|
||||
self.stop
|
||||
self.stop()
|
||||
if (f.isDefined) f.get.completeWithException(new ActorKilledException("PoisonPill"))
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -6,18 +6,14 @@ package akka.actor
|
|||
|
||||
import akka.event.EventHandler
|
||||
import akka.dispatch._
|
||||
import akka.config.Config._
|
||||
import akka.config.Supervision._
|
||||
import akka.AkkaException
|
||||
import akka.util._
|
||||
import ReflectiveAccess._
|
||||
|
||||
import java.net.InetSocketAddress
|
||||
import java.util.concurrent.atomic.{AtomicInteger, AtomicReference}
|
||||
import java.util.concurrent.locks.ReentrantLock
|
||||
import java.util.concurrent.atomic.AtomicReference
|
||||
import java.util.concurrent.{ ScheduledFuture, ConcurrentHashMap, TimeUnit }
|
||||
import java.util.{ Map => JMap }
|
||||
import java.lang.reflect.Field
|
||||
|
||||
import scala.reflect.BeanProperty
|
||||
import scala.collection.immutable.Stack
|
||||
|
|
@ -36,19 +32,20 @@ private[akka] object ActorRefInternals {
|
|||
}
|
||||
|
||||
/**
|
||||
* Abstraction for unification of sender and senderFuture for later reply
|
||||
* Abstraction for unification of sender and senderFuture for later reply.
|
||||
* Can be stored away and used at a later point in time.
|
||||
*/
|
||||
abstract class Channel[T] {
|
||||
|
||||
|
||||
/**
|
||||
* Sends the specified message to the channel
|
||||
* Scala API
|
||||
* Scala API. <p/>
|
||||
* Sends the specified message to the channel.
|
||||
*/
|
||||
def !(msg: T): Unit
|
||||
|
||||
/**
|
||||
* Sends the specified message to the channel
|
||||
* Java API
|
||||
* Java API. <p/>
|
||||
* Sends the specified message to the channel.
|
||||
*/
|
||||
def sendOneWay(msg: T): Unit = this.!(msg)
|
||||
}
|
||||
|
|
@ -63,14 +60,14 @@ abstract class Channel[T] {
|
|||
* import Actor._
|
||||
*
|
||||
* val actor = actorOf[MyActor]
|
||||
* actor.start
|
||||
* actor.start()
|
||||
* actor ! message
|
||||
* actor.stop
|
||||
* actor.stop()
|
||||
* </pre>
|
||||
*
|
||||
* You can also create and start actors like this:
|
||||
* <pre>
|
||||
* val actor = actorOf[MyActor].start
|
||||
* val actor = actorOf[MyActor].start()
|
||||
* </pre>
|
||||
*
|
||||
* Here is an example on how to create an actor with a non-default constructor.
|
||||
|
|
@ -78,9 +75,9 @@ abstract class Channel[T] {
|
|||
* import Actor._
|
||||
*
|
||||
* val actor = actorOf(new MyActor(...))
|
||||
* actor.start
|
||||
* actor.start()
|
||||
* actor ! message
|
||||
* actor.stop
|
||||
* actor.stop()
|
||||
* </pre>
|
||||
*
|
||||
* @author <a href="http://jonasboner.com">Jonas Bonér</a>
|
||||
|
|
@ -129,7 +126,7 @@ trait ActorRef extends ActorRefShared with java.lang.Comparable[ActorRef] { scal
|
|||
var receiveTimeout: Option[Long] = None
|
||||
|
||||
/**
|
||||
* Akka Java API
|
||||
* Akka Java API. <p/>
|
||||
* Defines the default timeout for an initial receive invocation.
|
||||
* When specified, the receive function should be able to handle a 'ReceiveTimeout' message.
|
||||
*/
|
||||
|
|
@ -137,7 +134,7 @@ trait ActorRef extends ActorRefShared with java.lang.Comparable[ActorRef] { scal
|
|||
def getReceiveTimeout(): Option[Long] = receiveTimeout
|
||||
|
||||
/**
|
||||
* Akka Java API
|
||||
* Akka Java API. <p/>
|
||||
* A faultHandler defines what should be done when a linked actor signals an error.
|
||||
* <p/>
|
||||
* Can be one of:
|
||||
|
|
@ -154,7 +151,7 @@ trait ActorRef extends ActorRefShared with java.lang.Comparable[ActorRef] { scal
|
|||
|
||||
|
||||
/**
|
||||
* Akka Java API
|
||||
* Akka Java API. <p/>
|
||||
* A lifeCycle defines whether the actor will be stopped on error (Temporary) or if it can be restarted (Permanent)
|
||||
* <p/>
|
||||
* Can be one of:
|
||||
|
|
@ -172,7 +169,7 @@ trait ActorRef extends ActorRefShared with java.lang.Comparable[ActorRef] { scal
|
|||
def getLifeCycle(): LifeCycle
|
||||
|
||||
/**
|
||||
* Akka Java API
|
||||
* Akka Java API. <p/>
|
||||
* The default dispatcher is the <tt>Dispatchers.globalExecutorBasedEventDrivenDispatcher</tt>.
|
||||
* This means that all actors will share the same event-driven executor based dispatcher.
|
||||
* <p/>
|
||||
|
|
@ -192,7 +189,7 @@ trait ActorRef extends ActorRefShared with java.lang.Comparable[ActorRef] { scal
|
|||
def homeAddress: Option[InetSocketAddress]
|
||||
|
||||
/**
|
||||
* Java API
|
||||
* Java API. <p/>
|
||||
*/
|
||||
def getHomeAddress(): InetSocketAddress = homeAddress getOrElse null
|
||||
|
||||
|
|
@ -220,14 +217,14 @@ trait ActorRef extends ActorRefShared with java.lang.Comparable[ActorRef] { scal
|
|||
def uuid = _uuid
|
||||
|
||||
/**
|
||||
* Akka Java API
|
||||
* Akka Java API. <p/>
|
||||
* The reference sender Actor of the last received message.
|
||||
* Is defined if the message was sent from another Actor, else None.
|
||||
*/
|
||||
def getSender(): Option[ActorRef] = sender
|
||||
|
||||
/**
|
||||
* Akka Java API
|
||||
* Akka Java API. <p/>
|
||||
* The reference sender future of the last received message.
|
||||
* Is defined if the message was sent with '!!' or '!!!', else None.
|
||||
*/
|
||||
|
|
@ -267,7 +264,7 @@ trait ActorRef extends ActorRefShared with java.lang.Comparable[ActorRef] { scal
|
|||
protected[akka] def uuid_=(uid: Uuid) = _uuid = uid
|
||||
|
||||
/**
|
||||
* Akka Java API
|
||||
* Akka Java API. <p/>
|
||||
* Sends a one-way asynchronous message. E.g. fire-and-forget semantics.
|
||||
* <p/>
|
||||
* <pre>
|
||||
|
|
@ -278,7 +275,7 @@ trait ActorRef extends ActorRefShared with java.lang.Comparable[ActorRef] { scal
|
|||
def sendOneWay(message: AnyRef): Unit = sendOneWay(message, null)
|
||||
|
||||
/**
|
||||
* Akka Java API
|
||||
* Akka Java API. <p/>
|
||||
* Sends a one-way asynchronous message. E.g. fire-and-forget semantics.
|
||||
* <p/>
|
||||
* Allows you to pass along the sender of the message.
|
||||
|
|
@ -291,21 +288,21 @@ trait ActorRef extends ActorRefShared with java.lang.Comparable[ActorRef] { scal
|
|||
def sendOneWay(message: AnyRef, sender: ActorRef): Unit = this.!(message)(Option(sender))
|
||||
|
||||
/**
|
||||
* Akka Java API
|
||||
* Akka Java API. <p/>
|
||||
* @see sendRequestReply(message: AnyRef, timeout: Long, sender: ActorRef)
|
||||
* Uses the default timeout of the Actor (setTimeout()) and omits the sender reference
|
||||
*/
|
||||
def sendRequestReply(message: AnyRef): AnyRef = sendRequestReply(message, timeout, null)
|
||||
|
||||
/**
|
||||
* Akka Java API
|
||||
* Akka Java API. <p/>
|
||||
* @see sendRequestReply(message: AnyRef, timeout: Long, sender: ActorRef)
|
||||
* Uses the default timeout of the Actor (setTimeout())
|
||||
*/
|
||||
def sendRequestReply(message: AnyRef, sender: ActorRef): AnyRef = sendRequestReply(message, timeout, sender)
|
||||
|
||||
/**
|
||||
* Akka Java API
|
||||
* Akka Java API. <p/>
|
||||
* Sends a message asynchronously and waits on a future for a reply message under the hood.
|
||||
* <p/>
|
||||
* It waits on the reply either until it receives it or until the timeout expires
|
||||
|
|
@ -329,21 +326,21 @@ trait ActorRef extends ActorRefShared with java.lang.Comparable[ActorRef] { scal
|
|||
}
|
||||
|
||||
/**
|
||||
* Akka Java API
|
||||
* Akka Java API. <p/>
|
||||
* @see sendRequestReplyFuture(message: AnyRef, sender: ActorRef): Future[_]
|
||||
* Uses the Actor's default timeout (setTimeout()) and omits the sender
|
||||
*/
|
||||
def sendRequestReplyFuture(message: AnyRef): Future[_] = sendRequestReplyFuture(message, timeout, null)
|
||||
def sendRequestReplyFuture[T <: AnyRef](message: AnyRef): Future[T] = sendRequestReplyFuture(message, timeout, null).asInstanceOf[Future[T]]
|
||||
|
||||
/**
|
||||
* Akka Java API
|
||||
* Akka Java API. <p/>
|
||||
* @see sendRequestReplyFuture(message: AnyRef, sender: ActorRef): Future[_]
|
||||
* Uses the Actor's default timeout (setTimeout())
|
||||
*/
|
||||
def sendRequestReplyFuture(message: AnyRef, sender: ActorRef): Future[_] = sendRequestReplyFuture(message, timeout, sender)
|
||||
def sendRequestReplyFuture[T <: AnyRef](message: AnyRef, sender: ActorRef): Future[T] = sendRequestReplyFuture(message, timeout, sender).asInstanceOf[Future[T]]
|
||||
|
||||
/**
|
||||
* Akka Java API
|
||||
* Akka Java API. <p/>
|
||||
* Sends a message asynchronously and returns a future holding the eventual reply message.
|
||||
* <p/>
|
||||
* <b>NOTE:</b>
|
||||
|
|
@ -353,10 +350,10 @@ trait ActorRef extends ActorRefShared with java.lang.Comparable[ActorRef] { scal
|
|||
* If you are sending messages using <code>sendRequestReplyFuture</code> then you <b>have to</b> use <code>getContext().reply(..)</code>
|
||||
* to send a reply message to the original sender. If not then the sender will block until the timeout expires.
|
||||
*/
|
||||
def sendRequestReplyFuture(message: AnyRef, timeout: Long, sender: ActorRef): Future[_] = !!!(message, timeout)(Option(sender))
|
||||
def sendRequestReplyFuture[T <: AnyRef](message: AnyRef, timeout: Long, sender: ActorRef): Future[T] = !!!(message, timeout)(Option(sender)).asInstanceOf[Future[T]]
|
||||
|
||||
/**
|
||||
* Akka Java API
|
||||
* Akka Java API. <p/>
|
||||
* Forwards the message specified to this actor and preserves the original sender of the message
|
||||
*/
|
||||
def forward(message: AnyRef, sender: ActorRef): Unit =
|
||||
|
|
@ -364,7 +361,7 @@ trait ActorRef extends ActorRefShared with java.lang.Comparable[ActorRef] { scal
|
|||
else forward(message)(Some(sender))
|
||||
|
||||
/**
|
||||
* Akka Java API
|
||||
* Akka Java API. <p/>
|
||||
* Use <code>getContext().replyUnsafe(..)</code> to reply with a message to the original sender of the message currently
|
||||
* being processed.
|
||||
* <p/>
|
||||
|
|
@ -373,7 +370,7 @@ trait ActorRef extends ActorRefShared with java.lang.Comparable[ActorRef] { scal
|
|||
def replyUnsafe(message: AnyRef) = reply(message)
|
||||
|
||||
/**
|
||||
* Akka Java API
|
||||
* Akka Java API. <p/>
|
||||
* Use <code>getContext().replySafe(..)</code> to reply with a message to the original sender of the message currently
|
||||
* being processed.
|
||||
* <p/>
|
||||
|
|
@ -387,7 +384,7 @@ trait ActorRef extends ActorRefShared with java.lang.Comparable[ActorRef] { scal
|
|||
def actorClass: Class[_ <: Actor]
|
||||
|
||||
/**
|
||||
* Akka Java API
|
||||
* Akka Java API. <p/>
|
||||
* Returns the class for the Actor instance that is managed by the ActorRef.
|
||||
*/
|
||||
def getActorClass(): Class[_ <: Actor] = actorClass
|
||||
|
|
@ -398,7 +395,7 @@ trait ActorRef extends ActorRefShared with java.lang.Comparable[ActorRef] { scal
|
|||
def actorClassName: String
|
||||
|
||||
/**
|
||||
* Akka Java API
|
||||
* Akka Java API. <p/>
|
||||
* Returns the class name for the Actor instance that is managed by the ActorRef.
|
||||
*/
|
||||
def getActorClassName(): String = actorClassName
|
||||
|
|
@ -454,6 +451,7 @@ trait ActorRef extends ActorRefShared with java.lang.Comparable[ActorRef] { scal
|
|||
* <p/>
|
||||
* To be invoked from within the actor itself.
|
||||
*/
|
||||
@deprecated("Will be removed after 1.1, use Actor.actorOf instead")
|
||||
def spawn(clazz: Class[_ <: Actor]): ActorRef
|
||||
|
||||
/**
|
||||
|
|
@ -461,6 +459,7 @@ trait ActorRef extends ActorRefShared with java.lang.Comparable[ActorRef] { scal
|
|||
* <p/>
|
||||
* To be invoked from within the actor itself.
|
||||
*/
|
||||
@deprecated("Will be removed after 1.1, client managed actors will be removed")
|
||||
def spawnRemote(clazz: Class[_ <: Actor], hostname: String, port: Int, timeout: Long): ActorRef
|
||||
|
||||
/**
|
||||
|
|
@ -468,6 +467,7 @@ trait ActorRef extends ActorRefShared with java.lang.Comparable[ActorRef] { scal
|
|||
* <p/>
|
||||
* To be invoked from within the actor itself.
|
||||
*/
|
||||
@deprecated("Will be removed after 1.1, use use Actor.remote.actorOf instead and then link on success")
|
||||
def spawnLink(clazz: Class[_ <: Actor]): ActorRef
|
||||
|
||||
/**
|
||||
|
|
@ -475,6 +475,7 @@ trait ActorRef extends ActorRefShared with java.lang.Comparable[ActorRef] { scal
|
|||
* <p/>
|
||||
* To be invoked from within the actor itself.
|
||||
*/
|
||||
@deprecated("Will be removed after 1.1, client managed actors will be removed")
|
||||
def spawnLinkRemote(clazz: Class[_ <: Actor], hostname: String, port: Int, timeout: Long): ActorRef
|
||||
|
||||
/**
|
||||
|
|
@ -483,7 +484,7 @@ trait ActorRef extends ActorRefShared with java.lang.Comparable[ActorRef] { scal
|
|||
def mailboxSize = dispatcher.mailboxSize(this)
|
||||
|
||||
/**
|
||||
* Akka Java API
|
||||
* Akka Java API. <p/>
|
||||
* Returns the mailbox size.
|
||||
*/
|
||||
def getMailboxSize(): Int = mailboxSize
|
||||
|
|
@ -494,7 +495,7 @@ trait ActorRef extends ActorRefShared with java.lang.Comparable[ActorRef] { scal
|
|||
def supervisor: Option[ActorRef]
|
||||
|
||||
/**
|
||||
* Akka Java API
|
||||
* Akka Java API. <p/>
|
||||
* Returns the supervisor, if there is one.
|
||||
*/
|
||||
def getSupervisor(): ActorRef = supervisor getOrElse null
|
||||
|
|
@ -506,12 +507,36 @@ trait ActorRef extends ActorRefShared with java.lang.Comparable[ActorRef] { scal
|
|||
def linkedActors: JMap[Uuid, ActorRef]
|
||||
|
||||
/**
|
||||
* Java API
|
||||
* Java API. <p/>
|
||||
* Returns an unmodifiable Java Map containing the linked actors,
|
||||
* please note that the backing map is thread-safe but not immutable
|
||||
*/
|
||||
def getLinkedActors(): JMap[Uuid, ActorRef] = linkedActors
|
||||
|
||||
/**
|
||||
* Abstraction for unification of sender and senderFuture for later reply
|
||||
*/
|
||||
def channel: Channel[Any] = {
|
||||
if (senderFuture.isDefined) {
|
||||
new Channel[Any] {
|
||||
val future = senderFuture.get
|
||||
def !(msg: Any) = future completeWithResult msg
|
||||
}
|
||||
} else if (sender.isDefined) {
|
||||
val someSelf = Some(this)
|
||||
new Channel[Any] {
|
||||
val client = sender.get
|
||||
def !(msg: Any) = client.!(msg)(someSelf)
|
||||
}
|
||||
} else throw new IllegalActorStateException("No channel available")
|
||||
}
|
||||
|
||||
/**
|
||||
* Java API. <p/>
|
||||
* Abstraction for unification of sender and senderFuture for later reply
|
||||
*/
|
||||
def getChannel: Channel[Any] = channel
|
||||
|
||||
protected[akka] def invoke(messageHandle: MessageInvocation): Unit
|
||||
|
||||
protected[akka] def postMessageToMailbox(message: Any, senderOption: Option[ActorRef]): Unit
|
||||
|
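// A minimal sketch (not from this changeset) of the `channel` member defined
// above: it captures either the sender or the senderFuture so a reply can be sent
// later, whether the request came in via '!', '!!' or '!!!'.
// The DeferredReplier actor and its messages are made-up examples.
import akka.actor.Actor

class DeferredReplier extends Actor {
  def receive = {
    case "ask" =>
      val c = self.channel          // store the reply channel away
      // ... do some work first ...
      c ! "answer"                  // Java callers would use c.sendOneWay("answer")
  }
}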
|
@ -736,7 +761,7 @@ class LocalActorRef private[akka] (
|
|||
*/
|
||||
def startLink(actorRef: ActorRef): Unit = guard.withGuard {
|
||||
link(actorRef)
|
||||
actorRef.start
|
||||
actorRef.start()
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
@ -745,7 +770,7 @@ class LocalActorRef private[akka] (
|
|||
* To be invoked from within the actor itself.
|
||||
*/
|
||||
def spawn(clazz: Class[_ <: Actor]): ActorRef =
|
||||
Actor.actorOf(clazz).start
|
||||
Actor.actorOf(clazz).start()
|
||||
|
||||
/**
|
||||
* Atomically create (from actor class), start and make an actor remote.
|
||||
|
|
@ -756,7 +781,7 @@ class LocalActorRef private[akka] (
|
|||
ensureRemotingEnabled
|
||||
val ref = Actor.remote.actorOf(clazz, hostname, port)
|
||||
ref.timeout = timeout
|
||||
ref.start
|
||||
ref.start()
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
@ -767,7 +792,7 @@ class LocalActorRef private[akka] (
|
|||
def spawnLink(clazz: Class[_ <: Actor]): ActorRef = {
|
||||
val actor = spawn(clazz)
|
||||
link(actor)
|
||||
actor.start
|
||||
actor.start()
|
||||
actor
|
||||
}
|
||||
|
||||
|
|
@ -781,7 +806,7 @@ class LocalActorRef private[akka] (
|
|||
val actor = Actor.remote.actorOf(clazz, hostname, port)
|
||||
actor.timeout = timeout
|
||||
link(actor)
|
||||
actor.start
|
||||
actor.start()
|
||||
actor
|
||||
}
|
||||
|
||||
|
|
@ -866,7 +891,7 @@ class LocalActorRef private[akka] (
|
|||
|
||||
case _ =>
|
||||
if (_supervisor.isDefined) notifySupervisorWithMessage(Exit(this, reason))
|
||||
else dead.stop
|
||||
else dead.stop()
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -906,7 +931,6 @@ class LocalActorRef private[akka] (
|
|||
|
||||
failedActor match {
|
||||
case p: Proxyable =>
|
||||
//p.swapProxiedActor(freshActor) //TODO: broken
|
||||
failedActor.preRestart(reason)
|
||||
failedActor.postRestart(reason)
|
||||
case _ =>
|
||||
|
|
@ -940,10 +964,10 @@ class LocalActorRef private[akka] (
|
|||
|
||||
case _ => // either permanent or none where default is permanent
|
||||
val success = try {
|
||||
performRestart
|
||||
performRestart()
|
||||
true
|
||||
} catch {
|
||||
case e =>
|
||||
case e =>
|
||||
EventHandler.error(e, this, "Exception in restart of Actor [%s]".format(toString))
|
||||
false // an error or exception here should trigger a retry
|
||||
} finally {
|
||||
|
|
@ -994,13 +1018,18 @@ class LocalActorRef private[akka] (
|
|||
// ========= PRIVATE FUNCTIONS =========
|
||||
|
||||
private[this] def newActor: Actor = {
|
||||
val a = Actor.actorRefInCreation.withValue(Some(this)) { actorFactory() }
|
||||
if (a eq null) throw new ActorInitializationException("Actor instance passed to ActorRef can not be 'null'")
|
||||
a
|
||||
try {
|
||||
Actor.actorRefInCreation.value = Some(this)
|
||||
val a = actorFactory()
|
||||
if (a eq null) throw new ActorInitializationException("Actor instance passed to ActorRef can not be 'null'")
|
||||
a
|
||||
} finally {
|
||||
Actor.actorRefInCreation.value = None
|
||||
}
|
||||
}
|
||||
|
||||
private def shutDownTemporaryActor(temporaryActor: ActorRef) {
|
||||
temporaryActor.stop
|
||||
temporaryActor.stop()
|
||||
_linkedActors.remove(temporaryActor.uuid) // remove the temporary actor
|
||||
// if last temporary actor is gone, then unlink me from supervisor
|
||||
if (_linkedActors.isEmpty) notifySupervisorWithMessage(UnlinkAndStop(this))
|
||||
|
|
@ -1009,7 +1038,7 @@ class LocalActorRef private[akka] (
|
|||
|
||||
private def handleExceptionInDispatch(reason: Throwable, message: Any) = {
|
||||
EventHandler.error(reason, this, message.toString)
|
||||
|
||||
|
||||
//Prevent any further messages to be processed until the actor has been restarted
|
||||
dispatcher.suspend(this)
|
||||
|
||||
|
|
@ -1032,7 +1061,7 @@ class LocalActorRef private[akka] (
|
|||
{
|
||||
val i = _linkedActors.values.iterator
|
||||
while (i.hasNext) {
|
||||
i.next.stop
|
||||
i.next.stop()
|
||||
i.remove
|
||||
}
|
||||
}
|
||||
|
|
@ -1121,9 +1150,9 @@ private[akka] case class RemoteActorRef private[akka] (
|
|||
senderOption: Option[ActorRef],
|
||||
senderFuture: Option[CompletableFuture[T]]): CompletableFuture[T] = {
|
||||
val future = Actor.remote.send[T](
|
||||
message, senderOption, senderFuture,
|
||||
homeAddress.get, timeout,
|
||||
false, this, None,
|
||||
message, senderOption, senderFuture,
|
||||
homeAddress.get, timeout,
|
||||
false, this, None,
|
||||
actorType, loader)
|
||||
if (future.isDefined) future.get
|
||||
else throw new IllegalActorStateException("Expected a future from remote call to actor " + toString)
|
||||
|
|
@ -1201,8 +1230,8 @@ trait ScalaActorRef extends ActorRefShared { ref: ActorRef =>
|
|||
*/
|
||||
def id: String
|
||||
|
||||
def id_=(id: String): Unit
|
||||
|
||||
def id_=(id: String): Unit
|
||||
|
||||
/**
|
||||
* User overridable callback/setting.
|
||||
* <p/>
|
||||
|
|
@ -1267,7 +1296,7 @@ trait ScalaActorRef extends ActorRefShared { ref: ActorRef =>
|
|||
def !(message: Any)(implicit sender: Option[ActorRef] = None): Unit = {
|
||||
if (isRunning) postMessageToMailbox(message, sender)
|
||||
else throw new ActorInitializationException(
|
||||
"Actor has not been started, you need to invoke 'actor.start' before using it")
|
||||
"Actor has not been started, you need to invoke 'actor.start()' before using it")
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
@ -1298,7 +1327,7 @@ trait ScalaActorRef extends ActorRefShared { ref: ActorRef =>
|
|||
}
|
||||
future.resultOrException
|
||||
} else throw new ActorInitializationException(
|
||||
"Actor has not been started, you need to invoke 'actor.start' before using it")
|
||||
"Actor has not been started, you need to invoke 'actor.start()' before using it")
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
@ -1313,7 +1342,7 @@ trait ScalaActorRef extends ActorRefShared { ref: ActorRef =>
|
|||
def !!(implicit sender: Option[ActorRef] = None): Future[T] = {
|
||||
if (isRunning) postMessageToMailboxAndCreateFutureResultWithTimeout[T](message, timeout, sender, None)
|
||||
else throw new ActorInitializationException(
|
||||
"Actor has not been started, you need to invoke 'actor.start' before using it")
|
||||
"Actor has not been started, you need to invoke 'actor.start()' before using it")
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
@ -1327,7 +1356,7 @@ trait ScalaActorRef extends ActorRefShared { ref: ActorRef =>
|
|||
postMessageToMailboxAndCreateFutureResultWithTimeout(message, timeout, sender.get.sender, sender.get.senderFuture)
|
||||
else
|
||||
postMessageToMailbox(message, sender.get.sender)
|
||||
} else throw new ActorInitializationException("Actor has not been started, you need to invoke 'actor.start' before using it")
|
||||
} else throw new ActorInitializationException("Actor has not been started, you need to invoke 'actor.start()' before using it")
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
@ -1360,24 +1389,6 @@ trait ScalaActorRef extends ActorRefShared { ref: ActorRef =>
|
|||
} else false
|
||||
}
|
||||
|
||||
/**
|
||||
* Abstraction for unification of sender and senderFuture for later reply
|
||||
*/
|
||||
def channel: Channel[Any] = {
|
||||
if (senderFuture.isDefined) {
|
||||
new Channel[Any] {
|
||||
val future = senderFuture.get
|
||||
def !(msg: Any) = future completeWithResult msg
|
||||
}
|
||||
} else if (sender.isDefined) {
|
||||
val someSelf = Some(this)
|
||||
new Channel[Any] {
|
||||
val client = sender.get
|
||||
def !(msg: Any) = client.!(msg)(someSelf)
|
||||
}
|
||||
} else throw new IllegalActorStateException("No channel available")
|
||||
}
|
||||
|
||||
/**
|
||||
* Atomically create (from actor class) and start an actor.
|
||||
*/
|
||||
|
|
|
|||
|
|
@ -261,9 +261,9 @@ private[actor] final class ActorRegistry private[actor] () extends ListenerManag
|
|||
val actorRef = elements.nextElement
|
||||
val proxy = typedActorFor(actorRef)
|
||||
if (proxy.isDefined) TypedActorModule.typedActorObjectInstance.get.stop(proxy.get)
|
||||
else actorRef.stop
|
||||
else actorRef.stop()
|
||||
}
|
||||
} else foreach(_.stop)
|
||||
} else foreach(_.stop())
|
||||
if (Remote.isEnabled) {
|
||||
Actor.remote.clear //TODO: REVISIT: Should this be here?
|
||||
}
|
||||
|
|
|
|||
|
|
@ -56,6 +56,6 @@ trait BootableActorLoaderService extends Bootable {
|
|||
|
||||
abstract override def onUnload = {
|
||||
super.onUnload
|
||||
Actor.registry.shutdownAll
|
||||
Actor.registry.shutdownAll()
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -42,6 +42,14 @@ object FSM {
|
|||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* This extractor is just convenience for matching a (S, S) pair, including a
|
||||
* reminder of what the new state is.
|
||||
*/
|
||||
object -> {
|
||||
def unapply[S](in : (S, S)) = Some(in)
|
||||
}
|
||||
|
||||
/*
|
||||
* With these implicits in scope, you can write "5 seconds" anywhere a
|
||||
* Duration or Option[Duration] is expected. This is conveniently true
|
||||
|
|
@ -90,6 +98,23 @@ object FSM {
|
|||
* Each of the above also supports the method <code>replying(AnyRef)</code> for
|
||||
* sending a reply before changing state.
|
||||
*
|
||||
* While changing state, custom handlers may be invoked which are registered
|
||||
* using <code>onTransition</code>. This is meant to enable concentrating
|
||||
* different concerns in different places; you may choose to use
|
||||
* <code>when</code> for describing the properties of a state, including of
|
||||
* course initiating transitions, but you can describe the transitions using
|
||||
* <code>onTransition</code> to avoid having to duplicate that code among
|
||||
* multiple paths which lead to a transition:
|
||||
*
|
||||
* <pre>
|
||||
* onTransition {
|
||||
* case Active -> _ => cancelTimer("activeTimer")
|
||||
* }
|
||||
* </pre>
|
||||
*
|
||||
* Multiple such blocks are supported and all of them will be called, not only
|
||||
* the first matching one.
|
||||
*
|
||||
* Another feature is that other actors may subscribe for transition events by
|
||||
* sending a <code>SubscribeTransitionCallback</code> message to this actor;
|
||||
* use <code>UnsubscribeTransitionCallback</code> before stopping the other
|
||||
|
|
@ -119,7 +144,7 @@ trait FSM[S, D] {
|
|||
|
||||
type StateFunction = scala.PartialFunction[Event[D], State]
|
||||
type Timeout = Option[Duration]
|
||||
type TransitionHandler = (S, S) => Unit
|
||||
type TransitionHandler = PartialFunction[(S, S), Unit]
|
||||
|
||||
/* DSL */
|
||||
|
||||
|
|
@ -239,12 +264,43 @@ trait FSM[S, D] {
|
|||
|
||||
/**
|
||||
* Set handler which is called upon each state transition, i.e. not when
|
||||
* staying in the same state.
|
||||
* staying in the same state. This may use the pair extractor defined in the
|
||||
* FSM companion object like so:
|
||||
*
|
||||
* <pre>
|
||||
* onTransition {
|
||||
* case Old -> New => doSomething
|
||||
* }
|
||||
* </pre>
|
||||
*
|
||||
* It is also possible to supply a 2-ary function object:
|
||||
*
|
||||
* <pre>
|
||||
* onTransition(handler _)
|
||||
*
|
||||
* private def handler(from: S, to: S) { ... }
|
||||
* </pre>
|
||||
*
|
||||
* The underscore is unfortunately necessary to enable the nicer syntax shown
|
||||
* above (it uses the implicit conversion total2pf under the hood).
|
||||
*
|
||||
* <b>Multiple handlers may be installed, and every one of them will be
|
||||
* called, not only the first one matching.</b>
|
||||
*/
|
||||
protected final def onTransition(transitionHandler: TransitionHandler) = {
|
||||
transitionEvent = transitionHandler
|
||||
protected final def onTransition(transitionHandler: TransitionHandler) {
|
||||
transitionEvent :+= transitionHandler
|
||||
}
|
||||
|
||||
/**
|
||||
* Convenience wrapper for using a total function instead of a partial
|
||||
* function literal. To be used with onTransition.
|
||||
*/
|
||||
implicit protected final def total2pf(transitionHandler: (S, S) => Unit) =
|
||||
new PartialFunction[(S, S), Unit] {
|
||||
def isDefinedAt(in : (S, S)) = true
|
||||
def apply(in : (S, S)) { transitionHandler(in._1, in._2) }
|
||||
}
|
||||
|
||||
/**
|
||||
* Set handler which is called upon termination of this FSM actor.
|
||||
*/
|
||||
|
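// A minimal sketch (not from this changeset) of the two ways to register
// transition handlers documented for onTransition. It is only a fragment meant to
// sit inside some `Actor with FSM[St, D]` subclass; St, Active, Idle, logTransition
// and the timer name are made-up placeholders.
  import akka.actor.FSM._                                // brings the -> extractor into scope

  onTransition {
    case Active -> Idle => cancelTimer("activeTimer")    // pair-extractor form
  }

  onTransition(logTransition _)                          // 2-ary function form, via total2pf

  private def logTransition(from: St, to: St) {
    println("transition: " + from + " -> " + to)
  }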
|
@ -300,7 +356,10 @@ trait FSM[S, D] {
|
|||
case StopEvent(reason, _, _) =>
|
||||
}
|
||||
|
||||
private var transitionEvent: TransitionHandler = (from, to) => {
|
||||
private var transitionEvent: List[TransitionHandler] = Nil
|
||||
private def handleTransition(prev : S, next : S) {
|
||||
val tuple = (prev, next)
|
||||
for (te <- transitionEvent) { if (te.isDefinedAt(tuple)) te(tuple) }
|
||||
}
|
||||
|
||||
override final protected def receive: Receive = {
|
||||
|
|
@ -351,7 +410,7 @@ trait FSM[S, D] {
|
|||
terminate(Failure("Next state %s does not exist".format(nextState.stateName)))
|
||||
} else {
|
||||
if (currentState.stateName != nextState.stateName) {
|
||||
transitionEvent.apply(currentState.stateName, nextState.stateName)
|
||||
handleTransition(currentState.stateName, nextState.stateName)
|
||||
if (!transitionCallBackList.isEmpty) {
|
||||
val transition = Transition(self, currentState.stateName, nextState.stateName)
|
||||
transitionCallBackList.foreach(_ ! transition)
|
||||
|
|
@ -374,7 +433,7 @@ trait FSM[S, D] {
|
|||
|
||||
private def terminate(reason: Reason) = {
|
||||
terminateEvent.apply(StopEvent(reason, currentState.stateName, currentState.stateData))
|
||||
self.stop
|
||||
self.stop()
|
||||
}
|
||||
|
||||
case class Event[D](event: Any, stateData: D)
|
||||
|
|
|
|||
|
|
@ -106,7 +106,7 @@ sealed class Supervisor(handler: FaultHandlingStrategy) {
|
|||
private val _childActors = new ConcurrentHashMap[String, List[ActorRef]]
|
||||
private val _childSupervisors = new CopyOnWriteArrayList[Supervisor]
|
||||
|
||||
private[akka] val supervisor = actorOf(new SupervisorActor(handler)).start
|
||||
private[akka] val supervisor = actorOf(new SupervisorActor(handler)).start()
|
||||
|
||||
def uuid = supervisor.uuid
|
||||
|
||||
|
|
@ -114,7 +114,7 @@ sealed class Supervisor(handler: FaultHandlingStrategy) {
|
|||
this
|
||||
}
|
||||
|
||||
def shutdown(): Unit = supervisor.stop
|
||||
def shutdown(): Unit = supervisor.stop()
|
||||
|
||||
def link(child: ActorRef) = supervisor.link(child)
|
||||
|
||||
|
|
@ -131,7 +131,7 @@ sealed class Supervisor(handler: FaultHandlingStrategy) {
|
|||
servers.map(server =>
|
||||
server match {
|
||||
case Supervise(actorRef, lifeCycle, registerAsRemoteService) =>
|
||||
actorRef.start
|
||||
actorRef.start()
|
||||
val className = actorRef.actor.getClass.getName
|
||||
val currentActors = {
|
||||
val list = _childActors.get(className)
|
||||
|
|
@ -163,7 +163,7 @@ final class SupervisorActor private[akka] (handler: FaultHandlingStrategy) exten
|
|||
val i = self.linkedActors.values.iterator
|
||||
while(i.hasNext) {
|
||||
val ref = i.next
|
||||
ref.stop
|
||||
ref.stop()
|
||||
self.unlink(ref)
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -4,14 +4,8 @@
|
|||
|
||||
package akka.actor
|
||||
|
||||
import akka.dispatch._
|
||||
import akka.config.Supervision._
|
||||
import akka.japi.{Creator, Procedure}
|
||||
|
||||
import java.net.InetSocketAddress
|
||||
|
||||
import scala.reflect.BeanProperty
|
||||
|
||||
/**
|
||||
* Subclass this abstract class to create a MDB-style untyped actor.
|
||||
* <p/>
|
||||
|
|
@ -62,11 +56,21 @@ import scala.reflect.BeanProperty
|
|||
*/
|
||||
abstract class UntypedActor extends Actor {
|
||||
|
||||
/**
|
||||
* To be implemented by concrete UntypedActor. Defines the message handler.
|
||||
*/
|
||||
@throws(classOf[Exception])
|
||||
def onReceive(message: Any): Unit
|
||||
|
||||
/**
|
||||
* Returns the 'self' reference with the API.
|
||||
*/
|
||||
def getContext(): ActorRef = self
|
||||
|
||||
final protected def receive = {
|
||||
case msg => onReceive(msg)
|
||||
}
|
||||
/**
|
||||
* Returns the 'self' reference with the API.
|
||||
*/
|
||||
def context(): ActorRef = self
|
||||
|
||||
/**
|
||||
* Java API for become
|
||||
|
|
@ -79,8 +83,47 @@ abstract class UntypedActor extends Actor {
|
|||
def become(behavior: Procedure[Any], discardOld: Boolean): Unit =
|
||||
super.become({ case msg => behavior.apply(msg) }, discardOld)
|
||||
|
||||
@throws(classOf[Exception])
|
||||
def onReceive(message: Any): Unit
|
||||
/**
|
||||
* User overridable callback.
|
||||
* <p/>
|
||||
* Is called when an Actor is started by invoking 'actor.start()'.
|
||||
*/
|
||||
override def preStart {}
|
||||
|
||||
/**
|
||||
* User overridable callback.
|
||||
* <p/>
|
||||
* Is called when 'actor.stop()' is invoked.
|
||||
*/
|
||||
override def postStop {}
|
||||
|
||||
/**
|
||||
* User overridable callback.
|
||||
* <p/>
|
||||
* Is called on a crashed Actor right BEFORE it is restarted to allow clean up of resources before Actor is terminated.
|
||||
*/
|
||||
override def preRestart(reason: Throwable) {}
|
||||
|
||||
/**
|
||||
* User overridable callback.
|
||||
* <p/>
|
||||
* Is called right AFTER restart on the newly created Actor to allow reinitialization after an Actor crash.
|
||||
*/
|
||||
override def postRestart(reason: Throwable) {}
|
||||
|
||||
/**
|
||||
* User overridable callback.
|
||||
* <p/>
|
||||
* Is called when a message isn't handled by the current behavior of the actor;
|
||||
* by default it throws an UnhandledMessageException.
|
||||
*/
|
||||
override def unhandled(msg: Any) {
|
||||
throw new UnhandledMessageException(msg, self)
|
||||
}
|
||||
|
||||
final protected def receive = {
|
||||
case msg => onReceive(msg)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
@ -88,4 +131,4 @@ abstract class UntypedActor extends Actor {
|
|||
*
|
||||
* @author <a href="http://jonasboner.com">Jonas Bonér</a>
|
||||
*/
|
||||
trait UntypedActorFactory extends Creator[Actor]
|
||||
trait UntypedActorFactory extends Creator[Actor]
|
||||
|
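// A minimal sketch (not from this changeset) of an UntypedActor subclass, written
// in Scala for brevity although the class targets Java users; the Printer actor and
// its messages are made-up examples.
import akka.actor.{Actor, UntypedActor}

class Printer extends UntypedActor {
  def onReceive(message: Any) {
    message match {
      case s: String => getContext().replySafe("got: " + s)
      case other     => unhandled(other)   // falls back to the default shown above
    }
  }
}

// usage sketch: Actor.actorOf(classOf[Printer]).start().sendOneWay("hi")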
|
|
|||
|
|
@ -14,7 +14,10 @@ package object actor {
|
|||
ref.asInstanceOf[ActorRef]
|
||||
|
||||
type Uuid = com.eaio.uuid.UUID
|
||||
|
||||
def newUuid(): Uuid = new Uuid()
|
||||
def uuidFrom(time: Long, clockSeqAndNode: Long): Uuid = new Uuid(time,clockSeqAndNode)
|
||||
|
||||
def uuidFrom(time: Long, clockSeqAndNode: Long): Uuid = new Uuid(time, clockSeqAndNode)
|
||||
|
||||
def uuidFrom(uuid: String): Uuid = new Uuid(uuid)
|
||||
}
|
||||
|
|
@ -40,19 +40,19 @@ object DataFlow {
|
|||
* Executes the supplied function in another thread.
|
||||
*/
|
||||
def thread[A <: AnyRef, R <: AnyRef](body: A => R) =
|
||||
actorOf(new ReactiveEventBasedThread(body)).start
|
||||
actorOf(new ReactiveEventBasedThread(body)).start()
|
||||
|
||||
/**
|
||||
* JavaAPI.
|
||||
* Executes the supplied Function in another thread.
|
||||
*/
|
||||
def thread[A <: AnyRef, R <: AnyRef](body: Function[A,R]) =
|
||||
actorOf(new ReactiveEventBasedThread(body.apply)).start
|
||||
actorOf(new ReactiveEventBasedThread(body.apply)).start()
|
||||
|
||||
private class ReactiveEventBasedThread[A <: AnyRef, T <: AnyRef](body: A => T)
|
||||
extends Actor {
|
||||
def receive = {
|
||||
case Exit => self.stop
|
||||
case Exit => self.stop()
|
||||
case message => self.reply(body(message.asInstanceOf[A]))
|
||||
}
|
||||
}
|
||||
|
|
@ -84,7 +84,7 @@ object DataFlow {
|
|||
dataFlow.blockedReaders.poll ! s
|
||||
} else throw new DataFlowVariableException(
|
||||
"Attempt to change data flow variable (from [" + dataFlow.value.get + "] to [" + v + "])")
|
||||
case Exit => self.stop
|
||||
case Exit => self.stop()
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -97,11 +97,11 @@ object DataFlow {
|
|||
case None => readerFuture = self.senderFuture
|
||||
}
|
||||
case Set(v:T) => readerFuture.map(_ completeWithResult v)
|
||||
case Exit => self.stop
|
||||
case Exit => self.stop()
|
||||
}
|
||||
}
|
||||
|
||||
private[this] val in = actorOf(new In(this)).start
|
||||
private[this] val in = actorOf(new In(this)).start()
|
||||
|
||||
/**
|
||||
* Sets the value of this variable (if unset) with the value of the supplied variable.
|
||||
|
|
@ -143,7 +143,7 @@ object DataFlow {
|
|||
*/
|
||||
def apply(): T = {
|
||||
value.get getOrElse {
|
||||
val out = actorOf(new Out(this)).start
|
||||
val out = actorOf(new Out(this)).start()
|
||||
|
||||
val result = try {
|
||||
blockedReaders offer out
|
||||
|
|
|
|||
|
|
@ -54,7 +54,7 @@ object Dispatchers {
|
|||
val MAILBOX_PUSH_TIME_OUT = Duration(config.getInt("akka.actor.default-dispatcher.mailbox-push-timeout-time", 10), TIME_UNIT)
|
||||
val THROUGHPUT_DEADLINE_TIME = Duration(config.getInt("akka.actor.throughput-deadline-time",-1), TIME_UNIT)
|
||||
val THROUGHPUT_DEADLINE_TIME_MILLIS = THROUGHPUT_DEADLINE_TIME.toMillis.toInt
|
||||
val MAILBOX_TYPE: MailboxType = if (MAILBOX_CAPACITY < 0) UnboundedMailbox() else BoundedMailbox()
|
||||
val MAILBOX_TYPE: MailboxType = if (MAILBOX_CAPACITY < 1) UnboundedMailbox() else BoundedMailbox()
|
||||
|
||||
lazy val defaultGlobalDispatcher = {
|
||||
config.getSection("akka.actor.default-dispatcher").flatMap(from).getOrElse(globalExecutorBasedEventDrivenDispatcher)
|
||||
|
|
|
|||
|
|
@ -116,18 +116,18 @@ class ExecutorBasedEventDrivenDispatcher(
|
|||
override def mailboxSize(actorRef: ActorRef) = getMailbox(actorRef).size
|
||||
|
||||
def createMailbox(actorRef: ActorRef): AnyRef = mailboxType match {
|
||||
case b: UnboundedMailbox if b.blocking =>
|
||||
new DefaultUnboundedMessageQueue(true) with ExecutableMailbox {
|
||||
final def dispatcher = ExecutorBasedEventDrivenDispatcher.this
|
||||
case b: UnboundedMailbox =>
|
||||
if (b.blocking) {
|
||||
new DefaultUnboundedMessageQueue(true) with ExecutableMailbox {
|
||||
final def dispatcher = ExecutorBasedEventDrivenDispatcher.this
|
||||
}
|
||||
} else { //If we have an unbounded, non-blocking mailbox, we can go lockless
|
||||
new ConcurrentLinkedQueue[MessageInvocation] with MessageQueue with ExecutableMailbox {
|
||||
final def dispatcher = ExecutorBasedEventDrivenDispatcher.this
|
||||
final def enqueue(m: MessageInvocation) = this.add(m)
|
||||
final def dequeue(): MessageInvocation = this.poll()
|
||||
}
|
||||
}
|
||||
|
||||
case b: UnboundedMailbox if !b.blocking => //If we have an unbounded, non-blocking mailbox, we can go lockless
|
||||
new ConcurrentLinkedQueue[MessageInvocation] with MessageQueue with ExecutableMailbox {
|
||||
final def dispatcher = ExecutorBasedEventDrivenDispatcher.this
|
||||
final def enqueue(m: MessageInvocation) = this.add(m)
|
||||
final def dequeue(): MessageInvocation = this.poll()
|
||||
}
|
||||
|
||||
case b: BoundedMailbox =>
|
||||
new DefaultBoundedMessageQueue(b.capacity, b.pushTimeOut, b.blocking) with ExecutableMailbox {
|
||||
final def dispatcher = ExecutorBasedEventDrivenDispatcher.this
|
||||
|
|
@ -229,9 +229,31 @@ trait ExecutableMailbox extends Runnable { self: MessageQueue =>
|
|||
}
|
||||
}
|
||||
|
||||
object PriorityGenerator {
|
||||
/**
|
||||
* Creates a PriorityGenerator that uses the supplied function as priority generator
|
||||
*/
|
||||
def apply(priorityFunction: Any => Int): PriorityGenerator = new PriorityGenerator {
|
||||
def gen(message: Any): Int = priorityFunction(message)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* A PriorityGenerator is a convenience API to create a Comparator that orders the messages of a
|
||||
* PriorityExecutorBasedEventDrivenDispatcher
|
||||
*/
|
||||
abstract class PriorityGenerator extends java.util.Comparator[MessageInvocation] {
|
||||
def gen(message: Any): Int
|
||||
|
||||
final def compare(thisMessage: MessageInvocation, thatMessage: MessageInvocation): Int =
|
||||
gen(thisMessage.message) - gen(thatMessage.message)
|
||||
}
|
||||
|
||||
/**
|
||||
* A version of ExecutorBasedEventDrivenDispatcher that gives all actors registered to it a priority mailbox,
|
||||
* prioritized according to the supplied comparator.
|
||||
*
|
||||
* The dispatcher will process the messages with the _lowest_ priority first.
|
||||
*/
|
||||
class PriorityExecutorBasedEventDrivenDispatcher(
|
||||
name: String,
|
||||
|
|
@ -242,10 +264,10 @@ class PriorityExecutorBasedEventDrivenDispatcher(
|
|||
config: ThreadPoolConfig = ThreadPoolConfig()
|
||||
) extends ExecutorBasedEventDrivenDispatcher(name, throughput, throughputDeadlineTime, mailboxType, config) with PriorityMailbox {
|
||||
|
||||
def this(name: String, comparator: java.util.Comparator[MessageInvocation], throughput: Int, throughputDeadlineTime: Int, mailboxType: UnboundedMailbox) =
|
||||
def this(name: String, comparator: java.util.Comparator[MessageInvocation], throughput: Int, throughputDeadlineTime: Int, mailboxType: MailboxType) =
|
||||
this(name, comparator, throughput, throughputDeadlineTime, mailboxType,ThreadPoolConfig()) // Needed for Java API usage
|
||||
|
||||
def this(name: String, comparator: java.util.Comparator[MessageInvocation], throughput: Int, mailboxType: UnboundedMailbox) =
|
||||
def this(name: String, comparator: java.util.Comparator[MessageInvocation], throughput: Int, mailboxType: MailboxType) =
|
||||
this(name, comparator, throughput, Dispatchers.THROUGHPUT_DEADLINE_TIME_MILLIS, mailboxType) // Needed for Java API usage
|
||||
|
||||
def this(name: String, comparator: java.util.Comparator[MessageInvocation], throughput: Int) =
|
||||
|
|
@ -258,6 +280,15 @@ class PriorityExecutorBasedEventDrivenDispatcher(
|
|||
this(name, comparator, Dispatchers.THROUGHPUT, Dispatchers.THROUGHPUT_DEADLINE_TIME_MILLIS, Dispatchers.MAILBOX_TYPE) // Needed for Java API usage
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Can be used to give an ExecutorBasedEventDrivenDispatcher's actors priority-enabled mailboxes
|
||||
*
|
||||
* Usage:
|
||||
* new ExecutorBasedEventDrivenDispatcher(...) with PriorityMailbox {
|
||||
* val comparator = ...comparator that determines mailbox priority ordering...
|
||||
* }
|
||||
*/
|
||||
trait PriorityMailbox { self: ExecutorBasedEventDrivenDispatcher =>
|
||||
def comparator: java.util.Comparator[MessageInvocation]
|
||||
|
||||
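// A minimal sketch (not from this changeset) of the PriorityGenerator factory and
// the priority dispatcher described above; lower numbers are processed first. The
// dispatcher name, the message values and the two-argument constructor used here
// are illustrative assumptions.
import akka.dispatch.{PriorityGenerator, PriorityExecutorBasedEventDrivenDispatcher}

val priority = PriorityGenerator {
  case "shutdown" => 0                     // handled before anything else
  case "audit"    => 2                     // handled last
  case _          => 1
}

val prioDispatcher =
  new PriorityExecutorBasedEventDrivenDispatcher("prio-dispatcher", priority)
// assign it before starting the actor: self.dispatcher = prioDispatcher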
|
|
|
|||
|
|
@ -78,12 +78,12 @@ class ExecutorBasedEventDrivenWorkStealingDispatcher(
|
|||
|
||||
override private[akka] def dispatch(invocation: MessageInvocation) = {
|
||||
val mbox = getMailbox(invocation.receiver)
|
||||
if (mbox.dispatcherLock.locked && attemptDonationOf(invocation, mbox)) {
|
||||
/*if (!mbox.isEmpty && attemptDonationOf(invocation, mbox)) {
|
||||
//We were busy and we got to donate the message to some other lucky guy, we're done here
|
||||
} else {
|
||||
} else {*/
|
||||
mbox enqueue invocation
|
||||
registerForExecution(mbox)
|
||||
}
|
||||
//}
|
||||
}
|
||||
|
||||
override private[akka] def reRegisterForExecution(mbox: MessageQueue with ExecutableMailbox): Unit = {
|
||||
|
|
@ -110,13 +110,13 @@ class ExecutorBasedEventDrivenWorkStealingDispatcher(
|
|||
/**
|
||||
* Returns true if the donation succeeded or false otherwise
|
||||
*/
|
||||
protected def attemptDonationOf(message: MessageInvocation, donorMbox: MessageQueue with ExecutableMailbox): Boolean = {
|
||||
/*protected def attemptDonationOf(message: MessageInvocation, donorMbox: MessageQueue with ExecutableMailbox): Boolean = {
|
||||
val actors = members // copy to prevent concurrent modifications having any impact
|
||||
doFindDonorRecipient(donorMbox, actors, System.identityHashCode(message) % actors.size) match {
|
||||
case null => false
|
||||
case recipient => donate(message, recipient)
|
||||
}
|
||||
}
|
||||
}*/
|
||||
|
||||
/**
|
||||
* Rewrites the message and adds that message to the recipient's mailbox
|
||||
|
|
|
|||
|
|
@ -57,17 +57,21 @@ object Futures {
|
|||
}
|
||||
|
||||
/**
|
||||
* Java API
|
||||
* Java API.
|
||||
* Returns a Future to the result of the first future in the list that is completed
|
||||
*/
|
||||
def firstCompletedOf[T <: AnyRef](futures: java.lang.Iterable[Future[T]], timeout: Long): Future[T] =
|
||||
firstCompletedOf(scala.collection.JavaConversions.asScalaIterable(futures),timeout)
|
||||
firstCompletedOf(scala.collection.JavaConversions.iterableAsScalaIterable(futures),timeout)
|
||||
|
||||
/**
|
||||
* A non-blocking fold over the specified futures.
|
||||
* The fold is performed on the thread where the last future is completed,
|
||||
* the result will be the first failure of any of the futures, or any failure in the actual fold,
|
||||
* or the result of the fold.
|
||||
* Example:
|
||||
* <pre>
|
||||
* val result = Futures.fold(0)(futures)(_ + _).await.result
|
||||
* </pre>
|
||||
*/
|
||||
def fold[T,R](zero: R, timeout: Long = Actor.TIMEOUT)(futures: Iterable[Future[T]])(foldFun: (R, T) => R): Future[R] = {
|
||||
if(futures.isEmpty) {
|
||||
|
|
@ -83,7 +87,7 @@ object Futures {
|
|||
results add r.b
|
||||
if (results.size == allDone) { //Only one thread can get here
|
||||
try {
|
||||
result completeWithResult scala.collection.JavaConversions.asScalaIterable(results).foldLeft(zero)(foldFun)
|
||||
result completeWithResult scala.collection.JavaConversions.collectionAsScalaIterable(results).foldLeft(zero)(foldFun)
|
||||
} catch {
|
||||
case e: Exception =>
|
||||
EventHandler.error(e, this, e.getMessage)
|
||||
|
|
@ -111,10 +115,14 @@ object Futures {
|
|||
* or the result of the fold.
|
||||
*/
|
||||
def fold[T <: AnyRef, R <: AnyRef](zero: R, timeout: Long, futures: java.lang.Iterable[Future[T]], fun: akka.japi.Function2[R, T, R]): Future[R] =
|
||||
fold(zero, timeout)(scala.collection.JavaConversions.asScalaIterable(futures))( fun.apply _ )
|
||||
fold(zero, timeout)(scala.collection.JavaConversions.iterableAsScalaIterable(futures))( fun.apply _ )
|
||||
|
||||
/**
|
||||
* Initiates a fold over the supplied futures where the fold-zero is the result value of the Future that's completed first
|
||||
* Example:
|
||||
* <pre>
|
||||
* val result = Futures.reduce(futures)(_ + _).await.result
|
||||
* </pre>
|
||||
*/
|
||||
def reduce[T, R >: T](futures: Iterable[Future[T]], timeout: Long = Actor.TIMEOUT)(op: (R,T) => T): Future[R] = {
|
||||
if (futures.isEmpty)
|
||||
|
|
@ -138,27 +146,40 @@ object Futures {
|
|||
}
|
||||
|
||||
/**
|
||||
* Java API
|
||||
* Java API.
|
||||
* Initiates a fold over the supplied futures where the fold-zero is the result value of the Future that's completed first
|
||||
*/
|
||||
def reduce[T <: AnyRef, R >: T](futures: java.lang.Iterable[Future[T]], timeout: Long, fun: akka.japi.Function2[R, T, T]): Future[R] =
|
||||
reduce(scala.collection.JavaConversions.asScalaIterable(futures), timeout)(fun.apply _)
|
||||
reduce(scala.collection.JavaConversions.iterableAsScalaIterable(futures), timeout)(fun.apply _)
|
||||
|
||||
import scala.collection.mutable.Builder
|
||||
import scala.collection.generic.CanBuildFrom
|
||||
|
||||
/**
|
||||
* Simple version of Futures.traverse. Transforms a Traversable[Future[A]] into a Future[Traversable[A]].
|
||||
* Useful for reducing many Futures into a single Future.
|
||||
*/
|
||||
def sequence[A, M[_] <: Traversable[_]](in: M[Future[A]], timeout: Long = Actor.TIMEOUT)(implicit cbf: CanBuildFrom[M[Future[A]], A, M[A]]): Future[M[A]] =
|
||||
in.foldLeft(new DefaultCompletableFuture[Builder[A, M[A]]](timeout).completeWithResult(cbf(in)): Future[Builder[A, M[A]]])((fr, fa) => for (r <- fr; a <- fa.asInstanceOf[Future[A]]) yield (r += a)).map(_.result)
|
||||
|
||||
/**
|
||||
* Transforms a Traversable[A] into a Future[Traversable[B]] using the provided Function A => Future[B].
|
||||
* This is useful for performing a parallel map. For example, to apply a function to all items of a list
|
||||
* in parallel:
|
||||
* <pre>
|
||||
* val myFutureList = Futures.traverse(myList)(x => Future(myFunc(x)))
|
||||
* </pre>
|
||||
*/
|
||||
def traverse[A, B, M[_] <: Traversable[_]](in: M[A], timeout: Long = Actor.TIMEOUT)(fn: A => Future[B])(implicit cbf: CanBuildFrom[M[A], B, M[B]]): Future[M[B]] =
|
||||
in.foldLeft(new DefaultCompletableFuture[Builder[B, M[B]]](timeout).completeWithResult(cbf(in)): Future[Builder[B, M[B]]]) { (fr, a) =>
|
||||
val fb = fn(a.asInstanceOf[A])
|
||||
for (r <- fr; b <-fb) yield (r += b)
|
||||
}.map(_.result)
|
||||
|
||||
//Deprecations
|
||||
|
||||
|
||||
// =====================================
|
||||
// Deprecations
|
||||
// =====================================
|
||||
|
||||
/**
|
||||
* (Blocking!)
|
||||
*/
|
||||
|
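// A minimal sketch (not from this changeset) of Futures.sequence as documented
// above: many Future[Int] are collapsed into one Future[List[Int]]. The Future(...)
// factory is used the same way as in the traverse example in the scaladoc.
import akka.dispatch.{Future, Futures}

val futures: List[Future[Int]] = List(1, 2, 3).map(i => Future(i * 2))
val all: Future[List[Int]] = Futures.sequence(futures)
println(all.await.result)                  // Some(List(2, 4, 6))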
|
@ -299,6 +320,12 @@ sealed trait Future[+T] {
|
|||
/**
|
||||
* When the future is completed with a valid result, apply the provided
|
||||
* PartialFunction to the result.
|
||||
* <pre>
|
||||
* val result = future receive {
|
||||
* case Foo => "foo"
|
||||
* case Bar => "bar"
|
||||
* }.await.result
|
||||
* </pre>
|
||||
*/
|
||||
final def receive(pf: PartialFunction[Any, Unit]): Future[T] = onComplete { f =>
|
||||
val optr = f.result
|
||||
|
|
@ -313,6 +340,14 @@ sealed trait Future[+T] {
|
|||
* result of this Future if a match is found, or else return a MatchError.
|
||||
* If this Future is completed with an exception then the new Future will
|
||||
* also contain this exception.
|
||||
* Example:
|
||||
* <pre>
|
||||
* val future1 = for {
|
||||
* a <- actor !!! Req("Hello") collect { case Res(x: Int) => x }
|
||||
* b <- actor !!! Req(a) collect { case Res(x: String) => x }
|
||||
* c <- actor !!! Req(7) collect { case Res(x: String) => x }
|
||||
* } yield b + "-" + c
|
||||
* </pre>
|
||||
*/
|
||||
final def collect[A](pf: PartialFunction[Any, A]): Future[A] = {
|
||||
val fa = new DefaultCompletableFuture[A](timeoutInNanos, NANOS)
|
||||
|
|
@ -343,6 +378,14 @@ sealed trait Future[+T] {
|
|||
* Creates a new Future by applying a function to the successful result of
|
||||
* this Future. If this Future is completed with an exception then the new
|
||||
* Future will also contain this exception.
|
||||
* Example:
|
||||
* <pre>
|
||||
* val future1 = for {
|
||||
* a: Int <- actor !!! "Hello" // returns 5
|
||||
* b: String <- actor !!! a // returns "10"
|
||||
* c: String <- actor !!! 7 // returns "14"
|
||||
* } yield b + "-" + c
|
||||
* </pre>
|
||||
*/
|
||||
final def map[A](f: T => A): Future[A] = {
|
||||
val fa = new DefaultCompletableFuture[A](timeoutInNanos, NANOS)
|
||||
|
|
@ -371,6 +414,14 @@ sealed trait Future[+T] {
|
|||
* this Future, and returns the result of the function as the new Future.
|
||||
* If this Future is completed with an exception then the new Future will
|
||||
* also contain this exception.
|
||||
* Example:
|
||||
* <pre>
|
||||
* val future1 = for {
|
||||
* a: Int <- actor !!! "Hello" // returns 5
|
||||
* b: String <- actor !!! a // returns "10"
|
||||
* c: String <- actor !!! 7 // returns "14"
|
||||
* } yield b + "-" + c
|
||||
* </pre>
|
||||
*/
|
||||
final def flatMap[A](f: T => Future[A]): Future[A] = {
|
||||
val fa = new DefaultCompletableFuture[A](timeoutInNanos, NANOS)
|
||||
|
|
@ -425,7 +476,7 @@ sealed trait Future[+T] {
|
|||
}
|
||||
|
||||
/**
|
||||
* Returns the current result, throws the exception if one has been raised, else returns None
|
||||
* Returns the current result, throws the exception if one has been raised, else returns None
|
||||
*/
|
||||
final def resultOrException: Option[T] = {
|
||||
val v = value
|
||||
|
|
@ -450,50 +501,50 @@ sealed trait Future[+T] {
|
|||
}
|
||||
|
||||
/**
|
||||
* Essentially this is the Promise (or write-side) of a Future (read-side)
|
||||
* Essentially this is the Promise (or write-side) of a Future (read-side).
|
||||
*/
|
||||
trait CompletableFuture[T] extends Future[T] {
|
||||
/**
|
||||
* Completes this Future with the specified result, if not already completed,
|
||||
* returns this
|
||||
* Completes this Future with the specified result, if not already completed.
|
||||
* @return this
|
||||
*/
|
||||
def complete(value: Either[Throwable, T]): CompletableFuture[T]
|
||||
def complete(value: Either[Throwable, T]): Future[T]
|
||||
|
||||
/**
|
||||
* Completes this Future with the specified result, if not already completed,
|
||||
* returns this
|
||||
* Completes this Future with the specified result, if not already completed.
|
||||
* @return this
|
||||
*/
|
||||
final def completeWithResult(result: T): CompletableFuture[T] = complete(Right(result))
|
||||
final def completeWithResult(result: T): Future[T] = complete(Right(result))
|
||||
|
||||
/**
|
||||
* Completes this Future with the specified exception, if not already completed,
|
||||
* returns this
|
||||
* Completes this Future with the specified exception, if not already completed.
|
||||
* @return this
|
||||
*/
|
||||
final def completeWithException(exception: Throwable): CompletableFuture[T] = complete(Left(exception))
|
||||
final def completeWithException(exception: Throwable): Future[T] = complete(Left(exception))
|
||||
|
||||
/**
|
||||
* Completes this Future with the specified other Future, when that Future is completed,
|
||||
* unless this Future has already been completed
|
||||
* returns this
|
||||
* unless this Future has already been completed.
|
||||
* @return this.
|
||||
*/
|
||||
final def completeWith(other: Future[T]): CompletableFuture[T] = {
|
||||
final def completeWith(other: Future[T]): Future[T] = {
|
||||
other onComplete { f => complete(f.value.get) }
|
||||
this
|
||||
}
|
||||
|
||||
/**
|
||||
* Alias for complete(Right(value))
|
||||
* Alias for complete(Right(value)).
|
||||
*/
|
||||
final def << (value: T): CompletableFuture[T] = complete(Right(value))
|
||||
final def << (value: T): Future[T] = complete(Right(value))
|
||||
|
||||
/**
|
||||
* Alias for completeWith(other)
|
||||
* Alias for completeWith(other).
|
||||
*/
|
||||
final def << (other : Future[T]): CompletableFuture[T] = completeWith(other)
|
||||
final def << (other : Future[T]): Future[T] = completeWith(other)
|
||||
}
|
||||
|
||||
/**
|
||||
* Based on code from the actorom actor framework by Sergio Bossa [http://code.google.com/p/actorom/].
|
||||
* The default concrete Future implementation.
|
||||
*/
|
||||
class DefaultCompletableFuture[T](timeout: Long, timeunit: TimeUnit) extends CompletableFuture[T] {
|
||||
|
||||
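// A minimal sketch (not from this changeset) of the promise (write) side of a
// Future: DefaultCompletableFuture is the concrete implementation named above, and
// completeWithResult / << are the completion aliases documented above.
import java.util.concurrent.TimeUnit
import akka.dispatch.{CompletableFuture, DefaultCompletableFuture}

val promise: CompletableFuture[String] =
  new DefaultCompletableFuture[String](5000, TimeUnit.MILLISECONDS)

promise completeWithResult "done"          // equivalently: promise << "done"
println(promise.await.result)              // Some("done")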
|
|
|
|||
|
|
@ -143,7 +143,7 @@ trait MessageDispatcher {
|
|||
while (i.hasNext()) {
|
||||
val uuid = i.next()
|
||||
Actor.registry.actorFor(uuid) match {
|
||||
case Some(actor) => actor.stop
|
||||
case Some(actor) => actor.stop()
|
||||
case None => {}
|
||||
}
|
||||
}
|
||||
|
|
@ -215,12 +215,15 @@ trait MessageDispatcher {
|
|||
* Trait to be used for hooking in new dispatchers into Dispatchers.fromConfig
|
||||
*/
|
||||
abstract class MessageDispatcherConfigurator {
|
||||
/**
|
||||
* Returns an instance of MessageDispatcher given a Configuration
|
||||
*/
|
||||
def configure(config: Configuration): MessageDispatcher
|
||||
|
||||
def mailboxType(config: Configuration): MailboxType = {
|
||||
val capacity = config.getInt("mailbox-capacity", Dispatchers.MAILBOX_CAPACITY)
|
||||
// FIXME how do we read in isBlocking for mailbox? Now set to 'false'.
|
||||
if (capacity < 0) UnboundedMailbox()
|
||||
if (capacity < 1) UnboundedMailbox()
|
||||
else BoundedMailbox(false, capacity, Duration(config.getInt("mailbox-push-timeout-time", Dispatchers.MAILBOX_PUSH_TIME_OUT.toMillis.toInt), TIME_UNIT))
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -160,12 +160,11 @@ class MonitorableThreadFactory(val name: String) extends ThreadFactory {
|
|||
*/
|
||||
object MonitorableThread {
|
||||
val DEFAULT_NAME = "MonitorableThread"
|
||||
val created = new AtomicInteger
|
||||
val alive = new AtomicInteger
|
||||
@volatile var debugLifecycle = false
|
||||
}
|
||||
|
||||
// FIXME fix the issues with using the monitoring in MonitorableThread
|
||||
// FIXME use MonitorableThread.created and MonitorableThread.alive in monitoring
|
||||
val created = new AtomicInteger
|
||||
val alive = new AtomicInteger
|
||||
}
|
||||
|
||||
/**
|
||||
* @author <a href="http://jonasboner.com">Jonas Bonér</a>
|
||||
|
|
@ -178,7 +177,6 @@ class MonitorableThread(runnable: Runnable, name: String)
|
|||
})
|
||||
|
||||
override def run = {
|
||||
val debug = MonitorableThread.debugLifecycle
|
||||
try {
|
||||
MonitorableThread.alive.incrementAndGet
|
||||
super.run
|
||||
|
|
|
|||
|
|
@ -5,8 +5,6 @@
|
|||
package akka.event
|
||||
|
||||
import akka.actor._
|
||||
import Actor._
|
||||
import akka.dispatch._
|
||||
import akka.config.Config._
|
||||
import akka.config.ConfigurationException
|
||||
import akka.util.{ListenerManagement, ReflectiveAccess}
|
||||
|
|
@ -25,7 +23,7 @@ import akka.AkkaException
|
|||
* case EventHandler.Warning(instance, message) => ...
|
||||
* case EventHandler.Info(instance, message) => ...
|
||||
* case EventHandler.Debug(instance, message) => ...
|
||||
* case genericEvent => ...
|
||||
* case genericEvent => ...
|
||||
* }
|
||||
* })
|
||||
*
|
||||
|
|
@ -35,17 +33,17 @@ import akka.AkkaException
|
|||
* </pre>
|
||||
* <p/>
|
||||
* However best is probably to register the listener in the 'akka.conf'
|
||||
* configuration file.
|
||||
* configuration file.
|
||||
* <p/>
|
||||
* Log an error event:
|
||||
* <pre>
|
||||
* EventHandler.notify(EventHandler.Error(exception, this, message.toString))
|
||||
* EventHandler.notify(EventHandler.Error(exception, this, message))
|
||||
* </pre>
|
||||
* Or use the direct methods (better performance):
|
||||
* <pre>
|
||||
* EventHandler.error(exception, this, message.toString)
|
||||
* EventHandler.error(exception, this, message)
|
||||
* </pre>
|
||||
*
|
||||
*
|
||||
* @author <a href="http://jonasboner.com">Jonas Bonér</a>
|
||||
*/
|
||||
object EventHandler extends ListenerManagement {
|
||||
|
|
@ -61,11 +59,20 @@ object EventHandler extends ListenerManagement {
|
|||
|
||||
sealed trait Event {
|
||||
@transient val thread: Thread = Thread.currentThread
|
||||
val level: Int
|
||||
}
|
||||
case class Error(cause: Throwable, instance: AnyRef, message: Any = "") extends Event {
|
||||
override val level = ErrorLevel
|
||||
}
|
||||
case class Warning(instance: AnyRef, message: Any = "") extends Event {
|
||||
override val level = WarningLevel
|
||||
}
|
||||
case class Info(instance: AnyRef, message: Any = "") extends Event {
|
||||
override val level = InfoLevel
|
||||
}
|
||||
case class Debug(instance: AnyRef, message: Any = "") extends Event {
|
||||
override val level = DebugLevel
|
||||
}
|
||||
case class Error(cause: Throwable, instance: AnyRef, message: String = "") extends Event
|
||||
case class Warning(instance: AnyRef, message: String = "") extends Event
|
||||
case class Info(instance: AnyRef, message: String = "") extends Event
|
||||
case class Debug(instance: AnyRef, message: String = "") extends Event
|
||||
|
||||
val error = "[ERROR] [%s] [%s] [%s] %s\n%s".intern
|
||||
val warning = "[WARN] [%s] [%s] [%s] %s".intern
@ -73,7 +80,7 @@ object EventHandler extends ListenerManagement {
|
|||
val debug = "[DEBUG] [%s] [%s] [%s] %s".intern
|
||||
val generic = "[GENERIC] [%s] [%s]".intern
|
||||
val ID = "event:handler".intern
|
||||
|
||||
|
||||
class EventHandlerException extends AkkaException
|
||||
|
||||
lazy val EventHandlerDispatcher = Dispatchers.newExecutorBasedEventDrivenDispatcher(ID).build
@ -87,32 +94,61 @@ object EventHandler extends ListenerManagement {
|
|||
"Configuration option 'akka.event-handler-level' is invalid [" + unknown + "]")
|
||||
}
|
||||
|
||||
def notify(event: => AnyRef) = notifyListeners(event)
|
||||
def notify(event: Any) {
|
||||
if (event.isInstanceOf[Event]) {
|
||||
if (level >= event.asInstanceOf[Event].level) notifyListeners(event)
|
||||
} else
|
||||
notifyListeners(event)
|
||||
}
|
||||
|
||||
def notify[T <: Event : ClassManifest](event: => T) {
|
||||
if (level >= levelFor(classManifest[T].erasure.asInstanceOf[Class[_ <: Event]])) notifyListeners(event)
|
||||
}
|
||||
|
||||
def error(cause: Throwable, instance: AnyRef, message: => String) = {
|
||||
def error(cause: Throwable, instance: AnyRef, message: => String) {
|
||||
if (level >= ErrorLevel) notifyListeners(Error(cause, instance, message))
|
||||
}
|
||||
|
||||
def error(instance: AnyRef, message: => String) = {
|
||||
def error(cause: Throwable, instance: AnyRef, message: Any) {
|
||||
if (level >= ErrorLevel) notifyListeners(Error(cause, instance, message))
|
||||
}
|
||||
|
||||
def error(instance: AnyRef, message: => String) {
|
||||
if (level >= ErrorLevel) notifyListeners(Error(new EventHandlerException, instance, message))
|
||||
}
|
||||
|
||||
def warning(instance: AnyRef, message: => String) = {
|
||||
def error(instance: AnyRef, message: Any) {
|
||||
if (level >= ErrorLevel) notifyListeners(Error(new EventHandlerException, instance, message))
|
||||
}
|
||||
|
||||
def warning(instance: AnyRef, message: => String) {
|
||||
if (level >= WarningLevel) notifyListeners(Warning(instance, message))
|
||||
}
|
||||
|
||||
def info(instance: AnyRef, message: => String) = {
|
||||
def warning(instance: AnyRef, message: Any) {
|
||||
if (level >= WarningLevel) notifyListeners(Warning(instance, message))
|
||||
}
|
||||
|
||||
def info(instance: AnyRef, message: => String) {
|
||||
if (level >= InfoLevel) notifyListeners(Info(instance, message))
|
||||
}
|
||||
|
||||
def debug(instance: AnyRef, message: => String) = {
|
||||
def info(instance: AnyRef, message: Any) {
|
||||
if (level >= InfoLevel) notifyListeners(Info(instance, message))
|
||||
}
|
||||
|
||||
def debug(instance: AnyRef, message: => String) {
|
||||
if (level >= DebugLevel) notifyListeners(Debug(instance, message))
|
||||
}
|
||||
|
||||
def debug(instance: AnyRef, message: Any) {
|
||||
if (level >= DebugLevel) notifyListeners(Debug(instance, message))
|
||||
}
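A minimal usage sketch for the level-guarded logging methods above (not part of the diff; it assumes only the EventHandler object added in this file, and MyService is a hypothetical caller):

// Hypothetical caller illustrating the by-name logging methods above.
import akka.event.EventHandler

class MyService {
  def connect(host: String) {
    if (EventHandler.isDebugEnabled)
      EventHandler.debug(this, "connecting to " + host)   // guarded; the string is built lazily
    try {
      // ... open the connection ...
    } catch {
      case e: Exception =>
        // listeners are only notified when level >= ErrorLevel
        EventHandler.error(e, this, "could not connect to " + host)
    }
  }
}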
|
||||
|
||||
def isInfoEnabled = level >= InfoLevel
|
||||
|
||||
def isDebugEnabled = level >= DebugLevel
|
||||
|
||||
def formattedTimestamp = DateFormat.getInstance.format(new Date)
|
||||
|
||||
def stackTraceFor(e: Throwable) = {
@ -129,7 +165,7 @@ object EventHandler extends ListenerManagement {
|
|||
else if (eventClass.isInstanceOf[Debug]) DebugLevel
|
||||
else DebugLevel
|
||||
}
|
||||
|
||||
|
||||
class DefaultListener extends Actor {
|
||||
self.id = ID
|
||||
self.dispatcher = EventHandlerDispatcher
@ -165,10 +201,14 @@ object EventHandler extends ListenerManagement {
|
|||
}
|
||||
}
|
||||
|
||||
config.getList("akka.event-handlers") foreach { listenerName =>
|
||||
val defaultListeners = config.getList("akka.event-handlers") match {
|
||||
case Nil => "akka.event.EventHandler$DefaultListener" :: Nil
|
||||
case listeners => listeners
|
||||
}
|
||||
defaultListeners foreach { listenerName =>
|
||||
try {
|
||||
ReflectiveAccess.getClassFor[Actor](listenerName) map {
|
||||
clazz => addListener(Actor.actorOf(clazz).start)
|
||||
ReflectiveAccess.getClassFor[Actor](listenerName) map { clazz =>
|
||||
addListener(Actor.actorOf(clazz).start())
|
||||
}
|
||||
} catch {
|
||||
case e: Exception =>
@ -0,0 +1,44 @@
|
|||
package akka.remoteinterface
|
||||
|
||||
/**
|
||||
* Copyright (C) 2009-2011 Scalable Solutions AB <http://scalablesolutions.se>
|
||||
*/
|
||||
|
||||
import akka.actor.Actor
|
||||
import akka.event.EventHandler
|
||||
|
||||
/**
|
||||
* Remote client and server event listener that pipes the events to the standard Akka EventHandler.
|
||||
*
|
||||
* @author <a href="http://jonasboner.com">Jonas Bonér</a>
|
||||
*/
|
||||
class RemoteEventHandler extends Actor {
|
||||
import EventHandler._
|
||||
|
||||
self.id = ID
|
||||
self.dispatcher = EventHandlerDispatcher
|
||||
|
||||
def receive = {
|
||||
|
||||
// client
|
||||
case RemoteClientError(cause, client, address) => EventHandler.error(cause, client, "RemoteClientError - Address[%s]" format address.toString)
|
||||
case RemoteClientWriteFailed(request, cause, client, address) => EventHandler.error(cause, client, "RemoteClientWriteFailed - Request[%s] Address[%s]".format(request, address.toString))
|
||||
case RemoteClientDisconnected(client, address) => EventHandler.info(client, "RemoteClientDisconnected - Address[%s]" format address.toString)
|
||||
case RemoteClientConnected(client, address) => EventHandler.info(client, "RemoteClientConnected - Address[%s]" format address.toString)
|
||||
case RemoteClientStarted(client, address) => EventHandler.info(client, "RemoteClientStarted - Address[%s]" format address.toString)
|
||||
case RemoteClientShutdown(client, address) => EventHandler.info(client, "RemoteClientShutdown - Address[%s]" format address.toString)
|
||||
|
||||
// server
|
||||
case RemoteServerError(cause, server) => EventHandler.error(cause, server, "RemoteServerError")
|
||||
case RemoteServerWriteFailed(request, cause, server, clientAddress) => EventHandler.error(cause, server, "RemoteServerWriteFailed - Request[%s] Address[%s]" format (request, clientAddress.toString))
|
||||
case RemoteServerStarted(server) => EventHandler.info(server, "RemoteServerStarted")
|
||||
case RemoteServerShutdown(server) => EventHandler.info(server, "RemoteServerShutdown")
|
||||
case RemoteServerClientConnected(server, clientAddress) => EventHandler.info(server, "RemoteServerClientConnected - Address[%s]" format clientAddress.toString)
|
||||
case RemoteServerClientDisconnected(server, clientAddress) => EventHandler.info(server, "RemoteServerClientDisconnected - Address[%s]" format clientAddress.toString)
|
||||
case RemoteServerClientClosed(server, clientAddress) => EventHandler.info(server, "RemoteServerClientClosed - Address[%s]" format clientAddress.toString)
|
||||
|
||||
case _ => //ignore other
|
||||
}
|
||||
}
@ -5,22 +5,23 @@
|
|||
package akka.remoteinterface
|
||||
|
||||
import akka.japi.Creator
|
||||
import java.net.InetSocketAddress
|
||||
import akka.actor._
|
||||
import akka.util._
|
||||
import akka.dispatch.CompletableFuture
|
||||
import akka.config.Config.{config, TIME_UNIT}
|
||||
import java.util.concurrent.ConcurrentHashMap
|
||||
import akka.AkkaException
|
||||
import reflect.BeanProperty
|
||||
|
||||
import scala.reflect.BeanProperty
|
||||
|
||||
import java.net.InetSocketAddress
|
||||
import java.util.concurrent.ConcurrentHashMap
|
||||
import java.io.{PrintWriter, PrintStream}
|
||||
|
||||
trait RemoteModule {
|
||||
val UUID_PREFIX = "uuid:"
|
||||
val UUID_PREFIX = "uuid:".intern
|
||||
|
||||
def optimizeLocalScoped_?(): Boolean //Apply optimizations for remote operations in local scope
|
||||
protected[akka] def notifyListeners(message: => Any): Unit
|
||||
|
||||
|
||||
private[akka] def actors: ConcurrentHashMap[String, ActorRef]
|
||||
private[akka] def actorsByUuid: ConcurrentHashMap[String, ActorRef]
|
||||
private[akka] def actorsFactories: ConcurrentHashMap[String, () => ActorRef]
@ -28,7 +29,6 @@ trait RemoteModule {
|
|||
private[akka] def typedActorsByUuid: ConcurrentHashMap[String, AnyRef]
|
||||
private[akka] def typedActorsFactories: ConcurrentHashMap[String, () => AnyRef]
|
||||
|
||||
|
||||
/** Lookup methods **/
|
||||
|
||||
private[akka] def findActorById(id: String) : ActorRef = actors.get(id)
@ -84,7 +84,6 @@ case class RemoteClientWriteFailed(
|
|||
@BeanProperty client: RemoteClientModule,
|
||||
@BeanProperty remoteAddress: InetSocketAddress) extends RemoteClientLifeCycleEvent
|
||||
|
||||
|
||||
/**
|
||||
* Life-cycle events for RemoteServer.
|
||||
*/
@ -114,38 +113,57 @@ case class RemoteServerWriteFailed(
|
|||
/**
|
||||
* Thrown for example when trying to send a message using a RemoteClient that is either not started or shut down.
|
||||
*/
|
||||
class RemoteClientException private[akka] (message: String,
|
||||
@BeanProperty val client: RemoteClientModule,
|
||||
val remoteAddress: InetSocketAddress) extends AkkaException(message)
|
||||
class RemoteClientException private[akka] (
|
||||
message: String,
|
||||
@BeanProperty val client: RemoteClientModule,
|
||||
val remoteAddress: InetSocketAddress) extends AkkaException(message)
|
||||
|
||||
/**
|
||||
* Returned when a remote exception cannot be instantiated or parsed
|
||||
* Thrown when the remote server actor dispatching fails for some reason.
|
||||
*/
|
||||
case class UnparsableException private[akka] (originalClassName: String,
|
||||
originalMessage: String) extends AkkaException(originalMessage)
|
||||
class RemoteServerException private[akka] (message: String) extends AkkaException(message)
|
||||
|
||||
/**
|
||||
* Thrown when a remote exception sent over the wire cannot be loaded and instantiated
|
||||
*/
|
||||
case class CannotInstantiateRemoteExceptionDueToRemoteProtocolParsingErrorException private[akka] (cause: Throwable, originalClassName: String, originalMessage: String)
|
||||
extends AkkaException("\nParsingError[%s]\nOriginalException[%s]\nOriginalMessage[%s]"
|
||||
.format(cause.toString, originalClassName, originalMessage)) {
|
||||
override def printStackTrace = cause.printStackTrace
|
||||
override def printStackTrace(printStream: PrintStream) = cause.printStackTrace(printStream)
|
||||
override def printStackTrace(printWriter: PrintWriter) = cause.printStackTrace(printWriter)
|
||||
}
|
||||
|
||||
abstract class RemoteSupport extends ListenerManagement with RemoteServerModule with RemoteClientModule {
|
||||
|
||||
lazy val eventHandler: ActorRef = {
|
||||
val handler = Actor.actorOf[RemoteEventHandler].start()
|
||||
// add the remote client and server listener that pipes the events to the event handler system
|
||||
addListener(handler)
|
||||
handler
|
||||
}
|
||||
|
||||
def shutdown {
|
||||
eventHandler.stop()
|
||||
removeListener(eventHandler)
|
||||
this.shutdownClientModule
|
||||
this.shutdownServerModule
|
||||
clear
|
||||
}
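The lazy eventHandler above is one instance of the general listener pattern; a hedged sketch of the same add/remove pairing with a custom listener (ConnectionLogger is hypothetical, everything else is shown in this file or in RemoteEventHandler above):

// Hypothetical listener actor for remote life-cycle events.
import akka.actor.Actor
import akka.remoteinterface.RemoteClientConnected

class ConnectionLogger extends Actor {
  def receive = {
    case RemoteClientConnected(client, address) => println("connected to " + address)
    case _                                      => // ignore all other events
  }
}

// given some RemoteSupport instance `remote`:
//   val logger = Actor.actorOf[ConnectionLogger].start()
//   remote.addListener(logger)      // starts receiving RemoteClient*/RemoteServer* events
//   ...
//   remote.removeListener(logger)   // pair with stop(), as shutdown does above
//   logger.stop()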
|
||||
|
||||
|
||||
/**
|
||||
* Creates a Client-managed ActorRef out of the Actor of the specified Class.
|
||||
* If the supplied host and port are identical to those of the configured local node, it will be a local actor
|
||||
* <pre>
|
||||
* import Actor._
|
||||
* val actor = actorOf(classOf[MyActor],"www.akka.io", 2552)
|
||||
* actor.start
|
||||
* actor.start()
|
||||
* actor ! message
|
||||
* actor.stop
|
||||
* actor.stop()
|
||||
* </pre>
|
||||
* You can create and start the actor in one statement like this:
|
||||
* <pre>
|
||||
* val actor = actorOf(classOf[MyActor],"www.akka.io", 2552).start
|
||||
* val actor = actorOf(classOf[MyActor],"www.akka.io", 2552).start()
|
||||
* </pre>
|
||||
*/
|
||||
@deprecated("Will be removed after 1.1")
@ -158,13 +176,13 @@ abstract class RemoteSupport extends ListenerManagement with RemoteServerModule
|
|||
* <pre>
|
||||
* import Actor._
|
||||
* val actor = actorOf(classOf[MyActor],"www.akka.io",2552)
|
||||
* actor.start
|
||||
* actor.start()
|
||||
* actor ! message
|
||||
* actor.stop
|
||||
* actor.stop()
|
||||
* </pre>
|
||||
* You can create and start the actor in one statement like this:
|
||||
* <pre>
|
||||
* val actor = actorOf(classOf[MyActor],"www.akka.io",2552).start
|
||||
* val actor = actorOf(classOf[MyActor],"www.akka.io",2552).start()
|
||||
* </pre>
|
||||
*/
|
||||
@deprecated("Will be removed after 1.1")
@ -186,13 +204,13 @@ abstract class RemoteSupport extends ListenerManagement with RemoteServerModule
|
|||
* <pre>
|
||||
* import Actor._
|
||||
* val actor = actorOf[MyActor]("www.akka.io",2552)
|
||||
* actor.start
|
||||
* actor.start()
|
||||
* actor ! message
|
||||
* actor.stop
|
||||
* actor.stop()
|
||||
* </pre>
|
||||
* You can create and start the actor in one statement like this:
|
||||
* <pre>
|
||||
* val actor = actorOf[MyActor]("www.akka.io",2552).start
|
||||
* val actor = actorOf[MyActor]("www.akka.io",2552).start()
|
||||
* </pre>
|
||||
*/
|
||||
@deprecated("Will be removed after 1.1")
@ -471,4 +489,4 @@ trait RemoteClientModule extends RemoteModule { self: RemoteModule =>
|
|||
|
||||
@deprecated("Will be removed after 1.1")
|
||||
private[akka] def unregisterClientManagedActor(hostname: String, port: Int, uuid: Uuid): Unit
|
||||
}
|
||||
}
@ -6,19 +6,22 @@ package akka.routing
|
|||
|
||||
import akka.actor.ActorRef
|
||||
import scala.collection.JavaConversions._
|
||||
import scala.collection.immutable.Seq
|
||||
|
||||
/**
|
||||
* An Iterator that is either always empty or yields an infinite number of Ts.
|
||||
*/
|
||||
trait InfiniteIterator[T] extends Iterator[T]
|
||||
trait InfiniteIterator[T] extends Iterator[T] {
|
||||
val items: Seq[T]
|
||||
}
|
||||
|
||||
/**
|
||||
* CyclicIterator is a round-robin style InfiniteIterator that cycles the supplied List.
|
||||
*/
|
||||
class CyclicIterator[T](items: List[T]) extends InfiniteIterator[T] {
|
||||
case class CyclicIterator[T](val items: Seq[T]) extends InfiniteIterator[T] {
|
||||
def this(items: java.util.List[T]) = this(items.toList)
|
||||
|
||||
@volatile private[this] var current: List[T] = items
|
||||
@volatile private[this] var current: Seq[T] = items
|
||||
|
||||
def hasNext = items != Nil
@ -29,14 +32,13 @@ class CyclicIterator[T](items: List[T]) extends InfiniteIterator[T] {
|
|||
}
|
||||
|
||||
override def exists(f: T => Boolean): Boolean = items.exists(f)
|
||||
|
||||
}
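A short sketch of the round-robin contract (assumes only the CyclicIterator shown above):

// next() cycles through the supplied Seq forever; hasNext is false only for an empty Seq.
val it = CyclicIterator(List("a", "b", "c"))
it.next()   // "a"
it.next()   // "b"
it.next()   // "c"
it.next()   // "a" again - the iterator wraps around
it.hasNext  // true; it never runs dry for a non-empty Seq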
|
||||
|
||||
/**
|
||||
* This InfiniteIterator always returns the Actor that has the currently smallest mailbox,
|
||||
* useful for work-stealing.
|
||||
*/
|
||||
class SmallestMailboxFirstIterator(items : List[ActorRef]) extends InfiniteIterator[ActorRef] {
|
||||
case class SmallestMailboxFirstIterator(val items : Seq[ActorRef]) extends InfiniteIterator[ActorRef] {
|
||||
def this(items: java.util.List[ActorRef]) = this(items.toList)
|
||||
def hasNext = items != Nil
@ -15,7 +15,11 @@ trait Dispatcher { this: Actor =>
|
|||
|
||||
protected def routes: PartialFunction[Any, ActorRef]
|
||||
|
||||
protected def broadcast(message: Any) {}
|
||||
|
||||
protected def dispatch: Receive = {
|
||||
case Routing.Broadcast(message) =>
|
||||
broadcast(message)
|
||||
case a if routes.isDefinedAt(a) =>
|
||||
if (isSenderDefined) routes(a).forward(transform(a))(someSelf)
|
||||
else routes(a).!(transform(a))(None)
@ -34,15 +38,19 @@ abstract class UntypedDispatcher extends UntypedActor {
|
|||
|
||||
protected def route(msg: Any): ActorRef
|
||||
|
||||
protected def broadcast(message: Any) {}
|
||||
|
||||
private def isSenderDefined = self.senderFuture.isDefined || self.sender.isDefined
|
||||
|
||||
@throws(classOf[Exception])
|
||||
def onReceive(msg: Any): Unit = {
|
||||
val r = route(msg)
|
||||
if(r eq null)
|
||||
throw new IllegalStateException("No route for " + msg + " defined!")
|
||||
if (isSenderDefined) r.forward(transform(msg))(someSelf)
|
||||
else r.!(transform(msg))(None)
|
||||
if (msg.isInstanceOf[Routing.Broadcast]) broadcast(msg.asInstanceOf[Routing.Broadcast].message)
|
||||
else {
|
||||
val r = route(msg)
|
||||
if (r eq null) throw new IllegalStateException("No route for " + msg + " defined!")
|
||||
if (isSenderDefined) r.forward(transform(msg))(someSelf)
|
||||
else r.!(transform(msg))(None)
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -53,7 +61,11 @@ abstract class UntypedDispatcher extends UntypedActor {
|
|||
trait LoadBalancer extends Dispatcher { self: Actor =>
|
||||
protected def seq: InfiniteIterator[ActorRef]
|
||||
|
||||
protected def routes = { case x if seq.hasNext => seq.next }
|
||||
protected def routes = {
|
||||
case x if seq.hasNext => seq.next
|
||||
}
|
||||
|
||||
override def broadcast(message: Any) = seq.items.foreach(_ ! message)
|
||||
|
||||
override def isDefinedAt(msg: Any) = seq.exists( _.isDefinedAt(msg) )
|
||||
}
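For orientation, a hedged sketch of a concrete LoadBalancer built from the pieces above (Worker is a hypothetical actor type):

// import akka.actor.{Actor, ActorRef}
// import akka.actor.Actor._
// import akka.routing.{LoadBalancer, CyclicIterator, Routing}
//
// val pool = actorOf(new Actor with LoadBalancer {
//   val seq = new CyclicIterator[ActorRef](List(actorOf[Worker].start(), actorOf[Worker].start()))
// }).start()
// pool ! "job"                        // routes picks one worker via seq.next
// pool ! Routing.Broadcast("flush")   // broadcast sends to every element of seq.items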
@ -69,5 +81,7 @@ abstract class UntypedLoadBalancer extends UntypedDispatcher {
|
|||
if (seq.hasNext) seq.next
|
||||
else null
|
||||
|
||||
override def broadcast(message: Any) = seq.items.foreach(_ ! message)
|
||||
|
||||
override def isDefinedAt(msg: Any) = seq.exists( _.isDefinedAt(msg) )
|
||||
}
@ -9,6 +9,9 @@ import akka.actor.Actor._
|
|||
|
||||
object Routing {
|
||||
|
||||
sealed trait RoutingMessage
|
||||
case class Broadcast(message: Any) extends RoutingMessage
|
||||
|
||||
type PF[A, B] = PartialFunction[A, B]
|
||||
|
||||
/**
@ -31,26 +34,26 @@ object Routing {
|
|||
/**
|
||||
* Creates a LoadBalancer from the thunk-supplied InfiniteIterator.
|
||||
*/
|
||||
def loadBalancerActor(actors: => InfiniteIterator[ActorRef]): ActorRef =
|
||||
def loadBalancerActor(actors: => InfiniteIterator[ActorRef]): ActorRef =
|
||||
actorOf(new Actor with LoadBalancer {
|
||||
val seq = actors
|
||||
}).start
|
||||
}).start()
|
||||
|
||||
/**
|
||||
* Creates a Dispatcher given a routing and a message-transforming function.
|
||||
*/
|
||||
def dispatcherActor(routing: PF[Any, ActorRef], msgTransformer: (Any) => Any): ActorRef =
|
||||
def dispatcherActor(routing: PF[Any, ActorRef], msgTransformer: (Any) => Any): ActorRef =
|
||||
actorOf(new Actor with Dispatcher {
|
||||
override def transform(msg: Any) = msgTransformer(msg)
|
||||
def routes = routing
|
||||
}).start
|
||||
}).start()
|
||||
|
||||
/**
|
||||
* Creates a Dispatcher given a routing.
|
||||
*/
|
||||
def dispatcherActor(routing: PF[Any, ActorRef]): ActorRef = actorOf(new Actor with Dispatcher {
|
||||
def dispatcherActor(routing: PF[Any, ActorRef]): ActorRef = actorOf(new Actor with Dispatcher {
|
||||
def routes = routing
|
||||
}).start
|
||||
}).start()
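A hedged usage sketch for the factories above (Worker, Work and Flush are hypothetical message/actor types, everything else is defined in this object or in akka.routing):

// import akka.actor.Actor._
// import akka.routing.Routing._
//
// val workers  = List(actorOf[Worker].start(), actorOf[Worker].start())
// val balancer = loadBalancerActor(new CyclicIterator(workers))
// balancer ! Work(42)                    // forwarded to one worker, round-robin
// balancer ! Routing.Broadcast(Flush)    // Broadcast fans out to every worker
//
// val router = dispatcherActor({ case s: String => workers.head }, msg => msg.toString.trim)
// router ! "  hello  "                   // transformed to "hello", then routed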
|
||||
|
||||
/**
|
||||
* Creates an actor that pipes all incoming messages to
@ -1,11 +1,15 @@
|
|||
/**
|
||||
* Copyright (C) 2009-2011 Scalable Solutions AB <http://scalablesolutions.se>
|
||||
*/
|
||||
|
||||
package akka.util
|
||||
|
||||
import java.util.concurrent.locks.ReentrantLock
|
||||
import java.util.concurrent.atomic.AtomicInteger
|
||||
import java.util.concurrent.{ TimeUnit, BlockingQueue }
|
||||
import java.util.{ AbstractQueue, Queue, Collection, Iterator }
|
||||
|
||||
class BoundedBlockingQueue[E <: AnyRef](val maxCapacity: Int, private val backing: Queue[E]) extends AbstractQueue[E] with BlockingQueue[E] {
|
||||
class BoundedBlockingQueue[E <: AnyRef](
|
||||
val maxCapacity: Int, private val backing: Queue[E]) extends AbstractQueue[E] with BlockingQueue[E] {
|
||||
|
||||
backing match {
|
||||
case null => throw new IllegalArgumentException("Backing Queue may not be null")
|
@ -32,7 +36,7 @@ class BoundedBlockingQueue[E <: AnyRef](val maxCapacity: Int, private val backin
|
|||
require(backing.offer(e))
|
||||
notEmpty.signal()
|
||||
} finally {
|
||||
lock.unlock()
|
||||
lock.unlock()
|
||||
}
|
||||
}
|
||||
@ -319,4 +323,4 @@ class BoundedBlockingQueue[E <: AnyRef](val maxCapacity: Int, private val backin
|
|||
lock.unlock()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
@ -37,7 +37,7 @@ object Duration {
|
|||
* Construct a Duration by parsing a String. In case of a format error, a
|
||||
* RuntimeException is thrown. See `unapply(String)` for more information.
|
||||
*/
|
||||
def apply(s : String) : Duration = unapply(s) getOrElse error("format error")
|
||||
def apply(s : String) : Duration = unapply(s) getOrElse sys.error("format error")
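A quick sketch of the behaviour described in the comment above (the unit spellings are assumed from the regexes referenced further down, e.g. "seconds", "millis", "Inf"):

// Duration("5 seconds")    // Duration(5.0, SECONDS)
// Duration("100 millis")   // Duration(100.0, MILLISECONDS)
// Duration("Inf")          // the infinite Duration
// Duration("five seconds") // no regex matches -> unapply returns None -> sys.error("format error")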
|
||||
|
||||
/**
|
||||
* Deconstruct a Duration into length and unit if it is finite.
|
||||
|
|
@ -77,7 +77,7 @@ object Duration {
|
|||
if ( ms ne null) Some(Duration(JDouble.parseDouble(length), MILLISECONDS)) else
|
||||
if (mus ne null) Some(Duration(JDouble.parseDouble(length), MICROSECONDS)) else
|
||||
if ( ns ne null) Some(Duration(JDouble.parseDouble(length), NANOSECONDS)) else
|
||||
error("made some error in regex (should not be possible)")
|
||||
sys.error("made some error in regex (should not be possible)")
|
||||
case REinf() => Some(Inf)
|
||||
case REminf() => Some(MinusInf)
|
||||
case _ => None
|
||||
|
|
@ -317,26 +317,6 @@ class FiniteDuration(val length: Long, val unit: TimeUnit) extends Duration {
|
|||
override def hashCode = toNanos.asInstanceOf[Int]
|
||||
}
|
||||
|
||||
package object duration {
|
||||
implicit def intToDurationInt(n: Int) = new DurationInt(n)
|
||||
implicit def longToDurationLong(n: Long) = new DurationLong(n)
|
||||
implicit def doubleToDurationDouble(d: Double) = new DurationDouble(d)
|
||||
|
||||
implicit def pairIntToDuration(p : (Int, TimeUnit)) = Duration(p._1, p._2)
|
||||
implicit def pairLongToDuration(p : (Long, TimeUnit)) = Duration(p._1, p._2)
|
||||
implicit def durationToPair(d : Duration) = (d.length, d.unit)
|
||||
|
||||
implicit def intMult(i : Int) = new {
|
||||
def *(d : Duration) = d * i
|
||||
}
|
||||
implicit def longMult(l : Long) = new {
|
||||
def *(d : Duration) = d * l
|
||||
}
|
||||
implicit def doubleMult(f : Double) = new {
|
||||
def *(d : Duration) = d * f
|
||||
}
|
||||
}
|
||||
|
||||
class DurationInt(n: Int) {
|
||||
def nanoseconds = Duration(n, NANOSECONDS)
|
||||
def nanos = Duration(n, NANOSECONDS)
|
||||
|
|
|
|||
|
|
@ -27,7 +27,7 @@ trait ListenerManagement {
|
|||
* The <code>listener</code> is started by this method if manageLifeCycleOfListeners yields true.
|
||||
*/
|
||||
def addListener(listener: ActorRef) {
|
||||
if (manageLifeCycleOfListeners) listener.start
|
||||
if (manageLifeCycleOfListeners) listener.start()
|
||||
listeners add listener
|
||||
}
|
||||
|
||||
|
|
@ -37,7 +37,7 @@ trait ListenerManagement {
|
|||
*/
|
||||
def removeListener(listener: ActorRef) {
|
||||
listeners remove listener
|
||||
if (manageLifeCycleOfListeners) listener.stop
|
||||
if (manageLifeCycleOfListeners) listener.stop()
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
|
|||
|
|
@ -6,7 +6,6 @@ package akka.util
|
|||
|
||||
import akka.dispatch.{Future, CompletableFuture, MessageInvocation}
|
||||
import akka.config.{Config, ModuleNotAvailableException}
|
||||
import akka.AkkaException
|
||||
|
||||
import java.net.InetSocketAddress
|
||||
import akka.remoteinterface.RemoteSupport
|
||||
|
|
@ -45,13 +44,13 @@ object ReflectiveAccess {
|
|||
def ensureEnabled = if (!isEnabled) {
|
||||
val e = new ModuleNotAvailableException(
|
||||
"Can't load the remoting module, make sure that akka-remote.jar is on the classpath")
|
||||
EventHandler.warning(this, e.toString)
|
||||
EventHandler.debug(this, e.toString)
|
||||
throw e
|
||||
}
|
||||
val remoteSupportClass: Option[Class[_ <: RemoteSupport]] = getClassFor(TRANSPORT)
|
||||
|
||||
protected[akka] val defaultRemoteSupport: Option[() => RemoteSupport] =
|
||||
remoteSupportClass map { remoteClass =>
|
||||
protected[akka] val defaultRemoteSupport: Option[() => RemoteSupport] =
|
||||
remoteSupportClass map { remoteClass =>
|
||||
() => createInstance[RemoteSupport](
|
||||
remoteClass,
|
||||
Array[Class[_]](),
|
||||
|
|
@ -59,7 +58,7 @@ object ReflectiveAccess {
|
|||
) getOrElse {
|
||||
val e = new ModuleNotAvailableException(
|
||||
"Can't instantiate [%s] - make sure that akka-remote.jar is on the classpath".format(remoteClass.getName))
|
||||
EventHandler.warning(this, e.toString)
|
||||
EventHandler.debug(this, e.toString)
|
||||
throw e
|
||||
}
|
||||
}
|
||||
|
|
@ -135,7 +134,7 @@ object ReflectiveAccess {
|
|||
Some(ctor.newInstance(args: _*).asInstanceOf[T])
|
||||
} catch {
|
||||
case e: Exception =>
|
||||
EventHandler.warning(this, e.toString)
|
||||
EventHandler.debug(this, e.toString)
|
||||
None
|
||||
}
|
||||
|
||||
|
|
@ -154,7 +153,7 @@ object ReflectiveAccess {
|
|||
}
|
||||
} catch {
|
||||
case e: Exception =>
|
||||
EventHandler.warning(this, e.toString)
|
||||
EventHandler.debug(this, e.toString)
|
||||
None
|
||||
}
|
||||
|
||||
|
|
@ -168,7 +167,7 @@ object ReflectiveAccess {
|
|||
}
|
||||
} catch {
|
||||
case e: ExceptionInInitializerError =>
|
||||
EventHandler.warning(this, e.toString)
|
||||
EventHandler.debug(this, e.toString)
|
||||
throw e
|
||||
}
|
||||
|
||||
|
|
@ -176,23 +175,23 @@ object ReflectiveAccess {
|
|||
assert(fqn ne null)
|
||||
|
||||
// First, use the specified CL
|
||||
val first = try {
|
||||
Option(classloader.loadClass(fqn).asInstanceOf[Class[T]])
|
||||
} catch {
|
||||
case c: ClassNotFoundException =>
|
||||
EventHandler.warning(this, c.toString)
|
||||
None
|
||||
}
|
||||
val first = try {
|
||||
Option(classloader.loadClass(fqn).asInstanceOf[Class[T]])
|
||||
} catch {
|
||||
case c: ClassNotFoundException =>
|
||||
EventHandler.debug(this, c.toString)
|
||||
None
|
||||
}
|
||||
|
||||
if (first.isDefined) first
|
||||
else {
|
||||
else {
|
||||
// Second option is to use the ContextClassLoader
|
||||
val second = try {
|
||||
Option(Thread.currentThread.getContextClassLoader.loadClass(fqn).asInstanceOf[Class[T]])
|
||||
} catch {
|
||||
case c: ClassNotFoundException =>
|
||||
EventHandler.warning(this, c.toString)
|
||||
None
|
||||
val second = try {
|
||||
Option(Thread.currentThread.getContextClassLoader.loadClass(fqn).asInstanceOf[Class[T]])
|
||||
} catch {
|
||||
case c: ClassNotFoundException =>
|
||||
EventHandler.debug(this, c.toString)
|
||||
None
|
||||
}
|
||||
|
||||
if (second.isDefined) second
|
||||
|
|
@ -201,22 +200,22 @@ object ReflectiveAccess {
|
|||
// Don't try to use "loader" if we got the default "classloader" parameter
|
||||
if (classloader ne loader) Option(loader.loadClass(fqn).asInstanceOf[Class[T]])
|
||||
else None
|
||||
} catch {
|
||||
case c: ClassNotFoundException =>
|
||||
EventHandler.warning(this, c.toString)
|
||||
None
|
||||
} catch {
|
||||
case c: ClassNotFoundException =>
|
||||
EventHandler.debug(this, c.toString)
|
||||
None
|
||||
}
|
||||
|
||||
if (third.isDefined) third
|
||||
else {
|
||||
// Last option is Class.forName
|
||||
try {
|
||||
try {
|
||||
Option(Class.forName(fqn).asInstanceOf[Class[T]])
|
||||
} catch {
|
||||
case c: ClassNotFoundException =>
|
||||
EventHandler.warning(this, c.toString)
|
||||
None
|
||||
}
|
||||
} catch {
|
||||
case c: ClassNotFoundException =>
|
||||
EventHandler.debug(this, c.toString)
|
||||
None
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
27
akka-actor/src/main/scala/akka/util/package.scala
Normal file
|
|
@ -0,0 +1,27 @@
|
|||
/**
|
||||
* Copyright (C) 2009-2011 Scalable Solutions AB <http://scalablesolutions.se>
|
||||
*/
|
||||
|
||||
package akka.util
|
||||
|
||||
import java.util.concurrent.TimeUnit
|
||||
|
||||
package object duration {
|
||||
implicit def intToDurationInt(n: Int) = new DurationInt(n)
|
||||
implicit def longToDurationLong(n: Long) = new DurationLong(n)
|
||||
implicit def doubleToDurationDouble(d: Double) = new DurationDouble(d)
|
||||
|
||||
implicit def pairIntToDuration(p : (Int, TimeUnit)) = Duration(p._1, p._2)
|
||||
implicit def pairLongToDuration(p : (Long, TimeUnit)) = Duration(p._1, p._2)
|
||||
implicit def durationToPair(d : Duration) = (d.length, d.unit)
|
||||
|
||||
implicit def intMult(i : Int) = new {
|
||||
def *(d : Duration) = d * i
|
||||
}
|
||||
implicit def longMult(l : Long) = new {
|
||||
def *(d : Duration) = d * l
|
||||
}
|
||||
implicit def doubleMult(f : Double) = new {
|
||||
def *(d : Duration) = d * f
|
||||
}
|
||||
}
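A small illustration of the implicits this package object brings into scope (written only against the conversions shown above; a sketch, not part of the diff):

import akka.util.Duration
import akka.util.duration._
import java.util.concurrent.TimeUnit

val a: Duration = 100 nanos                          // intToDurationInt, then DurationInt.nanos
val b: Duration = (3, TimeUnit.SECONDS)              // pairIntToDuration converts the pair
val c: Duration = 2 * Duration(1, TimeUnit.SECONDS)  // intMult allows Int * Duration
val (length, unit) = durationToPair(b)               // back to a (Long, TimeUnit) pair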
@ -1,25 +0,0 @@
|
|||
/**
|
||||
* Copyright (C) 2009-2011 Scalable Solutions AB <http://scalablesolutions.se>
|
||||
*/
|
||||
|
||||
package akka
|
||||
|
||||
/**
|
||||
* Multiplying numbers used in test timeouts by a factor, set by system property.
|
||||
* Useful for Jenkins builds (where the machine may need more time).
|
||||
*/
|
||||
object Testing {
|
||||
val timeFactor: Double = {
|
||||
val factor = System.getProperty("akka.test.timefactor", "1.0")
|
||||
try {
|
||||
factor.toDouble
|
||||
} catch {
|
||||
case e: java.lang.NumberFormatException => 1.0
|
||||
}
|
||||
}
|
||||
|
||||
def time(t: Int): Int = (timeFactor * t).toInt
|
||||
def time(t: Long): Long = (timeFactor * t).toLong
|
||||
def time(t: Float): Float = (timeFactor * t).toFloat
|
||||
def time(t: Double): Double = timeFactor * t
|
||||
}
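The removed helper scales test timeouts by a single system property; a hedged sketch of its effect:

// run with -Dakka.test.timefactor=2.0
// Testing.timeFactor   // 2.0
// Testing.time(500)    // 1000  (Int timeout doubled)
// Testing.time(1.5)    // 3.0   (Double overload)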
@ -1,93 +0,0 @@
|
|||
package akka.actor
|
||||
|
||||
import java.util.concurrent.{TimeUnit, CyclicBarrier, TimeoutException}
|
||||
import akka.config.Supervision._
|
||||
import org.scalatest.junit.JUnitSuite
|
||||
import org.junit.Test
|
||||
|
||||
import akka.dispatch.Dispatchers
|
||||
import Actor._
|
||||
|
||||
import akka.Testing
|
||||
|
||||
object ActorFireForgetRequestReplySpec {
|
||||
class ReplyActor extends Actor {
|
||||
|
||||
def receive = {
|
||||
case "Send" =>
|
||||
self.reply("Reply")
|
||||
case "SendImplicit" =>
|
||||
self.sender.get ! "ReplyImplicit"
|
||||
}
|
||||
}
|
||||
|
||||
class CrashingTemporaryActor extends Actor {
|
||||
self.lifeCycle = Temporary
|
||||
|
||||
def receive = {
|
||||
case "Die" =>
|
||||
state.finished.await
|
||||
throw new Exception("Expected exception")
|
||||
}
|
||||
}
|
||||
|
||||
class SenderActor(replyActor: ActorRef) extends Actor {
|
||||
|
||||
def receive = {
|
||||
case "Init" =>
|
||||
replyActor ! "Send"
|
||||
case "Reply" => {
|
||||
state.s = "Reply"
|
||||
state.finished.await
|
||||
}
|
||||
case "InitImplicit" => replyActor ! "SendImplicit"
|
||||
case "ReplyImplicit" => {
|
||||
state.s = "ReplyImplicit"
|
||||
state.finished.await
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
object state {
|
||||
var s = "NIL"
|
||||
val finished = new CyclicBarrier(2)
|
||||
}
|
||||
}
|
||||
|
||||
class ActorFireForgetRequestReplySpec extends JUnitSuite {
|
||||
import ActorFireForgetRequestReplySpec._
|
||||
|
||||
@Test
|
||||
def shouldReplyToBangMessageUsingReply = {
|
||||
state.finished.reset
|
||||
val replyActor = actorOf[ReplyActor].start
|
||||
val senderActor = actorOf(new SenderActor(replyActor)).start
|
||||
senderActor ! "Init"
|
||||
try { state.finished.await(1L, TimeUnit.SECONDS) }
|
||||
catch { case e: TimeoutException => fail("Never got the message") }
|
||||
assert("Reply" === state.s)
|
||||
}
|
||||
|
||||
@Test
|
||||
def shouldReplyToBangMessageUsingImplicitSender = {
|
||||
state.finished.reset
|
||||
val replyActor = actorOf[ReplyActor].start
|
||||
val senderActor = actorOf(new SenderActor(replyActor)).start
|
||||
senderActor ! "InitImplicit"
|
||||
try { state.finished.await(1L, TimeUnit.SECONDS) }
|
||||
catch { case e: TimeoutException => fail("Never got the message") }
|
||||
assert("ReplyImplicit" === state.s)
|
||||
}
|
||||
|
||||
@Test
|
||||
def shouldShutdownCrashedTemporaryActor = {
|
||||
state.finished.reset
|
||||
val actor = actorOf[CrashingTemporaryActor].start
|
||||
assert(actor.isRunning)
|
||||
actor ! "Die"
|
||||
try { state.finished.await(10L, TimeUnit.SECONDS) }
|
||||
catch { case e: TimeoutException => fail("Never got the message") }
|
||||
Thread.sleep(Testing.time(500))
|
||||
assert(actor.isShutdown)
|
||||
}
|
||||
}
@ -1,125 +0,0 @@
|
|||
/**
|
||||
* Copyright (C) 2009-2011 Scalable Solutions AB <http://scalablesolutions.se>
|
||||
*/
|
||||
|
||||
package akka.actor
|
||||
|
||||
import org.scalatest.Spec
|
||||
import org.scalatest.matchers.ShouldMatchers
|
||||
import org.scalatest.BeforeAndAfterAll
|
||||
import org.scalatest.junit.JUnitRunner
|
||||
import org.junit.runner.RunWith
|
||||
|
||||
import akka.actor._
|
||||
import akka.dispatch.Future
|
||||
import java.util.concurrent.{CountDownLatch, TimeUnit}
|
||||
|
||||
object ActorRefSpec {
|
||||
|
||||
var latch = new CountDownLatch(4)
|
||||
|
||||
class ReplyActor extends Actor {
|
||||
var replyTo: Channel[Any] = null
|
||||
|
||||
def receive = {
|
||||
case "complexRequest" => {
|
||||
replyTo = self.channel
|
||||
val worker = Actor.actorOf[WorkerActor].start
|
||||
worker ! "work"
|
||||
}
|
||||
case "complexRequest2" =>
|
||||
val worker = Actor.actorOf[WorkerActor].start
|
||||
worker ! self.channel
|
||||
case "workDone" => replyTo ! "complexReply"
|
||||
case "simpleRequest" => self.reply("simpleReply")
|
||||
}
|
||||
}
|
||||
|
||||
class WorkerActor() extends Actor {
|
||||
def receive = {
|
||||
case "work" => {
|
||||
work
|
||||
self.reply("workDone")
|
||||
self.stop
|
||||
}
|
||||
case replyTo: Channel[Any] => {
|
||||
work
|
||||
replyTo ! "complexReply"
|
||||
}
|
||||
}
|
||||
|
||||
private def work {
|
||||
Thread.sleep(1000)
|
||||
}
|
||||
}
|
||||
|
||||
class SenderActor(replyActor: ActorRef) extends Actor {
|
||||
|
||||
def receive = {
|
||||
case "complex" => replyActor ! "complexRequest"
|
||||
case "complex2" => replyActor ! "complexRequest2"
|
||||
case "simple" => replyActor ! "simpleRequest"
|
||||
case "complexReply" => {
|
||||
latch.countDown
|
||||
}
|
||||
case "simpleReply" => {
|
||||
latch.countDown
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@RunWith(classOf[JUnitRunner])
|
||||
class ActorRefSpec extends
|
||||
Spec with
|
||||
ShouldMatchers with
|
||||
BeforeAndAfterAll {
|
||||
|
||||
import ActorRefSpec._
|
||||
|
||||
describe("ActorRef") {
|
||||
it("should support to reply via channel") {
|
||||
val serverRef = Actor.actorOf[ReplyActor].start
|
||||
val clientRef = Actor.actorOf(new SenderActor(serverRef)).start
|
||||
|
||||
clientRef ! "complex"
|
||||
clientRef ! "simple"
|
||||
clientRef ! "simple"
|
||||
clientRef ! "simple"
|
||||
assert(latch.await(4L, TimeUnit.SECONDS))
|
||||
latch = new CountDownLatch(4)
|
||||
clientRef ! "complex2"
|
||||
clientRef ! "simple"
|
||||
clientRef ! "simple"
|
||||
clientRef ! "simple"
|
||||
assert(latch.await(4L, TimeUnit.SECONDS))
|
||||
clientRef.stop
|
||||
serverRef.stop
|
||||
}
|
||||
|
||||
it("should stop when sent a poison pill") {
|
||||
val ref = Actor.actorOf(
|
||||
new Actor {
|
||||
def receive = {
|
||||
case 5 => self reply_? "five"
|
||||
case null => self reply_? "null"
|
||||
}
|
||||
}
|
||||
).start
|
||||
|
||||
val ffive: Future[String] = ref !!! 5
|
||||
val fnull: Future[String] = ref !!! null
|
||||
|
||||
intercept[ActorKilledException] {
|
||||
ref !! PoisonPill
|
||||
fail("shouldn't get here")
|
||||
}
|
||||
|
||||
assert(ffive.resultOrException.get == "five")
|
||||
assert(fnull.resultOrException.get == "null")
|
||||
|
||||
assert(ref.isRunning == false)
|
||||
assert(ref.isShutdown == true)
|
||||
}
|
||||
}
|
||||
}
@ -1,144 +0,0 @@
|
|||
/**
|
||||
* Copyright (C) 2009-2011 Scalable Solutions AB <http://scalablesolutions.se>
|
||||
*/
|
||||
|
||||
package akka.actor
|
||||
|
||||
import org.scalatest.junit.JUnitSuite
|
||||
import org.junit.Test
|
||||
import FSM._
|
||||
|
||||
import org.multiverse.api.latches.StandardLatch
|
||||
|
||||
import java.util.concurrent.TimeUnit
|
||||
|
||||
import akka.util.duration._
|
||||
|
||||
object FSMActorSpec {
|
||||
|
||||
|
||||
val unlockedLatch = new StandardLatch
|
||||
val lockedLatch = new StandardLatch
|
||||
val unhandledLatch = new StandardLatch
|
||||
val terminatedLatch = new StandardLatch
|
||||
val transitionLatch = new StandardLatch
|
||||
val initialStateLatch = new StandardLatch
|
||||
val transitionCallBackLatch = new StandardLatch
|
||||
|
||||
sealed trait LockState
|
||||
case object Locked extends LockState
|
||||
case object Open extends LockState
|
||||
|
||||
class Lock(code: String, timeout: (Long, TimeUnit)) extends Actor with FSM[LockState, CodeState] {
|
||||
|
||||
startWith(Locked, CodeState("", code))
|
||||
|
||||
when(Locked) {
|
||||
case Event(digit: Char, CodeState(soFar, code)) => {
|
||||
soFar + digit match {
|
||||
case incomplete if incomplete.length < code.length =>
|
||||
stay using CodeState(incomplete, code)
|
||||
case codeTry if (codeTry == code) => {
|
||||
doUnlock
|
||||
goto(Open) using CodeState("", code) forMax timeout
|
||||
}
|
||||
case wrong => {
|
||||
stay using CodeState("", code)
|
||||
}
|
||||
}
|
||||
}
|
||||
case Event("hello", _) => stay replying "world"
|
||||
case Event("bye", _) => stop(Shutdown)
|
||||
}
|
||||
|
||||
when(Open) {
|
||||
case Event(StateTimeout, _) => {
|
||||
doLock
|
||||
goto(Locked)
|
||||
}
|
||||
}
|
||||
|
||||
whenUnhandled {
|
||||
case Event(_, stateData) => {
|
||||
unhandledLatch.open
|
||||
stay
|
||||
}
|
||||
}
|
||||
|
||||
onTransition(transitionHandler)
|
||||
|
||||
def transitionHandler(from: LockState, to: LockState) = {
|
||||
if (from == Locked && to == Open) transitionLatch.open
|
||||
}
|
||||
|
||||
onTermination {
|
||||
case StopEvent(Shutdown, Locked, _) =>
|
||||
// stop is called from the Locked state with Shutdown as the reason...
|
||||
terminatedLatch.open
|
||||
}
|
||||
|
||||
// initialize the lock
|
||||
initialize
|
||||
|
||||
private def doLock() {
|
||||
lockedLatch.open
|
||||
}
|
||||
|
||||
private def doUnlock = {
|
||||
unlockedLatch.open
|
||||
}
|
||||
}
|
||||
|
||||
case class CodeState(soFar: String, code: String)
|
||||
}
|
||||
|
||||
class FSMActorSpec extends JUnitSuite {
|
||||
import FSMActorSpec._
|
||||
|
||||
|
||||
@Test
|
||||
def unlockTheLock = {
|
||||
|
||||
// lock that locked after being open for 1 sec
|
||||
val lock = Actor.actorOf(new Lock("33221", (1, TimeUnit.SECONDS))).start
|
||||
|
||||
val transitionTester = Actor.actorOf(new Actor { def receive = {
|
||||
case Transition(_, _, _) => transitionCallBackLatch.open
|
||||
case CurrentState(_, Locked) => initialStateLatch.open
|
||||
}}).start
|
||||
|
||||
lock ! SubscribeTransitionCallBack(transitionTester)
|
||||
assert(initialStateLatch.tryAwait(1, TimeUnit.SECONDS))
|
||||
|
||||
lock ! '3'
|
||||
lock ! '3'
|
||||
lock ! '2'
|
||||
lock ! '2'
|
||||
lock ! '1'
|
||||
|
||||
assert(unlockedLatch.tryAwait(1, TimeUnit.SECONDS))
|
||||
assert(transitionLatch.tryAwait(1, TimeUnit.SECONDS))
|
||||
assert(transitionCallBackLatch.tryAwait(1, TimeUnit.SECONDS))
|
||||
assert(lockedLatch.tryAwait(2, TimeUnit.SECONDS))
|
||||
|
||||
|
||||
lock ! "not_handled"
|
||||
assert(unhandledLatch.tryAwait(2, TimeUnit.SECONDS))
|
||||
|
||||
val answerLatch = new StandardLatch
|
||||
object Hello
|
||||
object Bye
|
||||
val tester = Actor.actorOf(new Actor {
|
||||
protected def receive = {
|
||||
case Hello => lock ! "hello"
|
||||
case "world" => answerLatch.open
|
||||
case Bye => lock ! "bye"
|
||||
}
|
||||
}).start
|
||||
tester ! Hello
|
||||
assert(answerLatch.tryAwait(2, TimeUnit.SECONDS))
|
||||
|
||||
tester ! Bye
|
||||
assert(terminatedLatch.tryAwait(2, TimeUnit.SECONDS))
|
||||
}
|
||||
}
@ -1,108 +0,0 @@
|
|||
package akka.actor
|
||||
|
||||
import org.scalatest.junit.JUnitSuite
|
||||
import org.junit.Test
|
||||
|
||||
import java.util.concurrent.TimeUnit
|
||||
import org.multiverse.api.latches.StandardLatch
|
||||
import Actor._
|
||||
import java.util.concurrent.atomic.AtomicInteger
|
||||
|
||||
class ReceiveTimeoutSpec extends JUnitSuite {
|
||||
|
||||
@Test def receiveShouldGetTimeout= {
|
||||
|
||||
val timeoutLatch = new StandardLatch
|
||||
|
||||
val timeoutActor = actorOf(new Actor {
|
||||
self.receiveTimeout = Some(500L)
|
||||
|
||||
protected def receive = {
|
||||
case ReceiveTimeout => timeoutLatch.open
|
||||
}
|
||||
}).start
|
||||
|
||||
assert(timeoutLatch.tryAwait(3, TimeUnit.SECONDS))
|
||||
timeoutActor.stop
|
||||
}
|
||||
|
||||
@Test def swappedReceiveShouldAlsoGetTimout = {
|
||||
val timeoutLatch = new StandardLatch
|
||||
|
||||
val timeoutActor = actorOf(new Actor {
|
||||
self.receiveTimeout = Some(500L)
|
||||
|
||||
protected def receive = {
|
||||
case ReceiveTimeout => timeoutLatch.open
|
||||
}
|
||||
}).start
|
||||
|
||||
// after max 1 second the timeout should already have been sent
|
||||
assert(timeoutLatch.tryAwait(3, TimeUnit.SECONDS))
|
||||
|
||||
val swappedLatch = new StandardLatch
|
||||
timeoutActor ! HotSwap(self => {
|
||||
case ReceiveTimeout => swappedLatch.open
|
||||
})
|
||||
|
||||
assert(swappedLatch.tryAwait(3, TimeUnit.SECONDS))
|
||||
timeoutActor.stop
|
||||
}
|
||||
|
||||
@Test def timeoutShouldBeRescheduledAfterRegularReceive = {
|
||||
|
||||
val timeoutLatch = new StandardLatch
|
||||
case object Tick
|
||||
val timeoutActor = actorOf(new Actor {
|
||||
self.receiveTimeout = Some(500L)
|
||||
|
||||
protected def receive = {
|
||||
case Tick => ()
|
||||
case ReceiveTimeout => timeoutLatch.open
|
||||
}
|
||||
}).start
|
||||
timeoutActor ! Tick
|
||||
|
||||
assert(timeoutLatch.tryAwait(2, TimeUnit.SECONDS) == true)
|
||||
timeoutActor.stop
|
||||
}
|
||||
|
||||
@Test def timeoutShouldBeTurnedOffIfDesired = {
|
||||
val count = new AtomicInteger(0)
|
||||
val timeoutLatch = new StandardLatch
|
||||
case object Tick
|
||||
val timeoutActor = actorOf(new Actor {
|
||||
self.receiveTimeout = Some(500L)
|
||||
|
||||
protected def receive = {
|
||||
case Tick => ()
|
||||
case ReceiveTimeout =>
|
||||
count.incrementAndGet
|
||||
timeoutLatch.open
|
||||
self.receiveTimeout = None
|
||||
}
|
||||
}).start
|
||||
timeoutActor ! Tick
|
||||
|
||||
assert(timeoutLatch.tryAwait(2, TimeUnit.SECONDS) == true)
|
||||
assert(count.get === 1)
|
||||
timeoutActor.stop
|
||||
}
|
||||
|
||||
@Test def timeoutShouldNotBeSentWhenNotSpecified = {
|
||||
val timeoutLatch = new StandardLatch
|
||||
val timeoutActor = actorOf(new Actor {
|
||||
|
||||
protected def receive = {
|
||||
case ReceiveTimeout => timeoutLatch.open
|
||||
}
|
||||
}).start
|
||||
|
||||
assert(timeoutLatch.tryAwait(1, TimeUnit.SECONDS) == false)
|
||||
timeoutActor.stop
|
||||
}
|
||||
|
||||
@Test def ActorsReceiveTimeoutShouldBeReceiveTimeout {
|
||||
assert(akka.actor.Actors.receiveTimeout() eq ReceiveTimeout)
|
||||
}
|
||||
}
@ -1,616 +0,0 @@
|
|||
/**
|
||||
* Copyright (C) 2009-2011 Scalable Solutions AB <http://scalablesolutions.se>
|
||||
*/
|
||||
|
||||
package akka.actor
|
||||
|
||||
import akka.config.Supervision._
|
||||
import akka.{OneWay, Die, Ping}
|
||||
import Actor._
|
||||
|
||||
import org.scalatest.junit.JUnitSuite
|
||||
import org.junit.Test
|
||||
import java.util.concurrent.atomic.AtomicInteger
|
||||
import java.util.concurrent. {CountDownLatch, TimeUnit, LinkedBlockingQueue}
|
||||
|
||||
object SupervisorSpec {
|
||||
var messageLog = new LinkedBlockingQueue[String]
|
||||
var oneWayLog = new LinkedBlockingQueue[String]
|
||||
|
||||
def clearMessageLogs {
|
||||
messageLog.clear
|
||||
oneWayLog.clear
|
||||
}
|
||||
|
||||
class PingPong1Actor extends Actor {
|
||||
import self._
|
||||
def receive = {
|
||||
case Ping =>
|
||||
messageLog.put("ping")
|
||||
reply("pong")
|
||||
|
||||
case OneWay =>
|
||||
oneWayLog.put("oneway")
|
||||
|
||||
case Die =>
|
||||
throw new RuntimeException("Expected exception; to test fault-tolerance")
|
||||
}
|
||||
override def postRestart(reason: Throwable) {
|
||||
messageLog.put(reason.getMessage)
|
||||
}
|
||||
}
|
||||
|
||||
class PingPong2Actor extends Actor {
|
||||
import self._
|
||||
def receive = {
|
||||
case Ping =>
|
||||
messageLog.put("ping")
|
||||
reply("pong")
|
||||
case Die =>
|
||||
throw new RuntimeException("Expected exception; to test fault-tolerance")
|
||||
}
|
||||
override def postRestart(reason: Throwable) {
|
||||
messageLog.put(reason.getMessage)
|
||||
}
|
||||
}
|
||||
|
||||
class PingPong3Actor extends Actor {
|
||||
import self._
|
||||
def receive = {
|
||||
case Ping =>
|
||||
messageLog.put("ping")
|
||||
reply("pong")
|
||||
case Die =>
|
||||
throw new RuntimeException("Expected exception; to test fault-tolerance")
|
||||
}
|
||||
|
||||
override def postRestart(reason: Throwable) {
|
||||
messageLog.put(reason.getMessage)
|
||||
}
|
||||
}
|
||||
|
||||
class TemporaryActor extends Actor {
|
||||
import self._
|
||||
lifeCycle = Temporary
|
||||
def receive = {
|
||||
case Ping =>
|
||||
messageLog.put("ping")
|
||||
reply("pong")
|
||||
case Die =>
|
||||
throw new RuntimeException("Expected exception; to test fault-tolerance")
|
||||
}
|
||||
|
||||
override def postRestart(reason: Throwable) {
|
||||
messageLog.put(reason.getMessage)
|
||||
}
|
||||
}
|
||||
|
||||
class Master extends Actor {
|
||||
self.faultHandler = OneForOneStrategy(List(classOf[Exception]), 5, 1000)
|
||||
val temp = self.spawnLink[TemporaryActor]
|
||||
override def receive = {
|
||||
case Die => temp !! (Die, 5000)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* @author <a href="http://jonasboner.com">Jonas Bonér</a>
|
||||
*/
|
||||
class SupervisorSpec extends JUnitSuite {
|
||||
import SupervisorSpec._
|
||||
|
||||
var pingpong1: ActorRef = _
|
||||
var pingpong2: ActorRef = _
|
||||
var pingpong3: ActorRef = _
|
||||
var temporaryActor: ActorRef = _
|
||||
|
||||
@Test def shoulNotRestartProgrammaticallyLinkedTemporaryActor = {
|
||||
clearMessageLogs
|
||||
val master = actorOf[Master].start
|
||||
|
||||
intercept[RuntimeException] {
|
||||
master !! (Die, 5000)
|
||||
}
|
||||
|
||||
Thread.sleep(1000)
|
||||
assert(messageLog.size === 0)
|
||||
}
|
||||
|
||||
@Test def shoulNotRestartTemporaryActor = {
|
||||
clearMessageLogs
|
||||
val sup = getTemporaryActorAllForOneSupervisor
|
||||
|
||||
intercept[RuntimeException] {
|
||||
temporaryActor !! (Die, 5000)
|
||||
}
|
||||
|
||||
Thread.sleep(1000)
|
||||
assert(messageLog.size === 0)
|
||||
}
|
||||
|
||||
@Test def shouldStartServerForNestedSupervisorHierarchy = {
|
||||
clearMessageLogs
|
||||
val sup = getNestedSupervisorsAllForOneConf
|
||||
sup.start
|
||||
|
||||
expect("pong") {
|
||||
(pingpong1 !! (Ping, 5000)).getOrElse("nil")
|
||||
}
|
||||
}
|
||||
|
||||
@Test def shouldKillSingleActorOneForOne = {
|
||||
clearMessageLogs
|
||||
val sup = getSingleActorOneForOneSupervisor
|
||||
|
||||
intercept[RuntimeException] {
|
||||
pingpong1 !! (Die, 5000)
|
||||
}
|
||||
|
||||
expect("Expected exception; to test fault-tolerance") {
|
||||
messageLog.poll(5, TimeUnit.SECONDS)
|
||||
}
|
||||
}
|
||||
|
||||
@Test def shouldCallKillCallSingleActorOneForOne = {
|
||||
clearMessageLogs
|
||||
val sup = getSingleActorOneForOneSupervisor
|
||||
|
||||
expect("pong") {
|
||||
(pingpong1 !! (Ping, 5000)).getOrElse("nil")
|
||||
}
|
||||
|
||||
expect("ping") {
|
||||
messageLog.poll(5, TimeUnit.SECONDS)
|
||||
}
|
||||
intercept[RuntimeException] {
|
||||
pingpong1 !! (Die, 5000)
|
||||
}
|
||||
|
||||
expect("Expected exception; to test fault-tolerance") {
|
||||
messageLog.poll(5, TimeUnit.SECONDS)
|
||||
}
|
||||
expect("pong") {
|
||||
(pingpong1 !! (Ping, 5000)).getOrElse("nil")
|
||||
}
|
||||
|
||||
expect("ping") {
|
||||
messageLog.poll(5, TimeUnit.SECONDS)
|
||||
}
|
||||
}
|
||||
|
||||
@Test def shouldKillSingleActorAllForOne = {
|
||||
clearMessageLogs
|
||||
val sup = getSingleActorAllForOneSupervisor
|
||||
|
||||
intercept[RuntimeException] {
|
||||
pingpong1 !! (Die, 5000)
|
||||
}
|
||||
|
||||
expect("Expected exception; to test fault-tolerance") {
|
||||
messageLog.poll(5, TimeUnit.SECONDS)
|
||||
}
|
||||
}
|
||||
|
||||
@Test def shouldCallKillCallSingleActorAllForOne = {
|
||||
clearMessageLogs
|
||||
val sup = getSingleActorAllForOneSupervisor
|
||||
|
||||
expect("pong") {
|
||||
(pingpong1 !! (Ping, 5000)).getOrElse("nil")
|
||||
}
|
||||
|
||||
expect("ping") {
|
||||
messageLog.poll(5, TimeUnit.SECONDS)
|
||||
}
|
||||
intercept[RuntimeException] {
|
||||
pingpong1 !! (Die, 5000)
|
||||
}
|
||||
|
||||
expect("Expected exception; to test fault-tolerance") {
|
||||
messageLog.poll(5, TimeUnit.SECONDS)
|
||||
}
|
||||
expect("pong") {
|
||||
(pingpong1 !! (Ping, 5000)).getOrElse("nil")
|
||||
}
|
||||
|
||||
expect("ping") {
|
||||
messageLog.poll(5, TimeUnit.SECONDS)
|
||||
}
|
||||
}
|
||||
|
||||
@Test def shouldKillMultipleActorsOneForOne1 = {
|
||||
clearMessageLogs
|
||||
val sup = getMultipleActorsOneForOneConf
|
||||
|
||||
intercept[RuntimeException] {
|
||||
pingpong1 !! (Die, 5000)
|
||||
}
|
||||
|
||||
expect("Expected exception; to test fault-tolerance") {
|
||||
messageLog.poll(5, TimeUnit.SECONDS)
|
||||
}
|
||||
}
|
||||
|
||||
@Test def shouldKillMultipleActorsOneForOne2 = {
|
||||
clearMessageLogs
|
||||
val sup = getMultipleActorsOneForOneConf
|
||||
|
||||
intercept[RuntimeException] {
|
||||
pingpong3 !! (Die, 5000)
|
||||
}
|
||||
|
||||
expect("Expected exception; to test fault-tolerance") {
|
||||
messageLog.poll(5, TimeUnit.SECONDS)
|
||||
}
|
||||
}
|
||||
|
||||
@Test def shouldKillCallMultipleActorsOneForOne = {
|
||||
clearMessageLogs
|
||||
val sup = getMultipleActorsOneForOneConf
|
||||
|
||||
expect("pong") {
|
||||
(pingpong1 !! (Ping, 5000)).getOrElse("nil")
|
||||
}
|
||||
|
||||
expect("pong") {
|
||||
(pingpong2 !! (Ping, 5000)).getOrElse("nil")
|
||||
}
|
||||
|
||||
expect("pong") {
|
||||
(pingpong3 !! (Ping, 5000)).getOrElse("nil")
|
||||
}
|
||||
|
||||
expect("ping") {
|
||||
messageLog.poll(5, TimeUnit.SECONDS)
|
||||
}
|
||||
expect("ping") {
|
||||
messageLog.poll(5, TimeUnit.SECONDS)
|
||||
}
|
||||
expect("ping") {
|
||||
messageLog.poll(5, TimeUnit.SECONDS)
|
||||
}
|
||||
intercept[RuntimeException] {
|
||||
pingpong2 !! (Die, 5000)
|
||||
}
|
||||
|
||||
expect("Expected exception; to test fault-tolerance") {
|
||||
messageLog.poll(5, TimeUnit.SECONDS)
|
||||
}
|
||||
expect("pong") {
|
||||
(pingpong1 !! (Ping, 5000)).getOrElse("nil")
|
||||
}
|
||||
|
||||
expect("pong") {
|
||||
(pingpong2 !! (Ping, 5000)).getOrElse("nil")
|
||||
}
|
||||
|
||||
expect("pong") {
|
||||
(pingpong3 !! (Ping, 5000)).getOrElse("nil")
|
||||
}
|
||||
|
||||
expect("ping") {
|
||||
messageLog.poll(5, TimeUnit.SECONDS)
|
||||
}
|
||||
expect("ping") {
|
||||
messageLog.poll(5, TimeUnit.SECONDS)
|
||||
}
|
||||
expect("ping") {
|
||||
messageLog.poll(5, TimeUnit.SECONDS)
|
||||
}
|
||||
}
|
||||
|
||||
@Test def shouldKillMultipleActorsAllForOne = {
|
||||
clearMessageLogs
|
||||
val sup = getMultipleActorsAllForOneConf
|
||||
|
||||
intercept[RuntimeException] {
|
||||
pingpong2 !! (Die, 5000)
|
||||
}
|
||||
|
||||
expect("Expected exception; to test fault-tolerance") {
|
||||
messageLog.poll(5, TimeUnit.SECONDS)
|
||||
}
|
||||
expect("Expected exception; to test fault-tolerance") {
|
||||
messageLog.poll(5, TimeUnit.SECONDS)
|
||||
}
|
||||
expect("Expected exception; to test fault-tolerance") {
|
||||
messageLog.poll(5, TimeUnit.SECONDS)
|
||||
}
|
||||
}
|
||||
|
||||
@Test def shouldCallKillCallMultipleActorsAllForOne = {
|
||||
clearMessageLogs
|
||||
val sup = getMultipleActorsAllForOneConf
|
||||
|
||||
expect("pong") {
|
||||
(pingpong1 !! (Ping, 5000)).getOrElse("nil")
|
||||
}
|
||||
|
||||
expect("pong") {
|
||||
(pingpong2 !! (Ping, 5000)).getOrElse("nil")
|
||||
}
|
||||
|
||||
expect("pong") {
|
||||
(pingpong3 !! (Ping, 5000)).getOrElse("nil")
|
||||
}
|
||||
|
||||
expect("ping") {
|
||||
messageLog.poll(5, TimeUnit.SECONDS)
|
||||
}
|
||||
expect("ping") {
|
||||
messageLog.poll(5, TimeUnit.SECONDS)
|
||||
}
|
||||
expect("ping") {
|
||||
messageLog.poll(5, TimeUnit.SECONDS)
|
||||
}
|
||||
intercept[RuntimeException] {
|
||||
pingpong2 !! (Die, 5000)
|
||||
}
|
||||
|
||||
expect("Expected exception; to test fault-tolerance") {
|
||||
messageLog.poll(5, TimeUnit.SECONDS)
|
||||
}
|
||||
expect("Expected exception; to test fault-tolerance") {
|
||||
messageLog.poll(5, TimeUnit.SECONDS)
|
||||
}
|
||||
expect("Expected exception; to test fault-tolerance") {
|
||||
messageLog.poll(5, TimeUnit.SECONDS)
|
||||
}
|
||||
expect("pong") {
|
||||
(pingpong1 !! (Ping, 5000)).getOrElse("nil")
|
||||
}
|
||||
|
||||
expect("pong") {
|
||||
(pingpong2 !! (Ping, 5000)).getOrElse("nil")
|
||||
}
|
||||
|
||||
expect("pong") {
|
||||
(pingpong3 !! (Ping, 5000)).getOrElse("nil")
|
||||
}
|
||||
|
||||
expect("ping") {
|
||||
messageLog.poll(5, TimeUnit.SECONDS)
|
||||
}
|
||||
expect("ping") {
|
||||
messageLog.poll(5, TimeUnit.SECONDS)
|
||||
}
|
||||
expect("ping") {
|
||||
messageLog.poll(5, TimeUnit.SECONDS)
|
||||
}
|
||||
}
|
||||
|
||||
@Test def shouldOneWayKillSingleActorOneForOne = {
|
||||
clearMessageLogs
|
||||
val sup = getSingleActorOneForOneSupervisor
|
||||
|
||||
pingpong1 ! Die
|
||||
|
||||
expect("Expected exception; to test fault-tolerance") {
|
||||
messageLog.poll(5, TimeUnit.SECONDS)
|
||||
}
|
||||
}
|
||||
|
||||
@Test def shouldOneWayCallKillCallSingleActorOneForOne = {
|
||||
clearMessageLogs
|
||||
val sup = getSingleActorOneForOneSupervisor
|
||||
|
||||
pingpong1 ! OneWay
|
||||
|
||||
expect("oneway") {
|
||||
oneWayLog.poll(5, TimeUnit.SECONDS)
|
||||
}
|
||||
pingpong1 ! Die
|
||||
|
||||
expect("Expected exception; to test fault-tolerance") {
|
||||
messageLog.poll(5, TimeUnit.SECONDS)
|
||||
}
|
||||
pingpong1 ! OneWay
|
||||
|
||||
expect("oneway") {
|
||||
oneWayLog.poll(5, TimeUnit.SECONDS)
|
||||
}
|
||||
}
|
||||
|
||||
@Test def shouldRestartKilledActorsForNestedSupervisorHierarchy = {
|
||||
clearMessageLogs
|
||||
val sup = getNestedSupervisorsAllForOneConf
|
||||
|
||||
|
||||
expect("pong") {
|
||||
(pingpong1 !! (Ping, 5000)).getOrElse("nil")
|
||||
}
|
||||
|
||||
expect("pong") {
|
||||
(pingpong2 !! (Ping, 5000)).getOrElse("nil")
|
||||
}
|
||||
|
||||
expect("pong") {
|
||||
(pingpong3 !! (Ping, 5000)).getOrElse("nil")
|
||||
}
|
||||
|
||||
expect("ping") {
|
||||
messageLog.poll(5, TimeUnit.SECONDS)
|
||||
}
|
||||
expect("ping") {
|
||||
messageLog.poll(5, TimeUnit.SECONDS)
|
||||
}
|
||||
expect("ping") {
|
||||
messageLog.poll(5, TimeUnit.SECONDS)
|
||||
}
|
||||
intercept[RuntimeException] {
|
||||
pingpong2 !! (Die, 5000)
|
||||
}
|
||||
|
||||
expect("Expected exception; to test fault-tolerance") {
|
||||
messageLog.poll(5 , TimeUnit.SECONDS)
|
||||
}
|
||||
expect("Expected exception; to test fault-tolerance") {
|
||||
messageLog.poll(5, TimeUnit.SECONDS)
|
||||
}
|
||||
expect("Expected exception; to test fault-tolerance") {
|
||||
messageLog.poll(5, TimeUnit.SECONDS)
|
||||
}
|
||||
expect("pong") {
|
||||
(pingpong1 !! (Ping, 5000)).getOrElse("nil")
|
||||
}
|
||||
|
||||
expect("pong") {
|
||||
(pingpong2 !! (Ping, 5000)).getOrElse("nil")
|
||||
}
|
||||
|
||||
expect("pong") {
|
||||
(pingpong3 !! (Ping, 5000)).getOrElse("nil")
|
||||
}
|
||||
|
||||
expect("ping") {
|
||||
messageLog.poll(5, TimeUnit.SECONDS)
|
||||
}
|
||||
expect("ping") {
|
||||
messageLog.poll(5, TimeUnit.SECONDS)
|
||||
}
|
||||
expect("ping") {
|
||||
messageLog.poll(5, TimeUnit.SECONDS)
|
||||
}
|
||||
}
|
||||
|
||||
@Test def shouldAttemptRestartWhenExceptionDuringRestart {
|
||||
val inits = new AtomicInteger(0)
|
||||
val dyingActor = actorOf(new Actor {
|
||||
self.lifeCycle = Permanent
|
||||
inits.incrementAndGet
|
||||
|
||||
if (!(inits.get % 2 != 0))
|
||||
throw new IllegalStateException("Don't wanna!")
|
||||
|
||||
def receive = {
|
||||
case Ping => self.reply_?("pong")
|
||||
case Die => throw new Exception("expected")
|
||||
}
|
||||
})
|
||||
val supervisor =
|
||||
Supervisor(
|
||||
SupervisorConfig(
|
||||
OneForOneStrategy(classOf[Exception] :: Nil,3,10000),
|
||||
Supervise(dyingActor,Permanent) :: Nil))
|
||||
|
||||
intercept[Exception] {
|
||||
dyingActor !! (Die, 5000)
|
||||
}
|
||||
|
||||
expect("pong") {
|
||||
(dyingActor !! (Ping, 5000)).getOrElse("nil")
|
||||
}
|
||||
|
||||
expect(3) { inits.get }
|
||||
supervisor.shutdown
|
||||
}
|
||||
|
||||
// =============================================
|
||||
// Create some supervisors with different configurations
|
||||
|
||||
def getTemporaryActorAllForOneSupervisor: Supervisor = {
|
||||
temporaryActor = actorOf[TemporaryActor].start
|
||||
|
||||
Supervisor(
|
||||
SupervisorConfig(
|
||||
AllForOneStrategy(List(classOf[Exception]), 3, 5000),
|
||||
Supervise(
|
||||
temporaryActor,
|
||||
Temporary)
|
||||
:: Nil))
|
||||
}
|
||||
|
||||
def getSingleActorAllForOneSupervisor: Supervisor = {
|
||||
pingpong1 = actorOf[PingPong1Actor].start
|
||||
|
||||
Supervisor(
|
||||
SupervisorConfig(
|
||||
AllForOneStrategy(List(classOf[Exception]), 3, 5000),
|
||||
Supervise(
|
||||
pingpong1,
|
||||
Permanent)
|
||||
:: Nil))
|
||||
}
|
||||
|
||||
def getSingleActorOneForOneSupervisor: Supervisor = {
|
||||
pingpong1 = actorOf[PingPong1Actor].start
|
||||
|
||||
Supervisor(
|
||||
SupervisorConfig(
|
||||
OneForOneStrategy(List(classOf[Exception]), 3, 5000),
|
||||
Supervise(
|
||||
pingpong1,
|
||||
Permanent)
|
||||
:: Nil))
|
||||
}
|
||||
|
||||
def getMultipleActorsAllForOneConf: Supervisor = {
|
||||
pingpong1 = actorOf[PingPong1Actor].start
|
||||
pingpong2 = actorOf[PingPong2Actor].start
|
||||
pingpong3 = actorOf[PingPong3Actor].start
|
||||
|
||||
Supervisor(
|
||||
SupervisorConfig(
|
||||
AllForOneStrategy(List(classOf[Exception]), 3, 5000),
|
||||
Supervise(
|
||||
pingpong1,
|
||||
Permanent)
|
||||
::
|
||||
Supervise(
|
||||
pingpong2,
|
||||
Permanent)
|
||||
::
|
||||
Supervise(
|
||||
pingpong3,
|
||||
Permanent)
|
||||
:: Nil))
|
||||
}
|
||||
|
||||
def getMultipleActorsOneForOneConf: Supervisor = {
|
||||
pingpong1 = actorOf[PingPong1Actor].start
|
||||
pingpong2 = actorOf[PingPong2Actor].start
|
||||
pingpong3 = actorOf[PingPong3Actor].start
|
||||
|
||||
Supervisor(
|
||||
SupervisorConfig(
|
||||
OneForOneStrategy(List(classOf[Exception]), 3, 5000),
|
||||
Supervise(
|
||||
pingpong1,
|
||||
Permanent)
|
||||
::
|
||||
Supervise(
|
||||
pingpong2,
|
||||
Permanent)
|
||||
::
|
||||
Supervise(
|
||||
pingpong3,
|
||||
Permanent)
|
||||
:: Nil))
|
||||
}
|
||||
|
||||
def getNestedSupervisorsAllForOneConf: Supervisor = {
|
||||
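// pingpong1 is supervised directly by the top-level supervisor; pingpong2 and
// pingpong3 sit under a nested child supervisor config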
pingpong1 = actorOf[PingPong1Actor].start
|
||||
pingpong2 = actorOf[PingPong2Actor].start
|
||||
pingpong3 = actorOf[PingPong3Actor].start
|
||||
|
||||
Supervisor(
|
||||
SupervisorConfig(
|
||||
AllForOneStrategy(List(classOf[Exception]), 3, 5000),
|
||||
Supervise(
|
||||
pingpong1,
|
||||
Permanent)
|
||||
::
|
||||
SupervisorConfig(
|
||||
AllForOneStrategy(Nil, 3, 5000),
|
||||
Supervise(
|
||||
pingpong2,
|
||||
Permanent)
|
||||
::
|
||||
Supervise(
|
||||
pingpong3,
|
||||
Permanent)
|
||||
:: Nil)
|
||||
:: Nil))
|
||||
}
|
||||
}
|
||||
|
|
@ -1,91 +0,0 @@
|
|||
package akka.dispatch
|
||||
|
||||
import java.util.concurrent.CountDownLatch
|
||||
import java.util.concurrent.TimeUnit
|
||||
import java.util.concurrent.atomic.AtomicBoolean
|
||||
import java.util.concurrent.locks.Lock
|
||||
import java.util.concurrent.locks.ReentrantLock
|
||||
|
||||
import org.scalatest.junit.JUnitSuite
|
||||
import org.junit.{Test, Before}
|
||||
|
||||
import akka.actor.Actor
|
||||
import Actor._
|
||||
|
||||
// FIXME use this test when we have removed the MessageInvoker classes
|
||||
/*
|
||||
class ThreadBasedDispatcherSpec extends JUnitSuite {
|
||||
private var threadingIssueDetected: AtomicBoolean = null
|
||||
val key1 = actorOf(new Actor { def receive = { case _ => {}} })
|
||||
val key2 = actorOf(new Actor { def receive = { case _ => {}} })
|
||||
val key3 = actorOf(new Actor { def receive = { case _ => {}} })
|
||||
|
||||
class TestMessageHandle(handleLatch: CountDownLatch) extends MessageInvoker {
|
||||
val guardLock: Lock = new ReentrantLock
|
||||
|
||||
def invoke(message: MessageInvocation) {
|
||||
try {
|
||||
if (threadingIssueDetected.get) return
|
||||
if (guardLock.tryLock) {
|
||||
handleLatch.countDown
|
||||
} else {
|
||||
threadingIssueDetected.set(true)
|
||||
}
|
||||
} catch {
|
||||
case e: Exception => threadingIssueDetected.set(true)
|
||||
} finally {
|
||||
guardLock.unlock
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Before
|
||||
def setUp = {
|
||||
threadingIssueDetected = new AtomicBoolean(false)
|
||||
}
|
||||
|
||||
@Test
|
||||
def shouldMessagesDispatchedToTheSameHandlerAreExecutedSequentially = {
|
||||
internalTestMessagesDispatchedToTheSameHandlerAreExecutedSequentially
|
||||
}
|
||||
|
||||
@Test
|
||||
def shouldMessagesDispatchedToHandlersAreExecutedInFIFOOrder = {
|
||||
internalTestMessagesDispatchedToHandlersAreExecutedInFIFOOrder
|
||||
}
|
||||
|
||||
private def internalTestMessagesDispatchedToTheSameHandlerAreExecutedSequentially(): Unit = {
|
||||
val guardLock = new ReentrantLock
|
||||
val handleLatch = new CountDownLatch(100)
|
||||
val dispatcher = new ThreadBasedDispatcher("name", new TestMessageHandle(handleLatch))
|
||||
dispatcher.start
|
||||
for (i <- 0 until 100) {
|
||||
dispatcher.dispatch(new MessageInvocation(key1, new Object, None, None))
|
||||
}
|
||||
assert(handleLatch.await(5, TimeUnit.SECONDS))
|
||||
assert(!threadingIssueDetected.get)
|
||||
}
|
||||
|
||||
private def internalTestMessagesDispatchedToHandlersAreExecutedInFIFOOrder(): Unit = {
|
||||
val handleLatch = new CountDownLatch(100)
|
||||
val dispatcher = new ThreadBasedDispatcher("name", new MessageInvoker {
|
||||
var currentValue = -1;
|
||||
def invoke(message: MessageInvocation) {
|
||||
if (threadingIssueDetected.get) return
|
||||
val messageValue = message.message.asInstanceOf[Int]
|
||||
if (messageValue.intValue == currentValue + 1) {
|
||||
currentValue = messageValue.intValue
|
||||
handleLatch.countDown
|
||||
} else threadingIssueDetected.set(true)
|
||||
}
|
||||
})
|
||||
dispatcher.start
|
||||
for (i <- 0 until 100) {
|
||||
dispatcher.dispatch(new MessageInvocation(key1, i, None, None))
|
||||
}
|
||||
assert(handleLatch.await(5, TimeUnit.SECONDS))
|
||||
assert(!threadingIssueDetected.get)
|
||||
dispatcher.postStop
|
||||
}
|
||||
}
|
||||
*/
|
||||
49
akka-docs/Makefile
Normal file
|
|
@ -0,0 +1,49 @@
|
|||
# Makefile for Sphinx documentation
|
||||
#
|
||||
|
||||
# You can set these variables from the command line.
|
||||
SPHINXOPTS =
|
||||
SPHINXBUILD = sphinx-build
|
||||
PAPER =
|
||||
BUILDDIR = _build
|
||||
|
||||
# Internal variables.
|
||||
PAPEROPT_a4 = -D latex_paper_size=a4
|
||||
PAPEROPT_letter = -D latex_paper_size=letter
|
||||
ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
|
||||
|
||||
.PHONY: help clean html singlehtml latex pdf
|
||||
|
||||
help:
|
||||
@echo "Please use \`make <target>' where <target> is one of"
|
||||
@echo " html to make standalone HTML files"
|
||||
@echo " singlehtml to make a single large HTML file"
|
||||
@echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
|
||||
@echo " pdf to make LaTeX files and run them through pdflatex"
|
||||
|
||||
clean:
|
||||
-rm -rf $(BUILDDIR)/*
|
||||
|
||||
html:
|
||||
$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
|
||||
@echo
|
||||
@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
|
||||
|
||||
singlehtml:
|
||||
$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
|
||||
@echo
|
||||
@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
|
||||
|
||||
latex:
|
||||
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
|
||||
@echo
|
||||
@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
|
||||
@echo "Run \`make' in that directory to run these through (pdf)latex" \
|
||||
"(use \`make latexpdf' here to do that automatically)."
|
||||
|
||||
pdf:
|
||||
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
|
||||
@echo "Running LaTeX files through pdflatex..."
|
||||
make -C $(BUILDDIR)/latex all-pdf
|
||||
@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
|
||||
|
||||
BIN
akka-docs/_static/akka.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 5.8 KiB |
BIN
akka-docs/_static/logo.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 6.6 KiB |
65
akka-docs/conf.py
Normal file
|
|
@ -0,0 +1,65 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Akka documentation build configuration file.
|
||||
#
|
||||
|
||||
import sys, os
|
||||
|
||||
# -- General configuration -----------------------------------------------------
|
||||
|
||||
extensions = ['sphinx.ext.todo']
|
||||
|
||||
templates_path = ['_templates']
|
||||
source_suffix = '.rst'
|
||||
master_doc = 'index'
|
||||
exclude_patterns = ['_build', 'pending']
|
||||
|
||||
project = u'Akka'
|
||||
copyright = u'2009-2011, Scalable Solutions AB'
|
||||
version = '1.1'
|
||||
release = '1.1'
|
||||
|
||||
pygments_style = 'akka'
|
||||
highlight_language = 'scala'
|
||||
|
||||
# -- Options for HTML output ---------------------------------------------------
|
||||
|
||||
html_theme = 'akka'
|
||||
html_theme_options = {
|
||||
'full_logo': 'true'
|
||||
}
|
||||
html_theme_path = ['themes']
|
||||
|
||||
html_title = 'Akka Documentation'
|
||||
html_logo = '_static/logo.png'
|
||||
#html_favicon = None
|
||||
|
||||
html_static_path = ['_static']
|
||||
|
||||
html_last_updated_fmt = '%b %d, %Y'
|
||||
#html_sidebars = {}
|
||||
#html_additional_pages = {}
|
||||
html_domain_indices = False
|
||||
html_use_index = False
|
||||
html_show_sourcelink = False
|
||||
html_show_sphinx = False
|
||||
html_show_copyright = True
|
||||
htmlhelp_basename = 'Akkadoc'
|
||||
|
||||
# -- Options for LaTeX output --------------------------------------------------
|
||||
|
||||
latex_paper_size = 'a4'
|
||||
latex_font_size = '10pt'
|
||||
|
||||
latex_documents = [
|
||||
('index', 'Akka.tex', u' Akka Documentation',
|
||||
u'Scalable Solutions AB', 'manual'),
|
||||
]
|
||||
|
||||
latex_elements = {
|
||||
'classoptions': ',oneside,openany',
|
||||
'babel': '\\usepackage[english]{babel}',
|
||||
'preamble': '\\definecolor{VerbatimColor}{rgb}{0.935,0.935,0.935}'
|
||||
}
|
||||
|
||||
# latex_logo = '_static/akka.png'
|
||||
81
akka-docs/index.rst
Normal file
|
|
@ -0,0 +1,81 @@
|
|||
Contents
|
||||
========
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
|
||||
manual/getting-started-first
|
||||
pending/actor-registry-java
|
||||
pending/actor-registry-scala
|
||||
pending/actors-scala
|
||||
pending/agents-scala
|
||||
pending/articles
|
||||
pending/benchmarks
|
||||
pending/building-akka
|
||||
pending/buildr
|
||||
pending/cluster-membership
|
||||
pending/companies-using-akka
|
||||
pending/configuration
|
||||
pending/dataflow-java
|
||||
pending/dataflow-scala
|
||||
pending/deployment-scenarios
|
||||
pending/developer-guidelines
|
||||
pending/dispatchers-java
|
||||
pending/dispatchers-scala
|
||||
pending/event-handler
|
||||
pending/external-sample-projects
|
||||
pending/fault-tolerance-java
|
||||
pending/fault-tolerance-scala
|
||||
pending/Feature Stability Matrix
|
||||
pending/fsm-scala
|
||||
pending/futures-scala
|
||||
pending/getting-started
|
||||
pending/guice-integration
|
||||
pending/Home
|
||||
pending/http
|
||||
pending/issue-tracking
|
||||
pending/language-bindings
|
||||
pending/licenses
|
||||
pending/logging
|
||||
pending/Migration-1.0-1.1
|
||||
pending/migration-guide-0.10.x-1.0.x
|
||||
pending/migration-guide-0.7.x-0.8.x
|
||||
pending/migration-guide-0.8.x-0.9.x
|
||||
pending/migration-guide-0.9.x-0.10.x
|
||||
pending/migration-guides
|
||||
pending/Recipes
|
||||
pending/release-notes
|
||||
pending/remote-actors-java
|
||||
pending/remote-actors-scala
|
||||
pending/routing-java
|
||||
pending/routing-scala
|
||||
pending/scheduler
|
||||
pending/security
|
||||
pending/serialization-java
|
||||
pending/serialization-scala
|
||||
pending/servlet
|
||||
pending/slf4j
|
||||
pending/sponsors
|
||||
pending/stm
|
||||
pending/stm-java
|
||||
pending/stm-scala
|
||||
pending/team
|
||||
pending/test
|
||||
pending/testkit
|
||||
pending/testkit-example
|
||||
pending/third-party-integrations
|
||||
pending/transactors-java
|
||||
pending/transactors-scala
|
||||
pending/tutorial-chat-server-java
|
||||
pending/tutorial-chat-server-scala
|
||||
pending/typed-actors-java
|
||||
pending/typed-actors-scala
|
||||
pending/untyped-actors-java
|
||||
pending/use-cases
|
||||
pending/web
|
||||
|
||||
Links
|
||||
=====
|
||||
|
||||
* `Akka Documentation <http://doc.akka.io>`_
|
||||
* `Support <http://scalablesolutions.se>`_
|
||||
503
akka-docs/manual/getting-started-first.rst
Normal file
|
|
@ -0,0 +1,503 @@
|
|||
Getting Started Tutorial: First Chapter
|
||||
=======================================
|
||||
|
||||
Introduction
|
||||
------------
|
||||
|
||||
Welcome to the first tutorial on how to get started with Akka and Scala. We assume that you already know what Akka and Scala are and will now focus on the steps necessary to start your first project.
|
||||
|
||||
There are two variations of this first tutorial:
|
||||
|
||||
- creating a standalone project and running it from the command line
|
||||
- creating an SBT (Simple Build Tool) project and running it from within SBT
|
||||
|
||||
Since they are so similar we will present them both in this tutorial.
|
||||
|
||||
The sample application that we will create uses actors to calculate the value of Pi. Calculating Pi is a CPU intensive operation and we will utilize Akka Actors to write a concurrent solution that scales out to multi-core processors. This sample will be extended in future tutorials to use Akka Remote Actors to scale out on multiple machines in a cluster.
|
||||
|
||||
We will be using an algorithm that is called "embarrassingly parallel" which just means that each job is completely isolated and not coupled with any other job. Since this algorithm is so parallelizable it suits the actor model very well.
|
||||
|
||||
Here is the formula for the algorithm we will use:
|
||||
|
||||
.. image:: pi-formula.png
|
||||
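In case the image does not render for you: the series in question is the Leibniz formula for Pi, which is also exactly what the ``calculatePiFor`` method further down implements. In LaTeX notation::

    \pi = \sum_{n=0}^{\infty} \frac{4 \, (-1)^n}{2n + 1}
        = 4 - \frac{4}{3} + \frac{4}{5} - \frac{4}{7} + \dots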
|
||||
In this particular algorithm the master splits the series into chunks which are sent out to each worker actor to be processed. When a worker has processed its chunk it sends the result back to the master, which aggregates the total result.
|
||||
|
||||
Tutorial source code
|
||||
--------------------
|
||||
|
||||
If you don't want to type in the code and/or set up an SBT project then you can check out the full tutorial from the Akka GitHub repository. It is in the ``akka-tutorials/akka-tutorial-first`` module. You can also browse it online `here <https://github.com/jboner/akka/tree/master/akka-tutorials/akka-tutorial-first>`_, with the actual source code `here <https://github.com/jboner/akka/blob/master/akka-tutorials/akka-tutorial-first/src/main/scala/Pi.scala>`_.
|
||||
|
||||
Prerequisites
|
||||
-------------
|
||||
|
||||
This tutorial assumes that you have Java 1.6 or later installed on your machine and ``java`` on your ``PATH``. You also need to know how to run commands in a shell (ZSH, Bash, DOS etc.) and have a decent text editor or IDE to type the Scala code in.
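If you are unsure which Java version you have, a quick check from the shell (the exact output will vary with your setup)::

    $ java -version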
|
||||
|
||||
Downloading and installing Akka
|
||||
-------------------------------
|
||||
|
||||
If you want to be able to build and run the tutorial sample from the command line then you have to download Akka. If you prefer to use SBT to build and run the sample then you can skip this section and jump to the next one.
|
||||
|
||||
Let's get the ``akka-1.1`` distribution of Akka core (not Akka Modules) from `http://akka.io/downloads <http://akka.io/downloads/>`_. Once you have downloaded the distribution, unzip it in the folder you would like to have Akka installed in; in my case I chose to install it in ``/Users/jboner/tools/``, simply by unzipping it to this directory.
|
||||
|
||||
You need to do one more thing in order to install Akka properly and that is to set the ``AKKA_HOME`` environment variable to the root of the distribution. In my case I'm opening up a shell and navigating down to the distribution and setting the ``AKKA_HOME`` variable::
|
||||
|
||||
$ cd /Users/jboner/tools/akka-1.1
|
||||
$ export AKKA_HOME=`pwd`
|
||||
$ echo $AKKA_HOME
|
||||
/Users/jboner/tools/akka-1.1
|
||||
|
||||
If we now take a look at what we have in this distribution, it looks like this::
|
||||
|
||||
$ ls -l
|
||||
total 16944
|
||||
drwxr-xr-x 7 jboner staff 238 Apr 6 11:15 .
|
||||
drwxr-xr-x 28 jboner staff 952 Apr 6 11:16 ..
|
||||
drwxr-xr-x 17 jboner staff 578 Apr 6 11:16 deploy
|
||||
drwxr-xr-x 26 jboner staff 884 Apr 6 11:16 dist
|
||||
drwxr-xr-x 3 jboner staff 102 Apr 6 11:15 lib_managed
|
||||
-rwxr-xr-x 1 jboner staff 8674105 Apr 6 11:15 scala-library.jar
|
||||
drwxr-xr-x 4 jboner staff 136 Apr 6 11:16 scripts
|
||||
|
||||
- In the ``dist`` directory we have all the Akka JARs, including sources and docs.
|
||||
- In the ``lib_managed/compile`` directory we have all of Akka's dependency JARs.
|
||||
- In the ``deploy`` directory we have all the sample JARs.
|
||||
- In the ``scripts`` directory we have scripts for running Akka.
|
||||
- Finally, the ``scala-library.jar`` is the JAR for the latest Scala distribution that Akka depends on.
|
||||
|
||||
The only JAR we will need for this tutorial (apart from the ``scala-library.jar`` JAR) is the ``akka-actor-1.1.jar`` JAR in the ``dist`` directory. This is a self-contained JAR with zero dependencies and contains everything we need to write a system using Actors.
|
||||
|
||||
Akka is very modular and has many JARs containing different features. The core distribution has seven modules:
|
||||
|
||||
- ``akka-actor-1.1.jar`` -- Standard Actors
|
||||
- ``akka-typed-actor-1.1.jar`` -- Typed Actors
|
||||
- ``akka-remote-1.1.jar`` -- Remote Actors
|
||||
- ``akka-stm-1.1.jar`` -- STM (Software Transactional Memory), transactors and transactional datastructures
|
||||
- ``akka-http-1.1.jar`` -- Akka Mist for continuation-based asynchronous HTTP and also Jersey integration
|
||||
- ``akka-slf4j-1.1.jar`` -- SLF4J Event Handler Listener
|
||||
- ``akka-testkit-1.1.jar`` -- Toolkit for testing Actors
|
||||
|
||||
We also have Akka Modules containing add-on modules for the core of Akka. You can download the Akka Modules distribution from TODO. It contains Akka core as well. We will not be needing any of those modules today, but for your information the module JARs are these:
|
||||
|
||||
- ``akka-kernel-1.1.jar`` -- Akka microkernel for running a bare-bones mini application server (embeds Jetty etc.)
|
||||
- ``akka-amqp-1.1.jar`` -- AMQP integration
|
||||
- ``akka-camel-1.1.jar`` -- Apache Camel Actors integration (it's the best way to have your Akka application communicate with the rest of the world)
|
||||
- ``akka-camel-typed-1.1.jar`` -- Apache Camel Typed Actors integration
|
||||
- ``akka-scalaz-1.1.jar`` -- Support for the Scalaz library
|
||||
- ``akka-spring-1.1.jar`` -- Spring framework integration
|
||||
- ``akka-osgi-dependencies-bundle-1.1.jar`` -- OSGi support
|
||||
|
||||
Downloading and installing Scala
|
||||
--------------------------------
|
||||
|
||||
If you want to be able to build and run the tutorial sample from the command line then you have to install the Scala distribution. If you prefer to use SBT to build and run the sample then you can skip this section and jump to the next one.
|
||||
|
||||
Scala can be downloaded from `http://www.scala-lang.org/downloads <http://www.scala-lang.org/downloads>`_. Browse there and download the Scala 2.9.0 final release. If you pick the ``tgz`` or ``zip`` distributions then just unzip it where you want it installed. If you pick the IzPack Installer then double click on it and follow the instructions.
|
||||
|
||||
You also need to make sure that the ``scala-2.9.0-final/bin`` (if that is the directory where you installed Scala) is on your ``PATH``::
|
||||
|
||||
$ export PATH=$PATH:scala-2.9.0-final/bin
|
||||
|
||||
Now you can test your installation by invoking the following command and checking the printout::
|
||||
|
||||
$ scala -version
|
||||
Scala code runner version 2.9.0.final -- Copyright 2002-2011, LAMP/EPFL
|
||||
|
||||
Looks like we are all good. Finally let's create a source file ``Pi.scala`` for the tutorial and put it in the root of the Akka distribution in the ``tutorial`` directory (you have to create it first).
|
||||
|
||||
Some tools require you to set the ``SCALA_HOME`` environment variable to the root of the Scala distribution; Akka, however, does not require that.
|
||||
|
||||
Downloading and installing SBT
|
||||
------------------------------
|
||||
|
||||
SBT, short for 'Simple Build Tool', is an excellent build system written in Scala. You use Scala to write the build scripts, which gives you a lot of power. It has a plugin architecture with many plugins available, something that we will take advantage of soon. SBT is the preferred way of building software in Scala. If you want to use SBT for this tutorial then follow the instructions below; if not, you can skip this section and the next.
|
||||
|
||||
To install SBT and create a project for this tutorial it is easiest to follow the instructions on `this page <http://code.google.com/p/simple-build-tool/wiki/Setup>`_. The preferred SBT version to install is ``0.7.6``.
|
||||
|
||||
If you have created an SBT project then step into the newly created SBT project, create a source file ``Pi.scala`` for the tutorial sample and put it in the ``src/main/scala`` directory.
|
||||
|
||||
So far we only have a standard Scala project but now we need to make our project an Akka project. You could add the dependencies manually to the build script, but the easiest way is to use Akka's SBT Plugin, covered in the next section.
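For reference, here is a minimal sketch of what wiring the dependency in by hand could look like in an SBT 0.7 project definition; the repository URL and artifact coordinates are the ones used elsewhere in this tutorial, while the class name is made up. The Akka SBT Plugin described next does this for you::

    import sbt._

    // hypothetical hand-rolled project definition, without the Akka SBT Plugin
    class ManualAkkaProject(info: ProjectInfo) extends DefaultProject(info) {
      // repository where the Akka artifacts are published
      val akkaRepo  = "Akka Repo" at "http://akka.io/repository"
      // the only JAR this tutorial needs
      val akkaActor = "se.scalablesolutions.akka" % "akka-actor" % "1.1"
    }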
|
||||
|
||||
Creating an Akka SBT project
|
||||
----------------------------
|
||||
|
||||
If you have not already done so, now is the time to create an SBT project for our tutorial. You do that by stepping into the directory you want to create your project in and invoking the ``sbt`` command, answering the questions to set up your project (just pressing ENTER will choose the default in square brackets)::
|
||||
|
||||
$ sbt
|
||||
Project does not exist, create new project? (y/N/s) y
|
||||
Name: Tutorial 1
|
||||
Organization: Hakkers Inc
|
||||
Version [1.0]:
|
||||
Scala version [2.9.0]:
|
||||
sbt version [0.7.6]:
|
||||
|
||||
Now we have the basis for an SBT project. Akka has an SBT Plugin that makes it very easy to use Akka in an SBT-based project, so let's use that.
|
||||
|
||||
To use the plugin, first add a plugin definition to your SBT project by creating a ``Plugins.scala`` file in the ``project/plugins`` directory containing::
|
||||
|
||||
import sbt._
|
||||
|
||||
class Plugins(info: ProjectInfo) extends PluginDefinition(info) {
|
||||
val akkaRepo = "Akka Repo" at "http://akka.io/repository"
|
||||
val akkaPlugin = "se.scalablesolutions.akka" % "akka-sbt-plugin" % "1.1"
|
||||
}
|
||||
|
||||
Now we need to create a project definition using our Akka SBT plugin. We do that by creating a ``Project.scala`` file in the ``project/build`` directory containing::
|
||||
|
||||
import sbt._
|
||||
|
||||
class TutorialOneProject(info: ProjectInfo) extends DefaultProject(info) with AkkaProject {
|
||||
val akkaRepo = "Akka Repo" at "http://akka.io/repository"
|
||||
}
|
||||
|
||||
The magic is in mixing in the ``AkkaProject`` trait.
|
||||
|
||||
This is not needed in this tutorial, but if you would like to use additional Akka modules beyond ``akka-actor``, you can add these as "module configurations" in the project file. Here is an example adding ``akka-remote`` and ``akka-stm``::
|
||||
|
||||
class AkkaSampleProject(info: ProjectInfo) extends DefaultProject(info) with AkkaProject {
|
||||
val akkaSTM = akkaModule("stm")
|
||||
val akkaRemote = akkaModule("remote")
|
||||
}
|
||||
|
||||
So, now we are all set. Just one final thing to do: make SBT download all the dependencies it needs. That is done by invoking::
|
||||
|
||||
> update
|
||||
|
||||
SBT itself needs a whole bunch of dependencies, but our project will only need one: ``akka-actor-1.1.jar``. SBT downloads that as well.
|
||||
|
||||
Start writing the code
|
||||
----------------------
|
||||
|
||||
Now it's about time that we start hacking.
|
||||
|
||||
We start by creating a ``Pi.scala`` file and add these import statements at the top of the file::
|
||||
|
||||
package akka.tutorial.scala.first
|
||||
|
||||
import akka.actor.{Actor, ActorRef, PoisonPill}
|
||||
import Actor._
|
||||
import akka.routing.{Routing, CyclicIterator}
|
||||
import Routing._
|
||||
import akka.dispatch.Dispatchers
|
||||
|
||||
import java.util.concurrent.CountDownLatch
|
||||
|
||||
If you are using SBT in this tutorial then create the file in the ``src/main/scala`` directory.
|
||||
|
||||
If you are using the command line tools then just create the file wherever you want. I will create it in a directory called ``tutorial`` at the root of the Akka distribution, e.g. in ``$AKKA_HOME/tutorial/Pi.scala``.
|
||||
|
||||
Creating the messages
|
||||
---------------------
|
||||
|
||||
The design we are aiming for is to have one ``Master`` actor initiating the computation, creating a set of ``Worker`` actors. Then it splits up the work into discrete chunks and sends these chunks to the different workers in a round-robin fashion. The master waits until all the workers have completed their work and sent back the results for aggregation. When the computation is completed the master prints out the result, shuts down all the workers and then itself.
|
||||
|
||||
With this in mind, let's now create the messages that we want to have flowing in the system. We need three different messages:
|
||||
|
||||
- ``Calculate`` -- sent to the ``Master`` actor to start the calculation
|
||||
- ``Work`` -- sent from the ``Master`` actor to the ``Worker`` actors containing the work assignment
|
||||
- ``Result`` -- sent from the ``Worker`` actors to the ``Master`` actor containing the result from the worker's calculation
|
||||
|
||||
Messages sent to actors should always be immutable to avoid sharing mutable state. In Scala we have 'case classes' which make excellent messages. So let's start by creating three messages as case classes. We also create a common base trait for our messages (that we define as being ``sealed`` in order to prevent creating messages outside our control)::
|
||||
|
||||
sealed trait PiMessage
|
||||
|
||||
case object Calculate extends PiMessage
|
||||
|
||||
case class Work(start: Int, nrOfElements: Int) extends PiMessage
|
||||
|
||||
case class Result(value: Double) extends PiMessage
|
||||
|
||||
Creating the worker
|
||||
-------------------
|
||||
|
||||
Now we can create the worker actor. This is done by mixing in the ``Actor`` trait and defining the ``receive`` method. The ``receive`` method defines our message handler. We expect it to be able to handle the ``Work`` message so we need to add a handler for this message::
|
||||
|
||||
class Worker extends Actor {
|
||||
def receive = {
|
||||
case Work(start, nrOfElements) =>
|
||||
self reply Result(calculatePiFor(start, nrOfElements)) // perform the work
|
||||
}
|
||||
}
|
||||
|
||||
As you can see we have now created an ``Actor`` with a ``receive`` method as a handler for the ``Work`` message. In this handler we invoke the ``calculatePiFor(..)`` method, wrap the result in a ``Result`` message and send it back to the original sender using ``self.reply``. In Akka the sender reference is implicitly passed along with the message so that the receiver can always reply or store away the sender reference for later use.
|
||||
|
||||
The only thing missing in our ``Worker`` actor is the implementation of the ``calculatePiFor(..)`` method. There are many ways we can implement this algorithm in Scala; in this introductory tutorial we have chosen an imperative style using a for comprehension and an accumulator::
|
||||
|
||||
def calculatePiFor(start: Int, nrOfElements: Int): Double = {
|
||||
var acc = 0.0
|
||||
for (i <- start until (start + nrOfElements))
|
||||
acc += 4 * math.pow(-1, i) / (2 * i + 1)
|
||||
acc
|
||||
}
|
||||
|
||||
Creating the master
|
||||
-------------------
|
||||
|
||||
The master actor is a little bit more involved. In its constructor we need to create the workers (the ``Worker`` actors) and start them. We will also wrap them in a load-balancing router to make it easier to spread out the work evenly between the workers. Let's do that first::
|
||||
|
||||
// create the workers
|
||||
val workers = Vector.fill(nrOfWorkers)(actorOf[Worker].start())
|
||||
|
||||
// wrap them with a load-balancing router
|
||||
val router = Routing.loadBalancerActor(CyclicIterator(workers)).start()
|
||||
|
||||
As you can see we are using the ``actorOf`` factory method to create actors; this method returns an ``ActorRef``, which is a reference to our newly created actor. This method is available in the ``Actor`` object but is usually imported::
|
||||
|
||||
import akka.actor.Actor._
|
||||
|
||||
Now we have a router that is representing all our workers in a single abstraction. If you paid attention to the code above you will have noticed that we were using the ``nrOfWorkers`` variable. This variable and a couple of others have to be passed to the ``Master`` actor in its constructor. So now let's create the master actor. We need to pass in three integer variables:
|
||||
|
||||
- ``nrOfWorkers`` -- defining how many workers we should start up
|
||||
- ``nrOfMessages`` -- defining how many number chunks should be sent out to the workers
|
||||
- ``nrOfElements`` -- defining how big the number chunks sent to each worker should be
|
||||
|
||||
Let's now write the master actor::
|
||||
|
||||
class Master(nrOfWorkers: Int, nrOfMessages: Int, nrOfElements: Int, latch: CountDownLatch)
|
||||
extends Actor {
|
||||
|
||||
var pi: Double = _
|
||||
var nrOfResults: Int = _
|
||||
var start: Long = _
|
||||
|
||||
// create the workers
|
||||
val workers = Vector.fill(nrOfWorkers)(actorOf[Worker].start())
|
||||
|
||||
// wrap them with a load-balancing router
|
||||
val router = Routing.loadBalancerActor(CyclicIterator(workers)).start()
|
||||
|
||||
def receive = { ... }
|
||||
|
||||
override def preStart {
|
||||
start = now
|
||||
}
|
||||
|
||||
override def postStop {
|
||||
// tell the world that the calculation is complete
|
||||
println("\n\tPi estimate: \t\t%s\n\tCalculation time: \t%s millis".format(pi, (now - start)))
|
||||
latch.countDown()
|
||||
}
|
||||
}
|
||||
|
||||
A couple of things are worth explaining further.
|
||||
|
||||
First, we are passing in a ``java.util.concurrent.CountDownLatch`` to the ``Master`` actor. This latch is only used for plumbing (in this specific tutorial), to have a simple way of letting the outside world know when the master can deliver the result and shut down. In more idiomatic Akka code, as we will see in part two of this tutorial series, we would not use a latch but other abstractions and functions like ``Channel``, ``Future`` and ``!!!`` to achieve the same thing in a non-blocking way. But for simplicity let's stick to a ``CountDownLatch`` for now.
|
||||
|
||||
Second, we are adding a couple of life-cycle callback methods: ``preStart`` and ``postStop``. In the ``preStart`` callback we record the time when the actor is started and in the ``postStop`` callback we print out the result (the approximation of Pi) and the time it took to calculate it. In this callback we also invoke ``latch.countDown`` to tell the outside world that we are done.
|
||||
|
||||
But we are not done yet. We are missing the message handler for the ``Master`` actor. This message handler needs to be able to react to two different messages:
|
||||
|
||||
- ``Calculate`` -- which should start the calculation
|
||||
- ``Result`` -- which should aggregate the different results
|
||||
|
||||
The ``Calculate`` handler sends out work to all the ``Worker`` actors and after doing that it also sends a ``Broadcast(PoisonPill)`` message to the router, which will send out the ``PoisonPill`` message to all the actors it is representing (in our case all the ``Worker`` actors). The ``PoisonPill`` is a special kind of message that tells the receiver to shut itself down using the normal shutdown method, ``self.stop()``. Then we also send a ``PoisonPill`` to the router itself (since it's also an actor that we want to shut down).
|
||||
|
||||
The ``Result`` handler is simpler: here we just take the value from the ``Result`` message and add it to our ``pi`` member variable. We also keep track of how many results we have received back, and if that matches the number of tasks sent out, the ``Master`` actor considers itself done and shuts itself down.
|
||||
|
||||
Now, let's capture this in code::
|
||||
|
||||
// message handler
|
||||
def receive = {
|
||||
case Calculate =>
|
||||
// schedule work
|
||||
for (i <- 0 until nrOfMessages) router ! Work(i * nrOfElements, nrOfElements)
|
||||
|
||||
// send a PoisonPill to all workers telling them to shut down themselves
|
||||
router ! Broadcast(PoisonPill)
|
||||
|
||||
// send a PoisonPill to the router, telling it to shut itself down
|
||||
router ! PoisonPill
|
||||
|
||||
case Result(value) =>
|
||||
// handle result from the worker
|
||||
pi += value
|
||||
nrOfResults += 1
|
||||
if (nrOfResults == nrOfMessages) self.stop()
|
||||
}
|
||||
|
||||
Bootstrap the calculation
|
||||
-------------------------
|
||||
|
||||
Now the only thing that is left to implement is the runner that should bootstrap and run this calculation for us. We do that by creating an object that we call ``Pi``; here we can extend the ``App`` trait in Scala, which means that we will be able to run this as an application directly from the command line. The ``Pi`` object is a perfect container module for our actors and messages, so let's put them all there. We also create a method ``calculate`` in which we start up the ``Master`` actor and wait for it to finish::
|
||||
|
||||
object Pi extends App {
|
||||
|
||||
calculate(nrOfWorkers = 4, nrOfElements = 10000, nrOfMessages = 10000)
|
||||
|
||||
... // actors and messages
|
||||
|
||||
def calculate(nrOfWorkers: Int, nrOfElements: Int, nrOfMessages: Int) {
|
||||
|
||||
// this latch is only plumbing to know when the calculation is completed
|
||||
val latch = new CountDownLatch(1)
|
||||
|
||||
// create the master
|
||||
val master = actorOf(new Master(nrOfWorkers, nrOfMessages, nrOfElements, latch)).start()
|
||||
|
||||
// start the calculation
|
||||
master ! Calculate
|
||||
|
||||
// wait for master to shut down
|
||||
latch.await()
|
||||
}
|
||||
}
|
||||
|
||||
That's it. Now we are done.
|
||||
|
||||
But before we package it up and run it, let's take a look at the full code now, with package declaration, imports and all::
|
||||
|
||||
package akka.tutorial.scala.first
|
||||
|
||||
import akka.actor.{Actor, PoisonPill}
|
||||
import Actor._
|
||||
import akka.routing.{Routing, CyclicIterator}
|
||||
import Routing._
|
||||
|
||||
import System.{currentTimeMillis => now}
|
||||
import java.util.concurrent.CountDownLatch
|
||||
|
||||
object Pi extends App {
|
||||
|
||||
calculate(nrOfWorkers = 4, nrOfElements = 10000, nrOfMessages = 10000)
|
||||
|
||||
// ====================
|
||||
// ===== Messages =====
|
||||
// ====================
|
||||
sealed trait PiMessage
|
||||
case object Calculate extends PiMessage
|
||||
case class Work(start: Int, nrOfElements: Int) extends PiMessage
|
||||
case class Result(value: Double) extends PiMessage
|
||||
|
||||
// ==================
|
||||
// ===== Worker =====
|
||||
// ==================
|
||||
class Worker extends Actor {
|
||||
|
||||
// define the work
|
||||
def calculatePiFor(start: Int, nrOfElements: Int): Double = {
|
||||
var acc = 0.0
|
||||
for (i <- start until (start + nrOfElements))
|
||||
acc += 4 * math.pow(-1, i) / (2 * i + 1)
|
||||
acc
|
||||
}
|
||||
|
||||
def receive = {
|
||||
case Work(start, nrOfElements) =>
|
||||
self reply Result(calculatePiFor(start, nrOfElements)) // perform the work
|
||||
}
|
||||
}
|
||||
|
||||
// ==================
|
||||
// ===== Master =====
|
||||
// ==================
|
||||
class Master(nrOfWorkers: Int, nrOfMessages: Int, nrOfElements: Int, latch: CountDownLatch)
|
||||
extends Actor {
|
||||
|
||||
var pi: Double = _
|
||||
var nrOfResults: Int = _
|
||||
var start: Long = _
|
||||
|
||||
// create the workers
|
||||
val workers = Vector.fill(nrOfWorkers)(actorOf[Worker].start())
|
||||
|
||||
// wrap them with a load-balancing router
|
||||
val router = Routing.loadBalancerActor(CyclicIterator(workers)).start()
|
||||
|
||||
// message handler
|
||||
def receive = {
|
||||
case Calculate =>
|
||||
// schedule work
|
||||
|
||||
for (i <- 0 until nrOfMessages) router ! Work(i * nrOfElements, nrOfElements)
|
||||
|
||||
// send a PoisonPill to all workers telling them to shut down themselves
|
||||
router ! Broadcast(PoisonPill)
|
||||
|
||||
// send a PoisonPill to the router, telling it to shut itself down
|
||||
router ! PoisonPill
|
||||
|
||||
case Result(value) =>
|
||||
// handle result from the worker
|
||||
pi += value
|
||||
nrOfResults += 1
|
||||
if (nrOfResults == nrOfMessages) self.stop()
|
||||
}
|
||||
|
||||
override def preStart {
|
||||
start = now
|
||||
}
|
||||
|
||||
override def postStop {
|
||||
// tell the world that the calculation is complete
|
||||
println("\n\tPi estimate: \t\t%s\n\tCalculation time: \t%s millis".format(pi, (now - start)))
|
||||
latch.countDown()
|
||||
}
|
||||
}
|
||||
|
||||
// ==================
|
||||
// ===== Run it =====
|
||||
// ==================
|
||||
def calculate(nrOfWorkers: Int, nrOfElements: Int, nrOfMessages: Int) {
|
||||
|
||||
// this latch is only plumbing to know when the calculation is completed
|
||||
val latch = new CountDownLatch(1)
|
||||
|
||||
// create the master
|
||||
val master = actorOf(new Master(nrOfWorkers, nrOfMessages, nrOfElements, latch)).start()
|
||||
|
||||
// start the calculation
|
||||
master ! Calculate
|
||||
|
||||
// wait for master to shut down
|
||||
latch.await()
|
||||
}
|
||||
}
|
||||
|
||||
Run it as a command line application
|
||||
------------------------------------
|
||||
|
||||
If you have not yet typed (or copied) the code for the tutorial into ``$AKKA_HOME/tutorial/Pi.scala`` then now is the time. When that is done, open up a shell and step into the Akka distribution (``cd $AKKA_HOME``).
|
||||
|
||||
First we need to compile the source file. That is done with Scala's compiler ``scalac``. Our application depends on the ``akka-actor-1.1.jar`` JAR file, so let's add that to the compiler classpath when we compile the source::
|
||||
|
||||
$ scalac -cp dist/akka-actor-1.1.jar tutorial/Pi.scala
|
||||
|
||||
When we have compiled the source file we are ready to run the application. This is done with ``java``, but yet again we need to add the ``akka-actor-1.1.jar`` JAR file to the classpath; this time we also need to add the Scala runtime library ``scala-library.jar`` and the classes we compiled ourselves::
|
||||
|
||||
$ java -cp dist/akka-actor-1.1.jar:scala-library.jar:tutorial akka.tutorial.scala.first.Pi
|
||||
AKKA_HOME is defined as [/Users/jboner/src/akka-stuff/akka-core], loading config from \
|
||||
[/Users/jboner/src/akka-stuff/akka-core/config/akka.conf].
|
||||
|
||||
Pi estimate: 3.1435501812459323
|
||||
Calculation time: 858 millis
|
||||
|
||||
Yippee! It is working.
|
||||
|
||||
Run it inside SBT
|
||||
-----------------
|
||||
|
||||
If you have based the tutorial on SBT then you can run the application directly inside SBT. First you need to compile the project::
|
||||
|
||||
$ sbt
|
||||
> update
|
||||
...
|
||||
> compile
|
||||
...
|
||||
|
||||
When this is done we can run our application directly inside SBT::
|
||||
|
||||
> run
|
||||
...
|
||||
Pi estimate: 3.1435501812459323
|
||||
Calculation time: 942 millis
|
||||
|
||||
Yippee! It is working.
|
||||
|
||||
Conclusion
|
||||
----------
|
||||
|
||||
Now we have learned how to create our first Akka project and how to use Akka's actors to speed up a computation-intensive problem by scaling up on multi-core processors. We have also learned how to compile and run an Akka project using either the command line tools or the SBT build system.
|
||||
|
||||
Now we are ready to take on more advanced problems. In the next tutorial we will build upon this one, refactor it into more idiomatic Akka and Scala code and introduce a few new concepts and abstractions. Whenever you feel ready, join me in the `Getting Started Tutorial: Second Chapter <TODO>`_.
|
||||
|
||||
Happy hakking.
|
||||
BIN
akka-docs/manual/more.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 1.5 KiB |
BIN
akka-docs/manual/pi-formula.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 1.5 KiB |
31
akka-docs/pending/Feature Stability Matrix.rst
Normal file
|
|
@ -0,0 +1,31 @@
|
|||
Feature Stability Matrix
|
||||
========================
|
||||
|
||||
Akka is comprised of a number of modules, with different levels of maturity and in different parts of their lifecycle. The matrix below gives you the current stability level of each module.
|
||||
|
||||
Explanation of the different levels of stability
|
||||
------------------------------------------------
|
||||
|
||||
* **Solid** - Proven solid in heavy production usage
|
||||
* **Stable** - Ready for use in production environment
|
||||
* **In progress** - Not enough feedback/use to claim it's ready for production use
|
||||
|
||||
.. list-table::
   :header-rows: 1

   * - Feature
     - Stability
   * - `Actors (Scala) <actors-scala>`_
     - Solid
   * - `Actors (Java) <actors-java>`_
     - Solid
   * - `Typed Actors (Scala) <typed-actors-scala>`_
     - Solid
   * - `Typed Actors (Java) <typed-actors-java>`_
     - Solid
   * - `STM (Scala) <stm-scala>`_
     - Solid
   * - `STM (Java) <stm-java>`_
     - Solid
   * - `Transactors (Scala) <transactors-scala>`_
     - Solid
   * - `Transactors (Java) <transactors-java>`_
     - Solid
   * - `Remote Actors (Scala) <remote-actors-scala>`_
     - Solid
   * - `Remote Actors (Java) <remote-actors-java>`_
     - Solid
   * - `Camel <camel>`_
     - Solid
   * - `AMQP <amqp>`_
     - Solid
   * - `HTTP <http>`_
     - Solid
   * - `Integration Guice <guice-integration>`_
     - Stable
   * - `Integration Spring <spring-integration>`_
     - Stable
   * - `JTA <jta>`_
     - Stable
   * - `Scheduler <scheduler>`_
     - Solid
   * - `Redis Pub Sub <pubsub>`_
     - In progress
|
||||
60
akka-docs/pending/Home.rst
Normal file
|
|
@ -0,0 +1,60 @@
|
|||
Akka
|
||||
====
|
||||
|
||||
**Simpler Scalability, Fault-Tolerance, Concurrency & Remoting through Actors**
|
||||
|
||||
We believe that writing correct concurrent, fault-tolerant and scalable applications is too hard. Most of the time it's because we are using the wrong tools and the wrong level of abstraction. Akka is here to change that. Using the Actor Model together with Software Transactional Memory we raise the abstraction level and provide a better platform to build correct concurrent and scalable applications. For fault-tolerance we adopt the "Let it crash" / "Embrace failure" model, which has been used with great success in the telecom industry to build applications that self-heal, systems that never stop. Actors also provide the abstraction for transparent distribution and the basis for truly scalable and fault-tolerant applications. Akka is Open Source and available under the Apache 2 License.
|
||||
|
||||
Akka is split up into two different parts:
|
||||
* Akka - Reflects all the sections under 'Scala API' and 'Java API' in the navigation bar.
|
||||
* Akka Modules - Reflects all the sections under 'Add-on modules' in the navigation bar.
|
||||
|
||||
Download from `<http://akka.io/downloads/>`_
|
||||
|
||||
News: Akka 1.0 final is released
|
||||
--------------------------------
|
||||
|
||||
1.0 documentation
|
||||
-----------------
|
||||
|
||||
This documentation covers the latest release-ready code in the 'master' branch of the repository.
|
||||
If you want the documentation for the 1.0 release you can find it `here <http://akka.io/docs/akka-1.0/space.menu.html>`_.
|
||||
|
||||
You can watch the recording of the `Akka talk at JFokus in Feb 2011 <http://79.136.112.58/ability/show/xaimkwdli/a2_20110216_1110/mainshow.asp?STREAMID=1>`_.
|
||||
|
||||
|
||||
|
||||
**Akka implements a unique hybrid of:**
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
* `Actors <untyped-actors-java>`_, which gives you:
|
||||
* Simple and high-level abstractions for concurrency and parallelism.
|
||||
* Asynchronous, non-blocking and highly performant event-driven programming model.
|
||||
* Very lightweight event-driven processes (create ~6.5 million actors on 4 GB RAM).
|
||||
* `Failure management <fault-tolerance-java>`_ through supervisor hierarchies with `let-it-crash <http://letitcrash.com>`_ semantics. Excellent for writing highly fault-tolerant systems that never stop, systems that self-heal.
|
||||
* `Software Transactional Memory <stm-java>`_ (STM). (Distributed transactions coming soon).
|
||||
* `Transactors <transactors-java>`_: combine actors and STM into transactional actors. Allows you to compose atomic message flows with automatic retry and rollback.
|
||||
* `Remote actors <remote-actors-java>`_: highly performant distributed actors with remote supervision and error management.
|
||||
* Java and Scala API.
|
||||
|
||||
**Akka also has a set of add-on modules:**
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
* `Camel <camel>`_: Expose actors as Apache Camel endpoints.
|
||||
* `Spring <spring-integration>`_: Wire up typed actors in the Spring config using Akka's namespace.
|
||||
* `REST <rest>`_ (JAX-RS): Expose actors as REST services.
|
||||
* `OSGi <osgi>`_: Akka and all its dependencies are OSGi enabled.
|
||||
* `Mist <http#Mist%20-%20Lightweight%20Asynchronous%20HTTP>`_: Expose actors as asynchronous HTTP services.
|
||||
* `Security <security>`_: Basic, Digest and Kerberos based security.
|
||||
* `Microkernel <microkernel>`_: Run Akka as a stand-alone self-hosted kernel.
|
||||
* `FSM <fsm-scala>`_: Finite State Machine support.
|
||||
* `JTA <jta>`_: Let the STM interoperate with other transactional resources.
|
||||
* `Pub/Sub <pubsub>`_: Publish-Subscribe across remote nodes.
|
||||
|
||||
**Akka can be used in two different ways:**
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
* As a library: used by a web app, to be put into ‘WEB-INF/lib’ or as a regular JAR on your classpath.
|
||||
* As a microkernel: stand-alone kernel, embedding a servlet container and all the other modules.
|
||||
|
||||
See the `Use-case and Deployment Scenarios <deployment-scenarios>`_ for details.
|
||||
32
akka-docs/pending/Migration-1.0-1.1.rst
Normal file
|
|
@ -0,0 +1,32 @@
|
|||
Moved to Scala 2.9.x
|
||||
^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Akka HTTP
|
||||
=========
|
||||
|
||||
# akka.servlet.Initializer has been moved to akka-kernel to be able to have akka-http not depend on akka-remote. If you don't want to use the class from the kernel, just create your own version of akka.servlet.Initializer; it's just a couple of lines of code and there are instructions here: `Akka Http Docs <http>`_
|
||||
# akka.http.ListWriter has been removed in full, if you use it and want to keep using it, here's the code: `ListWriter <https://github.com/jboner/akka/blob/v1.0/akka-http/src/main/scala/akka/http/ListWriter.scala>`_
|
||||
# Jersey-server is now a "provided" dependency for Akka-http, so you'll need to add the dependency to your project, it's built against Jersey 1.3
|
||||
|
||||
Akka Actor
|
||||
==========
|
||||
|
||||
# is now dependency free, with the exception of the dependency on the scala-library.jar
|
||||
# does not bundle any logging anymore, but you can subscribe to events within Akka by registering an event handler on akka.event.EventHandler or by specifying the FQN of an Actor in the akka.conf under akka.event-handlers; there is an akka-slf4j module which still provides the Logging trait and a default SLF4J logger adapter.
|
||||
# If you used HawtDispatcher and want to continue using it, you need to include akka-dispatcher-extras.jar from Akka Modules, in your akka.conf you need to specify: "akka.dispatch.HawtDispatcherConfigurator" instead of "HawtDispatcher"
|
||||
# FSM: the onTransition method changed from Function1 to PartialFunction; there is an implicit conversion for the precise types in place, but it may be necessary to add an underscore if you are passing an eta-expansion (using a method as function value).
|
||||
|
||||
Akka Typed Actor
|
||||
================
|
||||
|
||||
All methods starting with 'get*' are deprecated and will be removed in post 1.1 release.
|
||||
|
||||
Akka Remote
|
||||
===========
|
||||
|
||||
# UnparsebleException => CannotInstantiateRemoteExceptionDueToRemoteProtocolParsingErrorException(exception, classname, message)
|
||||
|
||||
Akka Testkit
|
||||
============
|
||||
|
||||
The TestKit moved into the akka-testkit subproject and correspondingly into the akka.testkit package.
|
||||
6
akka-docs/pending/Recipes.rst
Normal file
|
|
@ -0,0 +1,6 @@
|
|||
Here is a list of recipes for all things Akka
|
||||
==============================================
|
||||
|
||||
* PostStart => `Link to Klangism <http://klangism.tumblr.com/post/3667529448/implementing-poststart-in-akka>`_
|
||||
* `Consumer actors best practices <http://krasserm.blogspot.com/2011/02/akka-consumer-actors-new-features-and.html>`_
|
||||
* `Producer actors best practices <http://krasserm.blogspot.com/2011/02/akka-producer-actor-new-features-and.html>`_
|
||||
81
akka-docs/pending/actor-registry-java.rst
Normal file
|
|
@ -0,0 +1,81 @@
|
|||
ActorRegistry (Java)
|
||||
====================
|
||||
|
||||
Module stability: **SOLID**
|
||||
|
||||
ActorRegistry: Finding Actors
|
||||
-----------------------------
|
||||
|
||||
Actors can be looked up using the 'akka.actor.Actors.registry()' object. Through this registry you can look up actors by:
|
||||
|
||||
* uuid com.eaio.uuid.UUID – this uses the ‘uuid’ field in the Actor class, returns the actor reference for the actor with specified uuid, if one exists, otherwise None
|
||||
* id string – this uses the ‘id’ field in the Actor class, which can be set by the user (default is the class name), returns all actor references to actors with specified id
|
||||
* parameterized type - returns a 'ActorRef[]' with all actors that are a subtype of this specific type
|
||||
* specific actor class - returns a 'ActorRef[]' with all actors of this exact class
|
||||
|
||||
Actors are automatically registered in the ActorRegistry when they are started and removed when they are stopped. But you can explicitly register and unregister ActorRef's if you need to using the 'register' and 'unregister' methods.
|
||||
|
||||
Here is a summary of the API for finding actors:
|
||||
|
||||
.. code-block:: java
|
||||
|
||||
import static akka.actor.Actors.*;
|
||||
Option<ActorRef> actor = registry().actorFor(uuid);
|
||||
ActorRef[] actors = registry().actors();
|
||||
ActorRef[] otherActors = registry().actorsFor(id);
|
||||
ActorRef[] moreActors = registry().actorsFor(clazz);
|
||||
|
||||
You can shut down all Actors in the system by invoking:
|
||||
|
||||
.. code-block:: java
|
||||
|
||||
registry().shutdownAll();
|
||||
|
||||
If you want to know when a new Actor is added to or removed from the registry, you can use the subscription API. You can register an Actor that should be notified when an event happens in the ActorRegistry:
|
||||
|
||||
.. code-block:: java
|
||||
|
||||
void addListener(ActorRef listener);
|
||||
void removeListener(ActorRef listener);
|
||||
|
||||
The messages sent to this Actor are:
|
||||
|
||||
.. code-block:: java
|
||||
|
||||
public class ActorRegistered {
|
||||
ActorRef actor();
|
||||
}
|
||||
public class ActorUnregistered {
|
||||
ActorRef actor();
|
||||
}
|
||||
|
||||
So your listener Actor needs to be able to handle these two messages. Example:
|
||||
|
||||
.. code-block:: java
|
||||
|
||||
import akka.actor.ActorRegistered;
|
||||
import akka.actor.ActorUnregistered;
|
||||
import akka.actor.UntypedActor;
|
||||
import akka.event.EventHandler;
|
||||
|
||||
public class RegistryListener extends UntypedActor {
|
||||
public void onReceive(Object message) throws Exception {
|
||||
if (message instanceof ActorRegistered) {
|
||||
ActorRegistered event = (ActorRegistered) message;
|
||||
EventHandler.info(this, String.format("Actor registered: %s - %s",
|
||||
event.actor().actorClassName(), event.actor().getUuid()));
|
||||
|
||||
} else if (message instanceof ActorUnregistered) {
|
||||
// ...
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
The above actor can be added as listener of registry events:
|
||||
|
||||
.. code-block:: java
|
||||
|
||||
import static akka.actor.Actors.*;
|
||||
|
||||
ActorRef listener = actorOf(RegistryListener.class).start();
|
||||
registry().addListener(listener);
|
||||
107
akka-docs/pending/actor-registry-scala.rst
Normal file
|
|
@ -0,0 +1,107 @@
|
|||
ActorRegistry (Scala)
|
||||
=====================
|
||||
|
||||
Module stability: **SOLID**
|
||||
|
||||
ActorRegistry: Finding Actors
|
||||
-----------------------------
|
||||
|
||||
Actors can be looked up by using the **akka.actor.Actor.registry: akka.actor.ActorRegistry**. Lookups for actors through this registry can be done by:
|
||||
|
||||
* uuid akka.actor.Uuid – this uses the ‘**uuid**’ field in the Actor class, returns the actor reference for the actor with specified uuid, if one exists, otherwise None
|
||||
* id string – this uses the ‘**id**’ field in the Actor class, which can be set by the user (default is the class name), returns all actor references to actors with specified id
|
||||
* specific actor class - returns an '**Array[Actor]**' with all actors of this exact class
|
||||
* parameterized type - returns an '**Array[Actor]**' with all actors that are a subtype of this specific type
|
||||
|
||||
Actors are automatically registered in the ActorRegistry when they are started and removed when they are stopped. You can explicitly register and unregister ActorRef's by using the '**register**' and '**unregister**' methods. The ActorRegistry contains many convenience methods for looking up typed actors.
|
||||
|
||||
Here is a summary of the API for finding actors:
|
||||
|
||||
.. code-block:: scala
|
||||
|
||||
def actors: Array[ActorRef]
|
||||
def actorFor(uuid: akka.actor.Uuid): Option[ActorRef]
|
||||
def actorsFor(id : String): Array[ActorRef]
|
||||
def actorsFor[T <: Actor](implicit manifest: Manifest[T]): Array[ActorRef]
|
||||
def actorsFor[T <: Actor](clazz: Class[T]): Array[ActorRef]
|
||||
|
||||
// finding typed actors
|
||||
def typedActors: Array[AnyRef]
|
||||
def typedActorFor(uuid: akka.actor.Uuid): Option[AnyRef]
|
||||
def typedActorsFor(id: String): Array[AnyRef]
|
||||
def typedActorsFor[T <: AnyRef](implicit manifest: Manifest[T]): Array[AnyRef]
|
||||
def typedActorsFor[T <: AnyRef](clazz: Class[T]): Array[AnyRef]
|
||||
|
||||
Examples of how to use them:
|
||||
|
||||
.. code-block:: scala
|
||||
|
||||
val actor = Actor.registry.actorFor(uuid)
|
||||
val pojo = Actor.registry.typedActorFor(uuid)
|
||||
|
||||
.. code-block:: scala
|
||||
|
||||
val actors = Actor.registry.actorsFor(classOf[...])
|
||||
val pojos = Actor.registry.typedActorsFor(classOf[...])
|
||||
|
||||
.. code-block:: scala
|
||||
|
||||
val actors = Actor.registry.actorsFor(id)
|
||||
val pojos = Actor.registry.typedActorsFor(id)
|
||||
|
||||
.. code-block:: scala
|
||||
|
||||
val actors = Actor.registry.actorsFor[MyActorType]
|
||||
val pojos = Actor.registry.typedActorsFor[MyTypedActorImpl]
|
||||
|
||||
The ActorRegistry also has 'shutdownAll' and 'foreach' methods:
|
||||
|
||||
.. code-block:: scala
|
||||
|
||||
def foreach(f: (ActorRef) => Unit)
|
||||
def foreachTypedActor(f: (AnyRef) => Unit)
|
||||
def shutdownAll()
|
||||
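For example, a quick sketch of logging what is currently registered and then shutting everything down might look like this:

.. code-block:: scala

   import akka.actor.Actor

   // print the id of every registered actor
   Actor.registry.foreach(actorRef => println(actorRef.id))

   // stop all registered actors
   Actor.registry.shutdownAll()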
|
||||
If you need to know when a new Actor is added to or removed from the registry, you can use the subscription API. You can register an Actor that should be notified when an event happens in the ActorRegistry:
|
||||
|
||||
.. code-block:: scala
|
||||
|
||||
def addListener(listener: ActorRef)
|
||||
def removeListener(listener: ActorRef)
|
||||
|
||||
The messages sent to this Actor are:
|
||||
|
||||
.. code-block:: scala
|
||||
|
||||
case class ActorRegistered(actor: ActorRef)
|
||||
case class ActorUnregistered(actor: ActorRef)
|
||||
|
||||
So your listener Actor needs to be able to handle these two messages. Example:
|
||||
|
||||
.. code-block:: scala
|
||||
|
||||
import akka.actor.Actor
|
||||
import akka.actor.ActorRegistered
import akka.actor.ActorUnregistered
import akka.event.EventHandler
|
||||
|
||||
class RegistryListener extends Actor {
|
||||
def receive = {
|
||||
case event: ActorRegistered =>
|
||||
EventHandler.info(this, "Actor registered: %s - %s".format(
|
||||
event.actor.actorClassName, event.actor.uuid))
|
||||
case event: ActorUnregistered =>
|
||||
// ...
|
||||
}
|
||||
}
|
||||
|
||||
The above actor can be added as a listener of registry events:
|
||||
|
||||
.. code-block:: scala
|
||||
|
||||
import akka.actor._
|
||||
import akka.actor.Actor._
|
||||
|
||||
val listener = actorOf[RegistryListener].start()
|
||||
registry.addListener(listener)
|
||||
573
akka-docs/pending/actors-scala.rst
Normal file
|
|
@ -0,0 +1,573 @@
|
|||
Actors (Scala)
|
||||
==============
|
||||
|
||||
Module stability: **SOLID**
|
||||
|
||||
The `Actor Model <http://en.wikipedia.org/wiki/Actor_model>`_ provides a higher level of abstraction for writing concurrent and distributed systems. It alleviates the developer from having to deal with explicit locking and thread management, making it easier to write correct concurrent and parallel systems. Actors were defined in the 1973 paper by Carl Hewitt but have been popularized by the Erlang language, and used for example at Ericsson with great success to build highly concurrent and reliable telecom systems.
|
||||
|
||||
The API of Akka’s Actors is similar to Scala Actors which has borrowed some of its syntax from Erlang.
|
||||
|
||||
The Akka 0.9 release introduced a new concept: ActorRef, which requires some refactoring. If you are new to Akka just read along, but if you have used Akka 0.6.x, 0.7.x and 0.8.x then you might be helped by the :doc:`0.8.x => 0.9.x migration guide <migration-guide-0.8.x-0.9.x>`
|
||||
|
||||
Creating Actors
|
||||
---------------
|
||||
|
||||
Actors can be created either by:
|
||||
|
||||
* Extending the Actor class and implementing the receive method.
|
||||
* Creating an anonymous actor using one of the actor methods.
|
||||
|
||||
Defining an Actor class
|
||||
^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Actor classes are implemented by extending the Actor class and implementing the ``receive`` method. The ``receive`` method should define a series of case statements (which together have the type ``PartialFunction[Any, Unit]``) that define which messages your Actor can handle, using standard Scala pattern matching, along with the implementation of how the messages should be processed.
|
||||
|
||||
Here is an example:
|
||||
|
||||
.. code-block:: scala
|
||||
|
||||
class MyActor extends Actor {
|
||||
def receive = {
|
||||
case "test" => EventHandler.info(this, "received test")
|
||||
case _ => EventHandler.info(this, "received unknown message")
|
||||
}
|
||||
}
|
||||
|
||||
Please note that the Akka Actor ``receive`` message loop is exhaustive, which is different compared to Erlang and Scala Actors. This means that you need to provide a pattern match for all messages that it can accept and if you want to be able to handle unknown messages then you need to have a default case as in the example above.
|
||||
|
||||
Creating Actors
|
||||
^^^^^^^^^^^^^^^
|
||||
|
||||
.. code-block:: scala
|
||||
|
||||
val myActor = Actor.actorOf[MyActor]
|
||||
myActor.start()
|
||||
|
||||
Normally you would want to import the ``actorOf`` method like this:
|
||||
|
||||
.. code-block:: scala
|
||||
|
||||
import akka.actor.Actor._
|
||||
|
||||
val myActor = actorOf[MyActor]
|
||||
|
||||
This avoids having to prefix it with ``Actor`` every time you use it.
|
||||
|
||||
You can also start it in the same statement:
|
||||
|
||||
.. code-block:: scala
|
||||
|
||||
val myActor = actorOf[MyActor].start()
|
||||
|
||||
The call to ``actorOf`` returns an instance of ``ActorRef``. This is a handle to the ``Actor`` instance which you can use to interact with the ``Actor``. The ``ActorRef`` is immutable and has a one to one relationship with the Actor it represents. The ``ActorRef`` is also serializable and network-aware. This means that you can serialize it, send it over the wire and use it on a remote host and it will still be representing the same Actor on the original node, across the network.
|
||||
|
||||
Creating Actors with non-default constructor
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
If your Actor has a constructor that takes parameters then you can't create it using ``actorOf[TYPE]``. Instead you can use a variant of ``actorOf`` that takes a call-by-name block in which you can create the Actor in any way you like.
|
||||
|
||||
Here is an example:
|
||||
|
||||
.. code-block:: scala
|
||||
|
||||
val a = actorOf(new MyActor(..)).start() // allows passing arguments to the MyActor constructor
|
||||
|
||||
Running a block of code asynchronously
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Here we create a light-weight actor-based thread that can be used to spawn off a task. Code blocks spawned off like this are always implicitly started, shut down and made eligible for garbage collection. The actor that is created "under the hood" is not reachable from the outside and there is no way of sending messages to it. It being an actor is only an implementation detail. It will only run the block in an event-based thread and exit once the block has run to completion.
|
||||
|
||||
.. code-block:: scala
|
||||
|
||||
spawn {
|
||||
... // do stuff
|
||||
}
|
||||
|
||||
Identifying Actors
|
||||
------------------
|
||||
|
||||
Each Actor has two fields:
|
||||
|
||||
* ``self.uuid``
|
||||
* ``self.id``
|
||||
|
||||
The difference is that the ``uuid`` is generated by the runtime, guaranteed to be unique, and can't be modified, while the ``id`` is modifiable by the user and defaults to the Actor class name. You can retrieve Actors by both UUID and ID using the ``ActorRegistry``, see the section further down for details.
|
||||
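A small sketch of how these two fields might be used together with the registry (the names here are illustrative only):

.. code-block:: scala

   import akka.actor.Actor

   class MyService extends Actor {
     self.id = "my-service"   // user-assignable id, defaults to the class name

     def receive = {
       case "ping" => self.reply("pong")
     }
   }

   val service = Actor.actorOf[MyService].start()

   // look the actor up again by id or by uuid
   val byId   = Actor.registry.actorsFor("my-service")
   val byUuid = Actor.registry.actorFor(service.uuid)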
|
||||
Messages and immutability
|
||||
-------------------------
|
||||
|
||||
**IMPORTANT**: Messages can be any kind of object but have to be immutable. Scala can’t enforce immutability (yet) so this has to be by convention. Primitives like String, Int, Boolean are always immutable. Apart from these the recommended approach is to use Scala case classes which are immutable (if you don’t explicitly expose the state) and works great with pattern matching at the receiver side.
|
||||
|
||||
Here is an example:
|
||||
|
||||
.. code-block:: scala
|
||||
|
||||
// define the case class
|
||||
case class Register(user: User)
|
||||
|
||||
// create a new case class message
|
||||
val message = Register(user)
|
||||
|
||||
Other good message types are ``scala.Tuple2``, ``scala.List`` and ``scala.Map``, which are all immutable and great for pattern matching.
|
||||
|
||||
Send messages
|
||||
-------------
|
||||
|
||||
Messages are sent to an Actor through one of the “bang” methods.
|
||||
|
||||
* ! means “fire-and-forget”, i.e. send a message asynchronously and return immediately.
|
||||
* !! means “send-and-reply-eventually”, i.e. send a message asynchronously and wait for a reply through a Future. Here you can specify a timeout. Using timeouts is very important. If no timeout is specified then the actor’s default timeout (set by the ``timeout`` variable in the actor) is used. This method returns an ``Option[Any]`` which will be either ``Some(result)`` if returning successfully or ``None`` if the call timed out.
|
||||
* !!! sends a message asynchronously and returns a ``Future``.
|
||||
|
||||
You can check if an Actor can handle a specific message by invoking the ``isDefinedAt`` method:
|
||||
|
||||
.. code-block:: scala
|
||||
|
||||
if (actor.isDefinedAt(message)) actor ! message
|
||||
else ...
|
||||
|
||||
Fire-forget
|
||||
^^^^^^^^^^^
|
||||
|
||||
This is the preferred way of sending messages. There is no blocking while waiting for a reply, which gives the best concurrency and scalability characteristics.
|
||||
|
||||
.. code-block:: scala
|
||||
|
||||
actor ! "Hello"
|
||||
|
||||
If invoked from within an Actor, then the sending actor reference will be implicitly passed along with the message and available to the receiving Actor in its ``sender: Option[ActorRef]`` member field. The receiver can use this to reply to the original sender, or use the ``reply(message: Any)`` method.
|
||||
|
||||
If invoked from an instance that is **not** an Actor there will be no implicit sender passed along with the message and you will get an IllegalStateException if you call ``self.reply(..)``.
|
||||
|
||||
Send-And-Receive-Eventually
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Using ``!!`` will send a message to the receiving Actor asynchronously but it will wait for a reply on a ``Future``, blocking the sender Actor until either:
|
||||
|
||||
* A reply is received, or
|
||||
* The Future times out
|
||||
|
||||
You can pass an explicit time-out to the ``!!`` method and if none is specified then the default time-out defined in the sender Actor will be used.
|
||||
|
||||
The ``!!`` method returns an ``Option[Any]`` which will be either ``Some(result)`` if returning successfully, or ``None`` if the call timed out.
|
||||
Here are some examples:
|
||||
|
||||
.. code-block:: scala
|
||||
|
||||
val resultOption = actor !! ("Hello", 1000)
|
||||
if (resultOption.isDefined) ... // handle reply
|
||||
else ... // handle timeout
|
||||
|
||||
val result: Option[String] = actor !! "Hello"
|
||||
result match {
|
||||
case Some(reply) => ... // handle reply
|
||||
case None => ... // handle timeout
|
||||
}
|
||||
|
||||
val result = (actor !! "Hello").getOrElse(throw new RuntimeException("TIMEOUT"))
|
||||
|
||||
(actor !! "Hello").foreach(result => ...) // handle result
|
||||
|
||||
Send-And-Receive-Future
|
||||
^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Using ``!!!`` will send a message to the receiving Actor asynchronously and will return a 'Future':
|
||||
|
||||
.. code-block:: scala
|
||||
|
||||
val future = actor !!! "Hello"
|
||||
|
||||
See `Futures <futures-scala>`_ for more information.
|
||||
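As a rough sketch, waiting for and reading the reply from the ``future`` above might look like this (the precise ``Future`` API is covered in the Futures documentation):

.. code-block:: scala

   future.await                 // block until the future is completed (or times out)
   val reply = future.result    // Option containing the reply, if one was produced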
|
||||
Forward message
|
||||
^^^^^^^^^^^^^^^
|
||||
|
||||
You can forward a message from one actor to another. This means that the original sender address/reference is maintained even though the message is going through a 'mediator'. This can be useful when writing actors that work as routers, load-balancers, replicators etc.
|
||||
|
||||
.. code-block:: scala
|
||||
|
||||
actor.forward(message)
|
||||
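In context, forwarding typically happens inside a mediator actor, roughly like this (a sketch; ``next`` is assumed to be the ``ActorRef`` to forward to):

.. code-block:: scala

   import akka.actor.{Actor, ActorRef}

   class ForwardingActor(next: ActorRef) extends Actor {
     def receive = {
       case message => next forward message // the original sender is preserved
     }
   }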
|
||||
Receive messages
|
||||
----------------
|
||||
|
||||
An Actor has to implement the ``receive`` method to receive messages:
|
||||
|
||||
.. code-block:: scala
|
||||
|
||||
protected def receive: PartialFunction[Any, Unit]
|
||||
|
||||
Note: Akka has an alias to the ``PartialFunction[Any, Unit]`` type called ``Receive`` (``akka.actor.Actor.Receive``), so you can use this type instead for clarity. But most often you don't need to spell it out.
|
||||
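A tiny sketch of using the alias:

.. code-block:: scala

   import akka.actor.Actor
   import akka.actor.Actor.Receive

   class MyAliasedActor extends Actor {
     def receive: Receive = {
       case message => () // ... handle the message
     }
   }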
|
||||
This method should return a ``PartialFunction``, i.e. a ‘match/case’ clause in which the message can be matched against the different case clauses using Scala pattern matching. Here is an example:
|
||||
|
||||
.. code-block:: scala
|
||||
|
||||
class MyActor extends Actor {
|
||||
def receive = {
|
||||
case "Hello" =>
|
||||
log.info("Received 'Hello'")
|
||||
|
||||
case _ =>
|
||||
throw new RuntimeException("unknown message")
|
||||
}
|
||||
}
|
||||
|
||||
Actor internal API
|
||||
------------------
|
||||
|
||||
The Actor trait contains almost no member fields or methods to invoke; you just use the Actor trait to implement the following:
|
||||
|
||||
#. ``receive`` message handler
|
||||
#. life-cycle callbacks:
|
||||
|
||||
#. preStart
|
||||
#. postStop
|
||||
#. preRestart
|
||||
#. postRestart
|
||||
|
||||
The ``Actor`` trait has one single member field (apart from the ``log`` field from the mixed in ``Logging`` trait):
|
||||
|
||||
.. code-block:: scala
|
||||
|
||||
val self: ActorRef
|
||||
|
||||
This ``self`` field holds a reference to the Actor's ``ActorRef`` and it is through this reference that you access the Actor's API. Here, for example, you find methods to reply to messages, send yourself messages, define timeouts and fault tolerance settings, start and stop the actor, and so on.
|
||||
|
||||
However, for convenience you can import these functions and fields like below, which will allow you to drop the ``self`` prefix:
|
||||
|
||||
.. code-block:: scala
|
||||
|
||||
class MyActor extends Actor {
|
||||
import self._
|
||||
id = ...
|
||||
dispatcher = ...
|
||||
start
|
||||
...
|
||||
}
|
||||
|
||||
But in this documentation we will always prefix the calls with ``self`` for clarity.
|
||||
|
||||
Let's start by looking how we can reply to messages in a convenient way using this ``ActorRef`` API.
|
||||
|
||||
Reply to messages
|
||||
-----------------
|
||||
|
||||
Reply using the reply and reply\_? methods
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
If you want to send a message back to the original sender of the message you just received then you can use the ``reply(..)`` method.
|
||||
|
||||
.. code-block:: scala
|
||||
|
||||
case request =>
|
||||
val result = process(request)
|
||||
self.reply(result)
|
||||
|
||||
In this case the ``result`` will be sent back to the Actor that sent the ``request``.
|
||||
|
||||
The ``reply`` method throws an ``IllegalStateException`` if unable to determine what to reply to, e.g. the sender is not an actor. You can also use the more forgiving ``reply_?`` method which returns ``true`` if reply was sent, and ``false`` if unable to determine what to reply to.
|
||||
|
||||
.. code-block:: scala
|
||||
|
||||
case request =>
|
||||
val result = process(request)
|
||||
if (self.reply_?(result)) ...// success
|
||||
else ... // handle failure
|
||||
|
||||
Reply using the sender reference
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
If the sender is an Actor then its reference will be implicitly passed along together with the message and will end up in the ``sender: Option[ActorRef]`` member field in the ``ActorRef``. This means that you can use this field to send a message back to the sender.
|
||||
|
||||
.. code-block:: scala
|
||||
|
||||
// receiver code
|
||||
case request =>
|
||||
val result = process(request)
|
||||
self.sender.get ! result
|
||||
|
||||
It's important to know that ``sender.get`` will throw an exception if the ``sender`` is not defined, i.e. the ``Option`` is ``None``. You can check whether it is defined by invoking the ``sender.isDefined`` method, but a more elegant solution is to use ``foreach``, which will only execute its operation if the sender is defined; if it is not, the operation is simply ignored.
|
||||
|
||||
.. code-block:: scala
|
||||
|
||||
// receiver code
|
||||
case request =>
|
||||
val result = process(request)
|
||||
self.sender.foreach(_ ! result)
|
||||
|
||||
The same pattern holds for using the ``senderFuture`` in the section below.
|
||||
|
||||
Reply using the sender future
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
If a message was sent with the ``!!`` or ``!!!`` methods, which both implement request-reply semantics using Futures, then you have the option of replying using the ``reply`` method as above, which will resolve the Future. But you can also get a reference to the Future directly and resolve it yourself, store it away to resolve later, or pass it on to some other Actor to resolve.
|
||||
|
||||
The reference to the Future resides in the ``senderFuture: Option[CompletableFuture[_]]`` member field in the ``ActorRef`` class.
|
||||
|
||||
Here is an example of how it can be used:
|
||||
|
||||
.. code-block:: scala
|
||||
|
||||
case request =>
|
||||
try {
|
||||
val result = process(request)
|
||||
self.senderFuture.foreach(_.completeWithResult(result))
|
||||
} catch {
|
||||
case e =>
|
||||
self.senderFuture.foreach(_.completeWithException(this, e))
|
||||
}
|
||||
|
||||
Reply using the channel
|
||||
^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
If you want a handle to an object that you can reply to later, you can use the ``Channel`` abstraction.
|
||||
Simply call ``self.channel`` and then you can pass that reference on to others or store it away until you want to reply, which you do with ``channel ! response``:
|
||||
|
||||
.. code-block:: scala
|
||||
|
||||
case request =>
|
||||
val result = process(request)
|
||||
self.channel ! result
|
||||
|
||||
.. code-block:: scala
|
||||
|
||||
case request =>
|
||||
friend forward self.channel
|
||||
|
||||
Summary of reply semantics and options
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
* ``self.reply(...)`` can be used to reply to an ``Actor`` or a ``Future``.
|
||||
* ``self.sender`` is a reference to the ``Actor`` you can reply to, if it exists
|
||||
* ``self.senderFuture`` is a reference to the ``Future`` you can reply to, if it exists
|
||||
* ``self.channel`` is a reference providing an abstraction to either ``self.sender`` or ``self.senderFuture`` if one is set, providing a single reference to store and reply to (the reference equivalent to the ``reply(...)`` method).
|
||||
* ``self.sender`` and ``self.senderFuture`` will never be set at the same time, as there can only be one reference to accept a reply.
|
||||
|
||||
Initial receive timeout
|
||||
-----------------------
|
||||
|
||||
A timeout mechanism can be used to receive a message when no initial message is received within a certain time. To receive this timeout you have to set the ``receiveTimeout`` property and declare a case handling the ``ReceiveTimeout`` object.
|
||||
|
||||
.. code-block:: scala
|
||||
|
||||
self.receiveTimeout = Some(30000L) // 30 seconds
|
||||
|
||||
def receive = {
|
||||
case "Hello" =>
|
||||
log.info("Received 'Hello'")
|
||||
case ReceiveTimeout =>
|
||||
throw new RuntimeException("received timeout")
|
||||
}
|
||||
|
||||
This mechanism also works for hotswapped receive functions. Every time a ``HotSwap`` is sent, the receive timeout is reset and rescheduled.
|
||||
|
||||
Starting actors
|
||||
---------------
|
||||
|
||||
Actors are started by invoking the ``start`` method.
|
||||
|
||||
.. code-block:: scala
|
||||
|
||||
val actor = actorOf[MyActor]
|
||||
actor.start()
|
||||
|
||||
You can create and start the ``Actor`` in a one-liner like this:
|
||||
|
||||
.. code-block:: scala
|
||||
|
||||
val actor = actorOf[MyActor].start()
|
||||
|
||||
When you start the ``Actor`` it will automatically call the ``def preStart`` callback method on the ``Actor`` trait. This is an excellent place to add initialization code for the actor.
|
||||
|
||||
.. code-block:: scala
|
||||
|
||||
override def preStart = {
|
||||
... // initialization code
|
||||
}
|
||||
|
||||
Stopping actors
|
||||
---------------
|
||||
|
||||
Actors are stopped by invoking the ``stop`` method.
|
||||
|
||||
.. code-block:: scala
|
||||
|
||||
actor.stop()
|
||||
|
||||
When ``stop`` is called, the ``def postStop`` callback method will be invoked. The ``Actor`` can use this callback to implement shutdown behavior.
|
||||
|
||||
.. code-block:: scala
|
||||
|
||||
override def postStop = {
|
||||
... // clean up resources
|
||||
}
|
||||
|
||||
You can shut down all Actors in the system by invoking:
|
||||
|
||||
.. code-block:: scala
|
||||
|
||||
Actor.registry.shutdownAll()
|
||||
|
||||
|
||||
PoisonPill
|
||||
----------
|
||||
|
||||
You can also send an actor the ``akka.actor.PoisonPill`` message, which will stop the actor when the message is processed.
|
||||
|
||||
If the sender is a ``Future`` (e.g. the message is sent with ``!!`` or ``!!!``), the ``Future`` will be completed with an ``akka.actor.ActorKilledException("PoisonPill")``.
|
||||
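A minimal sketch (``victim`` is assumed to be an already started ``ActorRef``):

.. code-block:: scala

   import akka.actor.PoisonPill

   // the actor will be stopped once it processes this message
   victim ! PoisonPill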
|
||||
HotSwap
|
||||
-------
|
||||
|
||||
Upgrade
|
||||
^^^^^^^
|
||||
|
||||
Akka supports hotswapping the Actor’s message loop (i.e. its implementation) at runtime. There are two ways you can do that:
|
||||
|
||||
* Send a ``HotSwap`` message to the Actor.
|
||||
* Invoke the ``become`` method from within the Actor.
|
||||
|
||||
Both of these take an ``ActorRef => PartialFunction[Any, Unit]`` that implements the new message handler. The hotswapped code is kept in a Stack which can be pushed and popped.
|
||||
|
||||
To hotswap the Actor body using the ``HotSwap`` message:
|
||||
|
||||
.. code-block:: scala
|
||||
|
||||
actor ! HotSwap( self => {
|
||||
case message => self.reply("hotswapped body")
|
||||
})
|
||||
|
||||
Using the ``HotSwap`` message for hotswapping has its limitations. You cannot replace the message loop with code that uses the Actor's ``self`` reference. If you need to do that then the ``become`` method is better.
|
||||
|
||||
To hotswap the Actor using ``become``:
|
||||
|
||||
.. code-block:: scala
|
||||
|
||||
def angry: Receive = {
|
||||
case "foo" => self reply "I am already angry!!!"
|
||||
case "bar" => become(happy)
|
||||
}
|
||||
|
||||
def happy: Receive = {
|
||||
case "bar" => self reply "I am already happy :-)"
|
||||
case "foo" => become(angry)
|
||||
}
|
||||
|
||||
def receive = {
|
||||
case "foo" => become(angry)
|
||||
case "bar" => become(happy)
|
||||
}
|
||||
|
||||
The ``become`` method is useful for many different things, but a particularly nice example is using it to implement a Finite State Machine (FSM): `Dining Hakkers <http://github.com/jboner/akka/blob/master/akka-samples/akka-sample-fsm/src/main/scala/DiningHakkersOnBecome.scala>`_
|
||||
|
||||
Here is another cute little example of ``become`` and ``unbecome`` in action:
|
||||
|
||||
.. code-block:: scala
|
||||
|
||||
case object Swap
|
||||
class Swapper extends Actor {
|
||||
def receive = {
|
||||
case Swap =>
|
||||
println("Hi")
|
||||
become {
|
||||
case Swap =>
|
||||
println("Ho")
|
||||
unbecome() // resets the latest 'become' (just for fun)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
val swap = actorOf[Swapper].start()
|
||||
|
||||
swap ! Swap // prints Hi
|
||||
swap ! Swap // prints Ho
|
||||
swap ! Swap // prints Hi
|
||||
swap ! Swap // prints Ho
|
||||
swap ! Swap // prints Hi
|
||||
swap ! Swap // prints Ho
|
||||
|
||||
Encoding Scala Actors nested receives without accidentally leaking memory: `UnnestedReceive <https://gist.github.com/797035>`_
|
||||
------------------------------------------------------------------------------------------------------------------------------
|
||||
|
||||
Downgrade
|
||||
^^^^^^^^^
|
||||
|
||||
Since the hotswapped code is pushed to a Stack you can downgrade the code as well. There are two ways you can do that:
|
||||
|
||||
* Send the Actor a ``RevertHotSwap`` message
|
||||
* Invoke the ``unbecome`` method from within the Actor.
|
||||
|
||||
Both of these will pop the Stack and replace the Actor's implementation with the ``PartialFunction[Any, Unit]`` that is at the top of the Stack.
|
||||
|
||||
Revert the Actor body using the ``RevertHotSwap`` message:
|
||||
|
||||
.. code-block:: scala
|
||||
|
||||
actor ! RevertHotSwap
|
||||
|
||||
Revert the Actor body using the ``unbecome`` method:
|
||||
|
||||
.. code-block:: scala
|
||||
|
||||
def receive: Receive = {
|
||||
case "revert" => unbecome()
|
||||
}
|
||||
|
||||
Killing an Actor
|
||||
----------------
|
||||
|
||||
You can kill an actor by sending a ``Kill`` message. This will restart the actor through regular supervisor semantics.
|
||||
|
||||
Use it like this:
|
||||
|
||||
.. code-block:: scala
|
||||
|
||||
// kill the actor called 'victim'
|
||||
victim ! Kill
|
||||
|
||||
Actor life-cycle
|
||||
----------------
|
||||
|
||||
The actor has a well-defined non-circular life-cycle.
|
||||
|
||||
::
|
||||
|
||||
NEW (newly created actor) - can't receive messages (yet)
|
||||
=> STARTED (when 'start' is invoked) - can receive messages
|
||||
=> SHUT DOWN (when 'exit' or 'stop' is invoked) - can't do anything
|
||||
|
||||
Extending Actors using PartialFunction chaining
|
||||
-----------------------------------------------
|
||||
|
||||
A slightly advanced but very useful way of defining a base message handler and then extending it, either through inheritance or delegation, is to use ``PartialFunction.orElse`` chaining.
|
||||
|
||||
In generic base Actor:
|
||||
|
||||
.. code-block:: scala
|
||||
|
||||
import akka.actor.Actor.Receive
|
||||
|
||||
abstract class GenericActor extends Actor {
|
||||
// to be defined in subclassing actor
|
||||
def specificMessageHandler: Receive
|
||||
|
||||
// generic message handler
|
||||
def genericMessageHandler: Receive = {
|
||||
case event => printf("generic: %s\n", event)
|
||||
}
|
||||
|
||||
def receive = specificMessageHandler orElse genericMessageHandler
|
||||
}
|
||||
|
||||
In subclassing Actor:
|
||||
|
||||
.. code-block:: scala
|
||||
|
||||
class SpecificActor extends GenericActor {
|
||||
def specificMessageHandler = {
|
||||
case event: MyMsg => printf("specific: %s\n", event.subject)
|
||||
}
|
||||
}
|
||||
|
||||
case class MyMsg(subject: String)
|
||||
121
akka-docs/pending/agents-scala.rst
Normal file
|
|
@ -0,0 +1,121 @@
|
|||
Agents (Scala)
|
||||
==============
|
||||
|
||||
Module stability: **SOLID**
|
||||
|
||||
Agents in Akka were inspired by `agents in Clojure <http://clojure.org/agents>`_.
|
||||
|
||||
Agents provide asynchronous change of individual locations. Agents are bound to a single storage location for their lifetime, and only allow mutation of that location (to a new state) to occur as a result of an action. Update actions are functions that are asynchronously applied to the Agent's state and whose return value becomes the Agent's new state. The state of an Agent should be immutable.
|
||||
|
||||
While updates to Agents are asynchronous, the state of an Agent is always immediately available for reading by any thread (using ``get`` or ``apply``) without any messages.
|
||||
|
||||
Agents are reactive. The update actions of all Agents get interleaved amongst threads in a thread pool. At any point in time, at most one ``send`` action for each Agent is being executed. Actions dispatched to an agent from another thread will occur in the order they were sent, potentially interleaved with actions dispatched to the same agent from other sources.
|
||||
|
||||
If an Agent is used within an enclosing transaction, then it will participate in that transaction. Agents are integrated with the STM - any dispatches made in a transaction are held until that transaction commits, and are discarded if it is retried or aborted.
|
||||
|
||||
Creating and stopping Agents
|
||||
----------------------------
|
||||
|
||||
Agents are created by invoking ``Agent(value)`` passing in the Agent's initial value.
|
||||
|
||||
.. code-block:: scala
|
||||
|
||||
val agent = Agent(5)
|
||||
|
||||
An Agent will be running until you invoke ``close`` on it. Then it will be eligible for garbage collection (unless you hold on to it in some way).
|
||||
|
||||
.. code-block:: scala
|
||||
|
||||
agent.close
|
||||
|
||||
Updating Agents
|
||||
---------------
|
||||
|
||||
You update an Agent by sending a function that transforms the current value or by sending just a new value. The Agent will apply the new value or function atomically and asynchronously. The update is done in a fire-forget manner and you are only guaranteed that it will be applied. There is no guarantee of when the update will be applied but dispatches to an Agent from a single thread will occur in order. You apply a value or a function by invoking the ``send`` function.
|
||||
|
||||
.. code-block:: scala
|
||||
|
||||
// send a value
|
||||
agent send 7
|
||||
|
||||
// send a function
|
||||
agent send (_ + 1)
|
||||
agent send (_ * 2)
|
||||
|
||||
You can also dispatch a function to update the internal state but on its own thread. This does not use the reactive thread pool and can be used for long-running or blocking operations. You do this with the ``sendOff`` method. Dispatches using either ``sendOff`` or ``send`` will still be executed in order.
|
||||
|
||||
.. code-block:: scala
|
||||
|
||||
// sendOff a function
|
||||
agent sendOff (longRunningOrBlockingFunction)
|
||||
|
||||
Reading an Agent's value
|
||||
------------------------
|
||||
|
||||
Agents can be dereferenced, i.e. you can get an Agent's value, by invoking the Agent with parentheses like this:
|
||||
|
||||
.. code-block:: scala
|
||||
|
||||
val result = agent()
|
||||
|
||||
Or by using the get method.
|
||||
|
||||
.. code-block:: scala
|
||||
|
||||
val result = agent.get
|
||||
|
||||
Reading an Agent's current value does not involve any message passing and happens immediately. So while updates to an Agent are asynchronous, reading the state of an Agent is synchronous.
|
||||
|
||||
Awaiting an Agent's value
|
||||
-------------------------
|
||||
|
||||
It is also possible to read the value after all currently queued ``send``\s have completed. You can do this with ``await``:
|
||||
|
||||
.. code-block:: scala
|
||||
|
||||
val result = agent.await
|
||||
|
||||
You can also get a ``Future`` to this value, that will be completed after the currently queued updates have completed:
|
||||
|
||||
.. code-block:: scala
|
||||
|
||||
val future = agent.future
|
||||
// ...
|
||||
val result = future.await.result.get
|
||||
|
||||
Transactional Agents
|
||||
--------------------
|
||||
|
||||
If an Agent is used within an enclosing transaction, then it will participate in that transaction. If you send to an Agent within a transaction then the dispatch to the Agent will be held until that transaction commits, and discarded if the transaction is aborted.
|
||||
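As a sketch of what this might look like (assuming the ``agent`` from the examples above and the STM's ``atomic`` block described in the STM documentation):

.. code-block:: scala

   import akka.stm._

   atomic {
     agent send (_ * 2)   // the send is held until the transaction commits
   }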
|
||||
Monadic usage
|
||||
-------------
|
||||
|
||||
Agents are also monadic, allowing you to compose operations using for-comprehensions. In a monadic usage, new Agents are created leaving the original Agents untouched. So the old values (Agents) are still available as-is. They are so-called 'persistent'.
|
||||
|
||||
Example of a monadic usage:
|
||||
|
||||
.. code-block:: scala
|
||||
|
||||
val agent1 = Agent(3)
|
||||
val agent2 = Agent(5)
|
||||
|
||||
// uses foreach
var result = 0
|
||||
for (value <- agent1) {
|
||||
result = value + 1
|
||||
}
|
||||
|
||||
// uses map
|
||||
val agent3 =
|
||||
for (value <- agent1) yield value + 1
|
||||
|
||||
// uses flatMap
|
||||
val agent4 = for {
|
||||
value1 <- agent1
|
||||
value2 <- agent2
|
||||
} yield value1 + value2
|
||||
|
||||
agent1.close
|
||||
agent2.close
|
||||
agent3.close
|
||||
agent4.close
|
||||
125
akka-docs/pending/articles.rst
Normal file
|
|
@ -0,0 +1,125 @@
|
|||
Articles & Presentations
|
||||
========================
|
||||
|
||||
Videos
|
||||
------
|
||||
|
||||
`Functional Programming eXchange - March 2011 <http://skillsmatter.com/podcast/scala/simpler-scalability-fault-tolerance-concurrency-remoting-through-actors>`_
|
||||
|
||||
`NE Scala - Feb 2011 <http://vimeo.com/20297968>`_
|
||||
|
||||
`JFokus - Feb 2011 <http://79.136.112.58/ability/show/xaimkwdli/a2_20110216_1110/mainshow.asp?STREAMID=1>`_.
|
||||
|
||||
`London Scala User Group - Oct 2010 <http://skillsmatter.com/podcast/scala/akka-simpler-scalability-fault-tolerance-concurrency-remoting-through-actors>`_
|
||||
|
||||
`Akka LinkedIn Tech Talk - Sept 2010 <http://sna-projects.com/blog/2010/10/akka>`_
|
||||
|
||||
`Akka talk at Scala Days - March 2010 <http://days2010.scala-lang.org/node/138/162>`_
|
||||
|
||||
Articles
|
||||
--------
|
||||
|
||||
`Remote Actor Class Loading with Akka <https://www.earldouglas.com/remote-actor-class-loading-with-akka>`_
|
||||
|
||||
`Akka Producer Actors: New Features and Best Practices <http://krasserm.blogspot.com/2011/02/akka-producer-actor-new-features-and.html>`_
|
||||
|
||||
`Akka Consumer Actors: New Features and Best Practices <http://krasserm.blogspot.com/2011/02/akka-consumer-actors-new-features-and.html>`_
|
||||
|
||||
`Compute Grid with Cloudy Akka <http://letitcrash.com/compute-grid-with-cloudy-akka>`_
|
||||
|
||||
`Clustered Actors with Cloudy Akka <http://letitcrash.com/clustered-actors-with-cloudy-akka>`_
|
||||
|
||||
`Unit testing Akka Actors with the TestKit <http://roestenburg.agilesquad.com/2011/02/unit-testing-akka-actors-with-testkit_12.html>`_
|
||||
|
||||
`Starting with Akka 1.0 <http://roestenburg.agilesquad.com/2011/02/starting-with-akka-10.html>`_
|
||||
|
||||
`Akka Does Async <http://altdevblogaday.com/akka-does-async>`_
|
||||
|
||||
`CQRS with Akka actors and functional domain models <http://debasishg.blogspot.com/2011/01/cqrs-with-akka-actors-and-functional.html>`_
|
||||
|
||||
`High Level Concurrency with JRuby and Akka Actors <http://metaphysicaldeveloper.wordpress.com/2010/12/16/high-level-concurrency-with-jruby-and-akka-actors/>`_
|
||||
|
||||
`Container-managed actor dispatchers <http://vasilrem.com/blog/software-development/container-managed-actor-dispatchers/>`_
|
||||
|
||||
`Even simpler scalability with Akka through RegistryActor <http://vasilrem.com/blog/software-development/even-simpler-scalability-with-akka-through-registryactor/>`_
|
||||
|
||||
`FSM in Akka (in Vietnamese) <http://cntt.tv/nodes/show/559>`_
|
||||
|
||||
`Repeater and Idempotent Receiver implementation in Akka <http://roestenburg.agilesquad.com/2010/09/repeater-and-idempotent-receiver.html>`_
|
||||
|
||||
`EDA Akka as EventBus <http://fornax-sculptor.blogspot.com/2010/08/eda-akka-as-eventbus.html>`_
|
||||
|
||||
`Upgrading examples to Akka master (0.10) and Scala 2.8.0 Final <http://roestenburg.agilesquad.com/2010/07/upgrading-to-akka-master-010-and-scala.html>`_
|
||||
|
||||
`Testing Akka Remote Actor using Serializable.Protobuf <http://roestenburg.agilesquad.com/2010/05/testing-akka-remote-actor-using.html>`_
|
||||
|
||||
`Flexible load balancing with Akka in Scala <http://vasilrem.com/blog/software-development/flexible-load-balancing-with-akka-in-scala/>`_
|
||||
|
||||
`Eventually everything, and actors <http://rossputo.blogspot.com/2010/05/eventually-everything-and-actors.html>`_
|
||||
|
||||
`Join messages with Akka <http://roestenburg.agilesquad.com/2010/05/join-messages-with-akka.html>`_
|
||||
|
||||
`Starting with Akka part 2, Intellij IDEA, Test Driven Development <http://roestenburg.agilesquad.com/2010/05/starting-with-akka-part-2-intellij-idea.htm>`_
|
||||
|
||||
`Starting with Akka and Scala <http://roestenburg.agilesquad.com/2010/04/starting-with-akka-and-scala.html>`_
|
||||
|
||||
`PubSub using Redis and Akka Actors <http://debasishg.blogspot.com/2010/04/pubsub-with-redis-and-akka-actors.html>`_
|
||||
|
||||
`Akka's grown-up hump <http://krasserm.blogspot.com/2010/08/akkas-grown-up-hump.html>`_
|
||||
|
||||
`Akka features for application integration <http://krasserm.blogspot.com/2010/04/akka-features-for-application.html>`_
|
||||
|
||||
`Load Balancing Actors with Work Stealing Techniques <http://janvanbesien.blogspot.com/2010/03/load-balancing-actors-with-work.html>`_
|
||||
|
||||
`Domain Services and Bounded Context using Akka - Part 2 <http://debasishg.blogspot.com/2010/03/domain-services-and-bounded-context.html>`_
|
||||
|
||||
`Thinking Asynchronous - Domain Modeling using Akka Transactors - Part 1 <http://debasishg.blogspot.com/2010/03/thinking-asynchronous-domain-modeling.html>`_
|
||||
|
||||
`Introducing Akka – Simpler Scalability, Fault-Tolerance, Concurrency & Remoting through Actors <http://jonasboner.com/2010/01/04/introducing-akka.html>`_
|
||||
|
||||
`Using Cassandra with Scala and Akka <http://codemonkeyism.com/cassandra-scala-akka/>`_
|
||||
|
||||
`No Comet, Hacking with WebSocket and Akka <http://debasishg.blogspot.com/2009/12/no-comet-hacking-with-websocket-and.html>`_
|
||||
|
||||
`MongoDB for Akka Persistence <http://debasishg.blogspot.com/2009/08/mongodb-for-akka-persistence.html>`_
|
||||
|
||||
`Pluggable Persistent Transactors with Akka <http://debasishg.blogspot.com/2009/10/pluggable-persistent-transactors-with.html>`_
|
||||
|
||||
`Enterprise scala actors: introducing the Akka framework <http://blog.xebia.com/2009/10/22/scala-actors-for-the-enterprise-introducing-the-akka-framework/>`_
|
||||
|
||||
Books
|
||||
-----
|
||||
|
||||
`Akka and Camel <http://www.manning.com/ibsen/appEsample.pdf>`_ (appendix E of `Camel in Action <http://www.manning.com/ibsen/>`_)
|
||||
`Ett första steg i Scala <http://www.studentlitteratur.se/o.o.i.s?id=2474&artnr=33847-01&csid=66&mp=4918>`_ (Kapitel "Aktörer och Akka") (en. "A first step in Scala", chapter "Actors and Akka", book in Swedish)
|
||||
|
||||
Presentations
|
||||
-------------
|
||||
|
||||
`Slides from Akka talk at Scala Days 2010, good short intro to Akka <http://www.slideshare.net/jboner/akka-scala-days-2010>`_
|
||||
|
||||
`Akka: Simpler Scalability, Fault-Tolerance, Concurrency & Remoting through Actors <http://www.slideshare.net/jboner/akka-simpler-scalability-faulttolerance-concurrency-remoting-through-actors>`_
|
||||
|
||||
`<http://ccombs.net/storage/presentations/Akka_High_Level_Abstractions.pdf>`_
|
||||
|
||||
`<https://github.com/deanwampler/Presentations/tree/master/akka-intro/>`_
|
||||
|
||||
Podcasts
|
||||
--------
|
||||
|
||||
`Episode 16 – Scala and Akka an Interview with Jonas Boner <http://basementcoders.com/?p=711>`_
|
||||
|
||||
`Jonas Boner on the Akka framework, Scala, and highly scalable applications <http://techcast.chariotsolutions.com/index.php?post_id=557314>`_
|
||||
|
||||
Interviews
|
||||
----------
|
||||
|
||||
`JetBrains/DZone interview: Talking about Akka, Scala and life with Jonas Bonér <http://jetbrains.dzone.com/articles/talking-about-akka-scala-and>`_
|
||||
|
||||
`Artima interview of Jonas on Akka 1.0 <http://www.artima.com/scalazine/articles/akka_jonas_boner.html>`_
|
||||
|
||||
`InfoQ interview of Jonas on Akka 1.0 <http://www.infoq.com/news/2011/02/akka10>`_
|
||||
|
||||
`InfoQ interview of Jonas on Akka 0.7 <http://www.infoq.com/news/2010/03/akka-10>`_
|
||||
|
||||
`<http://jaxenter.com/we-ve-added-tons-of-new-features-since-0-10-33360.html>`_
|
||||
31
akka-docs/pending/benchmarks.rst
Normal file
|
|
@ -0,0 +1,31 @@
|
|||
Benchmarks
|
||||
==========
|
||||
|
||||
Scalability, Throughput and Latency benchmark
|
||||
---------------------------------------------
|
||||
|
||||
`<image:akka-sample-trading-throughput.png>`_
|
||||
|
||||
Simple Trading system.
|
||||
* `Here is the result with some graphs <https://spreadsheets.google.com/ccc?key=0AqkhZTxa6-dOdERaQnNvOEZpMDdnazRWOVNHMWIxZ0E&hl=en&authkey=CLyksoEI#gid=0>`_
|
||||
* `Here is the article <http://blog.jayway.com/2010/08/10/yet-another-akka-benchmark/>`_
|
||||
* `Here is the code <http://github.com/patriknw/akka-sample-trading>`_
|
||||
|
||||
Compares:
|
||||
* Synchronous Scala solution
|
||||
* Scala library Actors

  * Fire-forget
  * Request-reply

* Akka

  * Request-reply
  * Fire-forget with default dispatcher
  * Fire-forget with Hawt dispatcher
|
||||
|
||||
Performance benchmark
|
||||
---------------------
|
||||
|
||||
Benchmarking Akka against:
|
||||
* Scala Library Actors
|
||||
* Raw Java concurrency
|
||||
* Jetlang (Java actors lib)
|
||||
`<http://github.com/jboner/akka-bench>`_
|
||||
319
akka-docs/pending/building-akka.rst
Normal file
|
|
@ -0,0 +1,319 @@
|
|||
Building Akka
|
||||
=============
|
||||
|
||||
This page describes how to build and run Akka from the latest source code.
|
||||
|
||||
Get the source code
|
||||
-------------------
|
||||
|
||||
Akka uses `Git <http://git-scm.com>`_ and is hosted at `Github <http://github.com>`_.
|
||||
|
||||
You first need Git installed on your machine. You can then clone the source repositories:
|
||||
* Akka repository from `<http://github.com/jboner/akka>`_
|
||||
* Akka Modules repository from `<http://github.com/jboner/akka-modules>`_
|
||||
|
||||
For example:
|
||||
|
||||
::
|
||||
|
||||
git clone git://github.com/jboner/akka.git
|
||||
git clone git://github.com/jboner/akka-modules.git
|
||||
|
||||
If you have already cloned the repositories previously then you can update the code with ``git pull``:
|
||||
|
||||
::
|
||||
|
||||
git pull origin master
|
||||
|
||||
SBT - Simple Build Tool
|
||||
-----------------------
|
||||
|
||||
Akka is using the excellent `SBT <http://code.google.com/p/simple-build-tool>`_ build system. So the first thing you have to do is to download and install SBT. You can read more about how to do that `here <http://code.google.com/p/simple-build-tool/wiki/Setup>`_ .
|
||||
|
||||
The SBT commands that you'll need to build Akka are all included below. If you want to find out more about SBT and using it for your own projects do read the `SBT documentation <http://code.google.com/p/simple-build-tool/wiki/RunningSbt>`_.
|
||||
|
||||
The Akka SBT build file is ``project/build/AkkaProject.scala`` with some properties defined in ``project/build.properties``.
|
||||
|
||||
----
|
||||
|
||||
Building Akka
|
||||
-------------
|
||||
|
||||
First make sure that you are in the akka code directory:
|
||||
|
||||
::
|
||||
|
||||
cd akka
|
||||
|
||||
Fetching dependencies
|
||||
^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
SBT does not fetch dependencies automatically. You need to manually do this with the ``update`` command:
|
||||
|
||||
::
|
||||
|
||||
sbt update
|
||||
|
||||
Once finished, all the dependencies for Akka will be in the ``lib_managed`` directory under each module: akka-actor, akka-stm, and so on.
|
||||
|
||||
Note: you only need to run ``update`` the first time you are building the code, or when the dependencies have changed.
|
||||
|
||||
Building
|
||||
^^^^^^^^
|
||||
|
||||
To compile all the Akka core modules use the ``compile`` command:
|
||||
|
||||
::
|
||||
|
||||
sbt compile
|
||||
|
||||
You can run all tests with the ``test`` command:
|
||||
|
||||
::
|
||||
|
||||
sbt test
|
||||
|
||||
If compiling and testing are successful then you have everything working for the latest Akka development version.
|
||||
|
||||
Publish to local Ivy repository
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
If you want to deploy the artifacts to your local Ivy repository (for example, to use from an SBT project) use the ``publish-local`` command:
|
||||
|
||||
::
|
||||
|
||||
sbt publish-local
|
||||
|
||||
Publish to local Maven repository
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
If you want to deploy the artifacts to your local Maven repository use:
|
||||
|
||||
::
|
||||
|
||||
sbt publish-local publish
|
||||
|
||||
SBT interactive mode
|
||||
^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Note that in the examples above we are calling ``sbt compile`` and ``sbt test`` and so on. SBT also has an interactive mode. If you just run ``sbt`` you enter the interactive SBT prompt and can enter the commands directly. This saves starting up a new JVM instance for each command and can be much faster and more convenient.
|
||||
|
||||
For example, building Akka as above is more commonly done like this:
|
||||
|
||||
::
|
||||
|
||||
% sbt
|
||||
[info] Building project akka 1.1-SNAPSHOT against Scala 2.8.1
|
||||
[info] using AkkaParentProject with sbt 0.7.5.RC0 and Scala 2.7.7
|
||||
> update
|
||||
[info]
|
||||
[info] == akka-actor / update ==
|
||||
...
|
||||
[success] Successful.
|
||||
[info]
|
||||
[info] Total time ...
|
||||
> compile
|
||||
...
|
||||
> test
|
||||
...
|
||||
|
||||
SBT batch mode
|
||||
^^^^^^^^^^^^^^
|
||||
|
||||
It's also possible to combine commands in a single call. For example, updating, testing, and publishing Akka to the local Ivy repository can be done with:
|
||||
|
||||
::
|
||||
|
||||
sbt update test publish-local
|
||||
|
||||
----
|
||||
|
||||
Building Akka Modules
|
||||
---------------------
|
||||
|
||||
To build Akka Modules first build and publish Akka to your local Ivy repository as described above. Or using:
|
||||
|
||||
::
|
||||
|
||||
cd akka
|
||||
sbt update publish-local
|
||||
|
||||
Then you can build Akka Modules using the same steps as building Akka. First update to get all dependencies (including the Akka core modules), then compile, test, or publish-local as needed. For example:
|
||||
|
||||
::
|
||||
|
||||
cd akka-modules
|
||||
sbt update publish-local
|
||||
|
||||
Microkernel distribution
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
To build the Akka Modules microkernel (the same as the Akka Modules distribution download) use the ``dist`` command:
|
||||
|
||||
::
|
||||
|
||||
sbt dist
|
||||
|
||||
The distribution zip can be found in the dist directory and is called ``akka-modules-{version}.zip``.
|
||||
|
||||
To run the microkernel, unzip the zip file, change into the unzipped directory, set the ``AKKA_HOME`` environment variable, and run the main jar file. For example:
|
||||
|
||||
::
|
||||
|
||||
unzip dist/akka-modules-1.1-SNAPSHOT.zip
|
||||
cd akka-modules-1.1-SNAPSHOT
|
||||
export AKKA_HOME=`pwd`
|
||||
java -jar akka-modules-1.1-SNAPSHOT.jar
|
||||
|
||||
The microkernel will boot up and install the sample applications that reside in the distribution's ``deploy`` directory. You can deploy your own applications into the ``deploy`` directory as well.
|
||||
|
||||
----
|
||||
|
||||
Scripts
|
||||
-------
|
||||
|
||||
Linux/Unix init script
|
||||
^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Here is a Linux/Unix init script that can be very useful:
|
||||
|
||||
`<http://github.com/jboner/akka/blob/master/scripts/akka-init-script.sh>`_
|
||||
|
||||
Copy and modify as needed.
|
||||
|
||||
Simple startup shell script
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
This little script might help a bit. Just make sure you have the Akka distribution in the '$AKKA_HOME/dist' directory and then invoke this script to start up the kernel. The distribution is created in the './dist' dir for you if you invoke 'sbt dist'.
|
||||
|
||||
`<http://github.com/jboner/akka/blob/master/scripts/run_akka.sh>`_
|
||||
|
||||
Copy and modify as needed.
|
||||
|
||||
----
|
||||
|
||||
Dependencies
|
||||
------------
|
||||
|
||||
If you are managing dependencies by hand you can find out what all the compile dependencies are for each module by looking in the ``lib_managed/compile`` directories. For example, you can run this to create a listing of dependencies (providing you have the source code and have run ``sbt update``):
|
||||
|
||||
::
|
||||
|
||||
cd akka
|
||||
ls -1 */lib_managed/compile
|
||||
|
||||
Here are the dependencies used by the Akka core modules.
|
||||
|
||||
akka-actor
|
||||
^^^^^^^^^^
|
||||
|
||||
* No dependencies
|
||||
|
||||
akka-stm
|
||||
^^^^^^^^
|
||||
|
||||
* Depends on akka-actor
|
||||
* multiverse-alpha-0.6.2.jar
|
||||
|
||||
akka-typed-actor
|
||||
^^^^^^^^^^^^^^^^
|
||||
|
||||
* Depends on akka-stm
|
||||
* aopalliance-1.0.jar
|
||||
* aspectwerkz-2.2.3.jar
|
||||
* guice-all-2.0.jar
|
||||
|
||||
akka-remote
|
||||
^^^^^^^^^^^
|
||||
|
||||
* Depends on akka-typed-actor
|
||||
* commons-codec-1.4.jar
|
||||
* commons-io-2.0.1.jar
|
||||
* dispatch-json_2.8.1-0.7.8.jar
|
||||
* guice-all-2.0.jar
|
||||
* h2-lzf-1.0.jar
|
||||
* jackson-core-asl-1.7.1.jar
|
||||
* jackson-mapper-asl-1.7.1.jar
|
||||
* junit-4.8.1.jar
|
||||
* netty-3.2.3.Final.jar
|
||||
* objenesis-1.2.jar
|
||||
* protobuf-java-2.3.0.jar
|
||||
* sjson_2.8.1-0.9.1.jar
|
||||
|
||||
akka-http
|
||||
^^^^^^^^^
|
||||
|
||||
* Depends on akka-remote
|
||||
* jsr250-api-1.0.jar
|
||||
* jsr311-api-1.1.jar
|
||||
|
||||
----
|
||||
Here are the dependencies used by the Akka modules.
|
||||
|
||||
akka-amqp
|
||||
^^^^^^^^^
|
||||
|
||||
* Depends on akka-remote
|
||||
* commons-cli-1.1.jar
|
||||
* amqp-client-1.8.1.jar
|
||||
|
||||
akka-camel
|
||||
^^^^^^^^^^
|
||||
|
||||
* Depends on akka-actor
|
||||
* camel-core-2.5.0.jar
|
||||
* commons-logging-api-1.1.jar
|
||||
* commons-management-1.0.jar
|
||||
|
||||
akka-camel-typed
|
||||
^^^^^^^^^^^^^^^^
|
||||
|
||||
* Depends on akka-typed-actor
|
||||
* camel-core-2.5.0.jar
|
||||
* commons-logging-api-1.1.jar
|
||||
* commons-management-1.0.jar
|
||||
|
||||
akka-spring
|
||||
^^^^^^^^^^^
|
||||
|
||||
* Depends on akka-camel
|
||||
* akka-camel-typed
|
||||
* commons-logging-1.1.1.jar
|
||||
* spring-aop-3.0.4.RELEASE.jar
|
||||
* spring-asm-3.0.4.RELEASE.jar
|
||||
* spring-beans-3.0.4.RELEASE.jar
|
||||
* spring-context-3.0.4.RELEASE.jar
|
||||
* spring-core-3.0.4.RELEASE.jar
|
||||
* spring-expression-3.0.4.RELEASE.jar
|
||||
|
||||
akka-scalaz
|
||||
^^^^^^^^^^^
|
||||
|
||||
* Depends on akka-actor
|
||||
* hawtdispatch-1.1.jar
|
||||
* hawtdispatch-scala-1.1.jar
|
||||
* scalaz-core_2.8.1-6.0-SNAPSHOT.jar
|
||||
|
||||
akka-kernel
|
||||
^^^^^^^^^^^
|
||||
|
||||
* Depends on akka-http, akka-amqp, and akka-spring
|
||||
* activation-1.1.jar
|
||||
* asm-3.1.jar
|
||||
* jaxb-api-2.1.jar
|
||||
* jaxb-impl-2.1.12.jar
|
||||
* jersey-core-1.3.jar
|
||||
* jersey-json-1.3.jar
|
||||
* jersey-scala-1.3.jar
|
||||
* jersey-server-1.3.jar
|
||||
* jettison-1.1.jar
|
||||
* jetty-continuation-7.1.6.v20100715.jar
|
||||
* jetty-http-7.1.6.v20100715.jar
|
||||
* jetty-io-7.1.6.v20100715.jar
|
||||
* jetty-security-7.1.6.v20100715.jar
|
||||
* jetty-server-7.1.6.v20100715.jar
|
||||
* jetty-servlet-7.1.6.v20100715.jar
|
||||
* jetty-util-7.1.6.v20100715.jar
|
||||
* jetty-xml-7.1.6.v20100715.jar
|
||||
* servlet-api-2.5.jar
|
||||
* stax-api-1.0.1.jar
|
||||
55
akka-docs/pending/buildr.rst
Normal file
|
|
@ -0,0 +1,55 @@
|
|||
Using Akka in a Buildr project
|
||||
==============================
|
||||
|
||||
This is an example on how to use Akka in a project based on Buildr
|
||||
|
||||
.. code-block:: ruby
|
||||
|
||||
require 'buildr/scala'
|
||||
|
||||
VERSION_NUMBER = "0.6"
|
||||
GROUP = "se.scalablesolutions.akka"
|
||||
|
||||
repositories.remote << "http://www.ibiblio.org/maven2/"
|
||||
repositories.remote << "http://www.lag.net/repo"
|
||||
repositories.remote << "http://multiverse.googlecode.com/svn/maven-repository/releases"
|
||||
|
||||
AKKA = group('akka-core', 'akka-comet', 'akka-util','akka-kernel', 'akka-rest', 'akka-util-java',
|
||||
'akka-security','akka-persistence-common', 'akka-persistence-redis',
|
||||
'akka-amqp',
|
||||
:under=> 'se.scalablesolutions.akka',
|
||||
:version => '0.6')
|
||||
ASPECTJ = "org.codehaus.aspectwerkz:aspectwerkz-nodeps-jdk5:jar:2.1"
|
||||
SBINARY = "sbinary:sbinary:jar:0.3"
|
||||
COMMONS_IO = "commons-io:commons-io:jar:1.4"
|
||||
CONFIGGY = "net.lag:configgy:jar:1.4.7"
|
||||
JACKSON = group('jackson-core-asl', 'jackson-mapper-asl',
|
||||
:under=> 'org.codehaus.jackson',
|
||||
:version => '1.2.1')
|
||||
MULTIVERSE = "org.multiverse:multiverse-alpha:jar:jar-with-dependencies:0.3"
|
||||
NETTY = "org.jboss.netty:netty:jar:3.2.0.ALPHA2"
|
||||
PROTOBUF = "com.google.protobuf:protobuf-java:jar:2.2.0"
|
||||
REDIS = "com.redis:redisclient:jar:1.0.1"
|
||||
SJSON = "sjson.json:sjson:jar:0.3"
|
||||
|
||||
Project.local_task "run"
|
||||
|
||||
desc "Akka Chat Sample Module"
|
||||
define "akka-sample-chat" do
|
||||
project.version = VERSION_NUMBER
|
||||
project.group = GROUP
|
||||
|
||||
compile.with AKKA, CONFIGGY
|
||||
|
||||
p artifact(MULTIVERSE).to_s
|
||||
|
||||
package(:jar)
|
||||
|
||||
task "run" do
|
||||
Java.java "scala.tools.nsc.MainGenericRunner",
|
||||
:classpath => [ compile.dependencies, compile.target,
|
||||
ASPECTJ, COMMONS_IO, JACKSON, NETTY, MULTIVERSE, PROTOBUF, REDIS,
|
||||
SBINARY, SJSON],
|
||||
:java_args => ["-server"]
|
||||
end
|
||||
end
|
||||
89
akka-docs/pending/cluster-membership.rst
Normal file
|
|
@ -0,0 +1,89 @@
|
|||
Cluster Membership (Scala)
|
||||
==========================
|
||||
|
||||
Module stability: **IN PROGRESS**
|
||||
|
||||
Akka supports cluster membership through a `JGroups <http://www.jgroups.org/>`_ based implementation. JGroups is a `P2P <http://en.wikipedia.org/wiki/Peer-to-peer>`_ clustering API.
|
||||
|
||||
Configuration
|
||||
-------------
|
||||
|
||||
The cluster is configured in 'akka.conf' by specifying the cluster name and the Fully Qualified Name (FQN) of the serializer class:
|
||||
|
||||
.. code-block:: ruby
|
||||
|
||||
remote {
|
||||
cluster {
|
||||
service = on
|
||||
name = "default" # The name of the cluster
|
||||
serializer = "akka.serialization.Serializer$Java" # FQN of the serializer class
|
||||
}
|
||||
}
|
||||
|
||||
How to join the cluster
|
||||
-----------------------
|
||||
|
||||
The node joins the cluster when the 'RemoteNode' and/or 'RemoteServer' servers are started.
|
||||
|
||||
Cluster API
|
||||
-----------
|
||||
|
||||
Interaction with the cluster is done through the 'akka.remote.Cluster' object.
|
||||
|
||||
To send a message to all actors of a specific type on other nodes in the cluster use the 'relayMessage' function:
|
||||
|
||||
.. code-block:: scala
|
||||
|
||||
def relayMessage(to: Class[_ <: Actor], msg: AnyRef): Unit
|
||||
|
||||
Here is an example:
|
||||
|
||||
.. code-block:: scala
|
||||
|
||||
Cluster.relayMessage(classOf[ATypeOfActor], message)
|
||||
|
||||
Traversing the remote nodes in the cluster to spawn remote actors:
|
||||
|
||||
Cluster.foreach:
|
||||
|
||||
.. code-block:: scala
|
||||
|
||||
def foreach(f : (RemoteAddress) => Unit) : Unit
|
||||
|
||||
Here's an example:
|
||||
|
||||
.. code-block:: scala
|
||||
|
||||
for(endpoint <- Cluster) spawnRemote[KungFuActor](endpoint.hostname,endpoint.port)
|
||||
|
||||
and:
|
||||
|
||||
.. code-block:: scala
|
||||
|
||||
Cluster.foreach( endpoint => spawnRemote[KungFuActor](endpoint.hostname,endpoint.port) )
|
||||
|
||||
Cluster.lookup:
|
||||
|
||||
.. code-block:: scala
|
||||
|
||||
def lookup[T](handleRemoteAddress : PartialFunction[RemoteAddress, T]) : Option[T]
|
||||
|
||||
Here is an example:
|
||||
|
||||
.. code-block:: scala
|
||||
|
||||
val myRemoteActor: Option[SomeActorType] = Cluster.lookup({
|
||||
case RemoteAddress(hostname, port) => spawnRemote[SomeActorType](hostname, port)
|
||||
})
|
||||
|
||||
myRemoteActor.foreach(remoteActor => ...)
|
||||
|
||||
Here is another example:
|
||||
|
||||
.. code-block:: scala
|
||||
Cluster.lookup({
|
||||
case remoteAddress @ RemoteAddress(_,_) => remoteAddress
|
||||
}) match {
|
||||
case Some(remoteAddress) => spawnAllRemoteActors(remoteAddress)
|
||||
case None => handleNoRemoteNodeFound
|
||||
}
|
||||
170
akka-docs/pending/companies-using-akka.rst
Normal file
|
|
@ -0,0 +1,170 @@
|
|||
Companies and Open Source projects using Akka
|
||||
=============================================
|
||||
|
||||
Production Users
|
||||
****************
|
||||
|
||||
These are some of the production Akka users that are able to talk about their use publicly.
|
||||
|
||||
CSC
|
||||
---
|
||||
|
||||
CSC is a global provider of information technology services. The Traffic Management business unit in the Netherlands is a systems integrator for the implementation of Traffic Information and Traffic Enforcement Systems, such as section control, weigh in motion, travel time and traffic jam detection and national data warehouse for traffic information. CSC Traffic Management is using Akka for their latest Traffic Information and Traffic Enforcement Systems.
|
||||
|
||||
`<http://www.csc.com/nl/ds/42449-traffic_management>`_
|
||||
|
||||
*"Akka has been in use for almost a year now (since 0.7) and has been used successfully for two projects so far. Akka has enabled us to deliver very flexible, scalable and high performing systems with as little friction as possible. The Actor model has simplified a lot of concerns in the type of systems that we build and is now part of our reference architecture. With Akka we deliver systems that meet the most strict performance requirements of our clients in a near-realtime environment. We have found the Akka framework and it's support team invaluable."*
|
||||
|
||||
Thatcham Motor Insurance Repair Research Centre
-----------------------------------------------

Thatcham is a EuroNCAP member. They research efficient, safe, cost-effective repair of vehicles, and work with manufacturers to influence the design of new vehicles. Thatcham are using Akka as the implementation for their distributed modules. All Scala-based research software now talks to an Akka-based publishing platform. Using Akka enables Thatcham to 'free their domain', ensures that the platform is cloud-enabled and scalable, and gives the team confidence that they can stay flexible. Akka has been in use and tested under load at Thatcham for almost a year, with no problems migrating up through the different versions. Their old website is currently under redesign on a new Scala-powered platform: `www.thatcham.org <http://www.thatcham.org>`_

*“We have been in production with Akka for over 18 months with zero downtime. The core is rock solid, never a problem, performance is great, integration capabilities are diverse and ever growing, and the toolkit is just a pleasure to work with. Combine that with the excellent response you get from the devs and users on this list and you have a winner. Absolutely no regrets on our part for choosing to work with Akka.”*

*"Scala and Akka are now enabling improvements in the standard of vehicle damage assessment, and in the safety of vehicle repair across the UK, with Europe, USA, Asia and Australasia to follow. Thatcham (Motor Insurance Repair Research Centre) are delivering crash-specific information with linked detailed repair information for over 7000 methods.*

*For Thatcham, the technologies enable scalability and elegance when dealing with complicated design constraints. Because of the complexity of interlinked methods, caching is virtually impossible in most cases, so in steps the 'actors' paradigm. Where previously something like JMS would have provided a stable but heavyweight, rigid solution, Thatcham are now more flexible, and can expand into the cloud in a far simpler, more rewarding way.*

*Thatcham's customers, body shop repairers and insurers, receive up-to-date repair information in the form of crash repair documents of the quality necessary to ensure that every vehicle is repaired back to the original safety standard. In a market as important as this, availability is key, as is performance. Scala and Akka have delivered consistently so far.*

*While recently introduced, growing numbers of UK repairers are receiving up-to-date repair information from this service, with the rest to follow shortly. Plans are already in motion to build new clusters to roll the service out across Europe, USA, Asia and Australasia.*

*The sheer opportunities opened up to teams by Scala and Akka, in terms of integration, concise expression of intent and scalability, are of huge benefit."*

SVT (Swedish Television)
------------------------

`<http://svt.se>`_

*“I’m currently working in a project at the Swedish Television where we’re developing a subtitling system with collaboration capabilities similar to Google Wave. It’s a mission critical system and the design and server implementation is all based on Akka and actors etc. We’ve been running in production for about 6 months and have been upgrading Akka whenever a new release comes out. We’ve never had a single bug due to Akka, and it’s been a pure pleasure to work with. I would choose Akka any day of the week!*

*Our system is highly asynchronous so the actor style of doing things is a perfect fit. I don’t know how you feel about concurrency in a big system, but rolling your own abstractions is not a very easy thing to do. When using Akka you can almost forget about all that: synchronizing between threads, locking and protecting access to state etc. Akka is not just about actors, but that’s one of the most pleasurable things to work with. It’s easy to add new ones and it’s easy to design with actors. You can fire up work actors tied to a specific dispatcher etc. I could make the list of benefits much longer, but I’m at work right now. I suggest you try it out and see how it fits your requirements.*

*We saw a perfect business reason for using Akka. It lets you concentrate on the business logic instead of the low level things. It’s easy to teach others and the business intent is clear just by reading the code. We didn’t choose Akka just for fun. It’s a business critical application that’s used in broadcasting. Even live broadcasting. We wouldn’t have been where we are today in such a short time without using Akka. We’re two developers that have done great things in such a short amount of time and part of this is due to Akka. As I said, it lets us focus on the business logic instead of low level things such as concurrency, locking, performance etc."*

Tapad
-----

`<http://tapad.com>`_

*"Tapad is building a real-time ad exchange platform for advertising on mobile and connected devices. Real-time ad exchanges allow advertisers (among other things) to target audiences instead of buying a fixed set of ad slots that will be displayed “randomly” to users. To developers without experience in the ad space, this might seem boring, but real-time ad exchanges present some really interesting technical challenges.*

*Take for instance the process backing a page view with ads served by a real-time ad exchange auction (somewhat simplified):*

1. *A user opens a site (or app) which has ads in it.*
2. *As the page / app loads, the ad serving component fires off a request to the ad exchange (this might just be due to an image tag on the page).*
3. *The ad exchange enriches the request with any information about the current user (tracking cookies are often employed for this) and display context information (“news article about parenting”, “blog about food” etc).*
4. *The ad exchange forwards the enriched request to all bidders registered with the ad exchange.*
5. *The bidders consider the provided user information and respond with the price they are willing to pay for this particular ad slot.*
6. *The ad exchange picks the highest bidder and ensures that the winning bidder’s ad is shown to the user.*

*Any latency in this process directly influences user experience latency, so this has to happen really fast. All in all, the total time should not exceed about 100ms, and most ad exchanges allow bidders to spend about 60ms (including network time) to return their bids. That leaves the ad exchange with less than 40ms to facilitate the auction. At Tapad, this happens billions of times per month / tens of thousands of times per second.*

*Tapad is building bidders which will participate in auctions facilitated by other ad exchanges, but we’re also building our own. We are using Akka in several ways in several parts of the system. Here are some examples:*

*Plain old parallelization*
*During an auction in the real-time exchange, it’s obvious that all bidders must receive the bid requests in parallel. An auctioneer actor sends the bid requests to bidder actors which in turn handle throttling and eventually IO. We use futures in these requests and the auctioneer discards any responses which arrive too late.*

*Inside our bidders, we also rely heavily on parallel execution. In order to determine how much to pay for an ad slot, several data stores are queried for information pertinent to the current user. In a “traditional” system, we’d be doing this sequentially, but again, due to the extreme latency constraints, we’re doing this concurrently. Again, this is done with futures, and data that is not available in time gets cut from the decision making (and logged :)).*

*Maintaining state under concurrent load*
*This is probably the de facto standard use case for the actor model. Bidders internal to our system are actors backed by an advertiser campaign. A campaign includes, among other things, budget and “pacing” information. The budget determines how much money to spend for the duration of the campaign, whereas pacing information might set constraints on how quickly or slowly the money should be spent. Ad traffic changes from day to day and from hour to hour, and our spending algorithms consider past performance in order to spend the right amount of money at the right time. Needless to say, these algorithms use a lot of state and this state is in constant flux. A bidder with a high budget may see tens of thousands of bid requests per second. Luckily, due to round-robin load-balancing and the predictability of randomness under heavy traffic, the bidder actors do not share state across cluster nodes, they just share their instance count so they know which fraction of the campaign budget to try to spend.*

*Pacing is also done for external bidders. Each 3rd party bidder end-point has an actor coordinating requests and measuring latency and throughput. The actor never blocks itself, but when an incoming bid request is received, it considers the current performance of the 3rd party system and decides whether to pass on the request and respond negatively immediately, or forward the request to the 3rd party request executor component (which handles the IO).*

*Batch processing*
*We store a lot of data about every single ad request we serve and this is stored in a key-value data store. Due to the performance characteristics of the data store, it is not feasible to store every single data point one at a time - it must be batched up and performed in parallel. We don’t need a durable messaging system for this (losing a couple of hundred data points is no biggie). All our data logging happens asynchronously and we have basic load-balanced actors which batch incoming messages and write at regular intervals (using Scheduler) or whenever the specified batch size has been reached.*

*Analytics*
*Needless to say, it’s not feasible / useful to store our traffic information in a relational database. A lot of analytics and data analysis is done “offline” with map / reduce on top of the data store, but this doesn’t work well for real-time analytics, which our customers love. We therefore have metrics actors that receive campaign bidding and click / impression information in real time, aggregate this information over configurable periods of time and flush it to the database used for customer dashboards for “semi-real-time” display. Five minute history is considered real-time in this business, but in theory, we could have queried the actors directly for really real-time data. :)*

*Our Akka journey started as a prototyping project, but Akka has now become a crucial part of our system. All of the above mentioned components, except the 3rd party bidder integration, have been running in production for a couple of weeks (on Akka 1.0RC3) and we have not seen any issues at all so far."*

Flowdock
--------

Flowdock delivers Google Wave for the corporate world.

*"Flowdock makes working together a breeze. Organize the flow of information, task things over and work together towards common goals seamlessly on the web - in real time."*

`<http://flowdock.com/>`_

Travel Budget
-------------

`<http://labs.inevo.pt/travel-budget>`_

Says.US
-------

*"says.us is a gathering place for people to connect in real time - whether an informal meeting of people who love Scala or a chance for people anywhere to speak out about the latest headlines."*

`<http://says.us/>`_

LShift
------

*"Diffa is an open source data analysis tool that automatically establishes data differences between two or more real-time systems.*

*Diffa will help you compare local or distributed systems for data consistency, without having to stop them running or implement manual cross-system comparisons. The interface provides you with a simple visual summary of any consistency breaks and tools to investigate the issues.*

*Diffa is the ideal tool to use to investigate where or when inconsistencies are occurring, or simply to provide confidence that your systems are running in perfect sync. It can be used operationally as an early warning system, in deployment for release verification, or in development with other enterprise diagnosis tools to help troubleshoot faults."*

`<http://diffa.lshift.net/>`_

Twimpact
--------

*"Real-time twitter trends and user impact"*

`<http://twimpact.com>`_

Rocket Pack Platform
--------------------

*"Rocket Pack Platform is the only fully integrated solution for plugin-free browser game development."*

`<http://rocketpack.fi/platform/>`_

Open Source Projects using Akka
*******************************

Redis client
------------

*A Redis client written in Scala, using Akka actors, HawtDispatch and non-blocking IO. Supports Redis 2.0+.*

`<http://github.com/derekjw/fyrie-redis>`_

Narrator
--------

*"Narrator is a library which can be used to create story-driven clustered load-testing packages through a very readable and understandable api."*

`<http://github.com/shorrockin/narrator>`_

Kandash
-------

*"Kandash is a lightweight kanban web-based board and set of analytics tools."*

`<http://vasilrem.com/blog/software-development/kandash-project-v-0-3-is-now-available/>`_
`<http://code.google.com/p/kandash/>`_

Wicket Cassandra Datastore
--------------------------

This project provides an org.apache.wicket.pageStore.IDataStore implementation that writes pages to an Apache Cassandra cluster using Akka.

`<http://github.com/gseitz/wicket-cassandra-datastore/>`_

Spray
-----

*"spray is a lightweight Scala framework for building RESTful web services on top of Akka actors and Akka Mist. It sports the following main features:*

* *Completely asynchronous, non-blocking, actor-based request processing for efficiently handling very high numbers of concurrent connections*
* *Powerful, flexible and extensible internal Scala DSL for declaratively defining your web service behavior*
* *Immutable model of the HTTP protocol, decoupled from the underlying servlet container*
* *Full testability of your REST services, without the need to fire up containers or actors"*

`<https://github.com/spray/spray/wiki>`_

180
akka-docs/pending/configuration.rst
Normal file
@ -0,0 +1,180 @@

Configuration
=============

Specifying the configuration file
---------------------------------

If you don't specify a configuration file then Akka uses default values. If you want to override these, edit the 'akka.conf' file in the 'AKKA_HOME/config' directory. This config inherits from the 'akka-reference.conf' file that you see below; use your 'akka.conf' to override any property in the reference config.

The config can be specified in various ways:

* Define the '-Dakka.config=...' system property option.
* Put the 'akka.conf' file on the classpath.
* Define the 'AKKA_HOME' environment variable pointing to the root of the Akka distribution, in which case the config is taken from the 'AKKA_HOME/config' directory. You can also point to AKKA_HOME by specifying the '-Dakka.home=...' system property option.

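To verify which configuration Akka actually picked up, you can read values back at runtime. The following is a minimal sketch; the 'Config.config' accessor and the 'getString'/'getInt' helpers with default values are assumptions about the configuration API of this release:

.. code-block:: scala

  import akka.config.Config

  // A minimal sketch for checking which configuration Akka picked up.
  // 'Config.config' and the getString/getInt helpers (with defaults) are assumed API names.
  object ConfigCheck {
    def main(args: Array[String]): Unit = {
      println("akka.version       = " + Config.config.getString("akka.version", "unknown"))
      println("remote server port = " + Config.config.getInt("akka.remote.server.port", 2552))
    }
  }
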
Defining the configuration file
-------------------------------

.. code-block:: ruby

  ####################
  # Akka Config File #
  ####################

  # This file has all the default settings, so all these could be removed with no visible effect.
  # Modify as needed.

  akka {
    version = "1.1-SNAPSHOT"   # Akka version, checked against the runtime version of Akka.

    enabled-modules = []       # Comma separated list of the enabled modules. Options: ["remote", "camel", "http"]

    time-unit = "seconds"      # Time unit for all timeout properties throughout the config

    event-handlers = ["akka.event.EventHandler$DefaultListener"] # event handlers to register at boot time (EventHandler$DefaultListener logs to STDOUT)
    event-handler-level = "DEBUG"                                # Options: ERROR, WARNING, INFO, DEBUG

    # These boot classes are loaded (and created) automatically when the Akka Microkernel boots up
    # Can be used to bootstrap your application(s)
    # Should be the FQN (Fully Qualified Name) of the boot class which needs to have a default constructor
    # boot = ["sample.camel.Boot",
    #         "sample.rest.java.Boot",
    #         "sample.rest.scala.Boot",
    #         "sample.security.Boot"]
    boot = []

    actor {
      timeout = 5                     # Default timeout for Future based invocations
                                      #   - Actor:        !! && !!!
                                      #   - UntypedActor: sendRequestReply && sendRequestReplyFuture
                                      #   - TypedActor:   methods with non-void return type
      serialize-messages = off        # Does a deep clone of (non-primitive) messages to ensure immutability
      throughput = 5                  # Default throughput for all ExecutorBasedEventDrivenDispatcher, set to 1 for complete fairness
      throughput-deadline-time = -1   # Default throughput deadline for all ExecutorBasedEventDrivenDispatcher, set to 0 or negative for no deadline
      dispatcher-shutdown-timeout = 1 # Using the akka.time-unit, how long dispatchers by default will wait for new actors until they shut down

      default-dispatcher {
        type = "GlobalExecutorBasedEventDriven" # Must be one of the following, all "Global*" are non-configurable
                                                #   - ExecutorBasedEventDriven
                                                #   - ExecutorBasedEventDrivenWorkStealing
                                                #   - GlobalExecutorBasedEventDriven
        keep-alive-time = 60             # Keep alive time for threads
        core-pool-size-factor = 1.0      # No of core threads ... ceil(available processors * factor)
        max-pool-size-factor = 4.0       # Max no of threads ... ceil(available processors * factor)
        executor-bounds = -1             # Makes the Executor bounded, -1 is unbounded
        allow-core-timeout = on          # Allow core threads to time out
        rejection-policy = "caller-runs" # abort, caller-runs, discard-oldest, discard
        throughput = 5                   # Throughput for ExecutorBasedEventDrivenDispatcher, set to 1 for complete fairness
        throughput-deadline-time = -1    # Throughput deadline for ExecutorBasedEventDrivenDispatcher, set to 0 or negative for no deadline
        mailbox-capacity = -1            # If negative (or zero) then an unbounded mailbox is used (default)
                                         # If positive then a bounded mailbox is used and the capacity is set using the property
                                         # NOTE: setting a mailbox to 'blocking' can be a bit dangerous,
                                         # could lead to deadlock, use with care
                                         #
                                         # The following are only used for ExecutorBasedEventDriven
                                         # and only if mailbox-capacity > 0
        mailbox-push-timeout-time = 10   # Specifies the timeout to add a new message to a mailbox that is full - negative number means infinite timeout
                                         # (in unit defined by the time-unit property)
      }
    }

    stm {
      fair = on   # Should global transactions be fair or non-fair (non fair yield better performance)
      max-retries = 1000
      timeout = 5 # Default timeout for blocking transactions and transaction set (in unit defined by
                  # the time-unit property)
      write-skew = true
      blocking-allowed = false
      interruptible = false
      speculative = true
      quick-release = true
      propagation = "requires"
      trace-level = "none"
    }

    jta {
      provider = "from-jndi" # Options: - "from-jndi" (means that Akka will try to detect a TransactionManager in the JNDI)
                             #          - "atomikos" (means that Akka will use the Atomikos based JTA impl in 'akka-jta',
                             #            e.g. you need the akka-jta JARs on classpath).
      timeout = 60
    }

    http {
      hostname = "localhost"
      port = 9998

      # If you are using akka.http.AkkaRestServlet
      filters = ["se.scalablesolutions.akka.security.AkkaSecurityFilterFactory"] # List with all jersey filters to use
      # resource-packages = ["sample.rest.scala",
      #                      "sample.rest.java",
      #                      "sample.security"] # List with all resource packages for your Jersey services
      resource-packages = []

      # The authentication service to use. Needs to be overridden (sample now)
      # authenticator = "sample.security.BasicAuthenticationService"
      authenticator = "N/A"

      # Uncomment if you are using the KerberosAuthenticationActor
      # kerberos {
      #   servicePrincipal = "HTTP/localhost@EXAMPLE.COM"
      #   keyTabLocation   = "URL to keytab"
      #   kerberosDebug    = "true"
      #   realm            = "EXAMPLE.COM"
      # }
      kerberos {
        servicePrincipal = "N/A"
        keyTabLocation   = "N/A"
        kerberosDebug    = "N/A"
        realm            = ""
      }

      # If you are using akka.http.AkkaMistServlet
      mist-dispatcher {
        #type = "GlobalExecutorBasedEventDriven" # Uncomment if you want to use a different dispatcher than the default one for Comet
      }
      connection-close = true               # toggles the addition of the "Connection" response header with a "close" value
      root-actor-id = "_httproot"           # the id of the actor to use as the root endpoint
      root-actor-builtin = true             # toggles the use of the built-in root endpoint base class
      timeout = 1000                        # the default timeout for all async requests (in ms)
      expired-header-name = "Async-Timeout" # the name of the response header to use when an async request expires
      expired-header-value = "expired"      # the value of the response header to use when an async request expires
    }

    remote {

      # secure-cookie = "050E0A0D0D06010A00000900040D060F0C09060B" # generate your own with '$AKKA_HOME/scripts/generate_secure_cookie.sh' or using 'Crypt.generateSecureCookie'
      secure-cookie = ""

      compression-scheme = "zlib" # Options: "zlib" (lzf to come), leave out for no compression
      zlib-compression-level = 6  # Options: 0-9 (1 being fastest and 9 being the most compressed), default is 6

      layer = "akka.remote.netty.NettyRemoteSupport"

      server {
        hostname = "localhost"        # The hostname or IP that clients should connect to
        port = 2552                   # The port clients should connect to. Default is 2552 (AKKA)
        message-frame-size = 1048576  # Increase this if you want to be able to send messages with large payloads
        connection-timeout = 1
        require-cookie = off          # Should the remote server require that its peers share the same secure-cookie (defined in the 'remote' section)?
        untrusted-mode = off          # Enable untrusted mode for full security of server managed actors, allows untrusted clients to connect.
        backlog = 4096                # Sets the size of the connection backlog
        execution-pool-keepalive = 60 # Length in akka.time-unit how long core threads will be kept alive if idling
        execution-pool-size = 16      # Size of the core pool of the remote execution unit
        max-channel-memory-size = 0   # Maximum channel size, 0 for off
        max-total-memory-size = 0     # Maximum total size of all channels, 0 for off
      }

      client {
        buffering {
          retry-message-send-on-failure = on
          capacity = -1               # If negative (or zero) then an unbounded mailbox is used (default)
                                      # If positive then a bounded mailbox is used and the capacity is set using the property
        }
        reconnect-delay = 5
        read-timeout = 10
        message-frame-size = 1048576
        reap-futures-delay = 5
        reconnection-time-window = 600 # Maximum time window that a client should try to reconnect for
      }
    }
  }

Some files were not shown because too many files have changed in this diff.