Merge remote-tracking branch 'origin/master' into wip-1581-patterns-ask

Roland 2012-01-18 14:20:13 +01:00
commit 1daaee98aa
141 changed files with 4660 additions and 5579 deletions

.gitignore vendored
View file

@ -61,3 +61,5 @@ akka.sublime-workspace
.target
.multi-jvm
_mb
schoir.props
worker*.log

View file

@ -0,0 +1,64 @@
/**
* Copyright (C) 2009-2012 Typesafe Inc. <http://www.typesafe.com>
*/
package akka.actor
import java.io.File
import com.typesafe.config.Config
import com.typesafe.config.ConfigFactory
import com.typesafe.config.ConfigParseOptions
import com.typesafe.config.ConfigResolveOptions
@deprecated("use ActorSystem instead", "2.0")
object GlobalActorSystem extends ActorSystemImpl("GlobalSystem", OldConfigurationLoader.defaultConfig) {
start()
}
/**
* Loads configuration (akka.conf) from same location as Akka 1.x
*/
@deprecated("use default config location or write your own configuration loader", "2.0")
object OldConfigurationLoader {
val defaultConfig: Config = {
val cfg = fromProperties orElse fromClasspath orElse fromHome getOrElse emptyConfig
val config = cfg.withFallback(ConfigFactory.defaultReference)
config.checkValid(ConfigFactory.defaultReference, "akka")
config
}
// file extensions (.conf, .json, .properties) are handled by parseFileAnySyntax
val defaultLocation: String = (systemMode orElse envMode).map("akka." + _).getOrElse("akka")
private def envMode = System.getenv("AKKA_MODE") match {
case null | "" ⇒ None
case value ⇒ Some(value)
}
private def systemMode = System.getProperty("akka.mode") match {
case null | "" ⇒ None
case value ⇒ Some(value)
}
private def configParseOptions = ConfigParseOptions.defaults.setAllowMissing(false)
private def fromProperties = try {
val property = Option(System.getProperty("akka.config"))
property.map(p ⇒
ConfigFactory.systemProperties.withFallback(
ConfigFactory.parseFileAnySyntax(new File(p), configParseOptions)))
} catch { case _ ⇒ None }
private def fromClasspath = try {
Option(ConfigFactory.systemProperties.withFallback(
ConfigFactory.parseResourcesAnySyntax(ActorSystem.getClass, "/" + defaultLocation, configParseOptions)))
} catch { case _ ⇒ None }
private def fromHome = try {
Option(ConfigFactory.systemProperties.withFallback(
ConfigFactory.parseFileAnySyntax(new File(ActorSystem.GlobalHome.get + "/config/" + defaultLocation), configParseOptions)))
} catch { case _ ⇒ None }
private def emptyConfig = ConfigFactory.systemProperties
}
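
For orientation (illustrative, not part of the diff): a minimal sketch of the same fallback order expressed directly against the Typesafe Config API, assuming a file path given via -Dakka.config; the object name and helper are made up for the example.

import java.io.File
import com.typesafe.config.{ Config, ConfigFactory, ConfigParseOptions }

object ConfigLoadingSketch {
  // stand-in for the stricter parse options used above
  private val parseOptions = ConfigParseOptions.defaults.setAllowMissing(false)

  // Same precedence as OldConfigurationLoader: system properties, then the
  // file named by -Dakka.config (if any), then the bundled reference config.
  def load(): Config = {
    val fromFile = Option(System.getProperty("akka.config"))
      .map(p ⇒ ConfigFactory.parseFileAnySyntax(new File(p), parseOptions))
      .getOrElse(ConfigFactory.parseString(""))
    val config = ConfigFactory.systemProperties
      .withFallback(fromFile)
      .withFallback(ConfigFactory.defaultReference)
    config.checkValid(ConfigFactory.defaultReference, "akka")
    config
  }
}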

View file

@ -0,0 +1,171 @@
/**
* Copyright (C) 2009-2012 Typesafe Inc. <http://www.typesafe.com>
*/
package akka.actor
import akka.japi.Creator
import akka.util.Timeout
import akka.dispatch.Future
import akka.dispatch.OldFuture
import akka.util.Duration
import java.util.concurrent.TimeUnit
import java.net.InetSocketAddress
/**
* Migration replacement for `object akka.actor.Actor`.
*/
@deprecated("use ActorRefFactory (ActorSystem or ActorContext) to create actors", "2.0")
object OldActor {
/**
* Creates an ActorRef out of the Actor with type T.
* It will be automatically started, i.e. remove old call to `start()`.
*
*/
@deprecated("use ActorRefFactory (ActorSystem or ActorContext) to create actors", "2.0")
def actorOf[T <: Actor: Manifest]: ActorRef = actorOf(manifest[T].erasure.asInstanceOf[Class[_ <: Actor]])
/**
* Creates an ActorRef out of the Actor of the specified Class.
* It will be automatically started, i.e. remove old call to `start()`.
*/
@deprecated("use ActorRefFactory (ActorSystem or ActorContext) to create actors", "2.0")
def actorOf(clazz: Class[_ <: Actor]): ActorRef = GlobalActorSystem.actorOf(Props(clazz))
/**
* Creates an ActorRef out of the Actor. Allows you to pass in a factory function
* that creates the Actor. Please note that this function can be invoked multiple
* times if for example the Actor is supervised and needs to be restarted.
*
* It will be automatically started, i.e. remove old call to `start()`.
*/
@deprecated("use ActorRefFactory (ActorSystem or ActorContext) to create actors", "2.0")
def actorOf(factory: Actor): ActorRef = GlobalActorSystem.actorOf(Props(factory))
/**
* Creates an ActorRef out of the Actor. Allows you to pass in a factory (Creator<Actor>)
* that creates the Actor. Please note that this function can be invoked multiple
* times if for example the Actor is supervised and needs to be restarted.
* <p/>
* JAVA API
*/
@deprecated("use ActorRefFactory (ActorSystem or ActorContext) to create actors", "2.0")
def actorOf(creator: Creator[Actor]): ActorRef = GlobalActorSystem.actorOf(Props(creator))
@deprecated("OldActor.remote should not be used", "2.0")
lazy val remote: OldRemoteSupport = new OldRemoteSupport
}
@deprecated("use Actor", "2.0")
abstract class OldActor extends Actor {
implicit def askTimeout: Timeout = akka.migration.askTimeout
implicit def future2OldFuture[T](future: Future[T]): OldFuture[T] = akka.migration.future2OldFuture(future)
implicit def actorRef2OldActorRef(actorRef: ActorRef) = new OldActorRef(actorRef)
@deprecated("Use context.become instead", "2.0")
def become(behavior: Receive, discardOld: Boolean = true) = context.become(behavior, discardOld)
@deprecated("Use context.unbecome instead", "2.0")
def unbecome() = context.unbecome()
class OldActorRef(actorRef: ActorRef) {
@deprecated("Actors are automatically started when created, i.e. remove old call to start()", "2.0")
def start(): ActorRef = actorRef
@deprecated("Stop with ActorSystem or ActorContext instead", "2.0")
def exit() = stop()
@deprecated("Stop with ActorSystem or ActorContext instead", "2.0")
def stop(): Unit = context.stop(actorRef)
@deprecated("Use context.getReceiveTimeout instead", "2.0")
def getReceiveTimeout(): Option[Long] = context.receiveTimeout.map(_.toMillis)
@deprecated("Use context.setReceiveTimeout instead", "2.0")
def setReceiveTimeout(timeout: Long) = context.setReceiveTimeout(Duration(timeout, TimeUnit.MILLISECONDS))
@deprecated("Use context.getReceiveTimeout instead", "2.0")
def receiveTimeout: Option[Long] = getReceiveTimeout()
@deprecated("Use context.setReceiveTimeout instead", "2.0")
def receiveTimeout_=(timeout: Option[Long]) = setReceiveTimeout(timeout.getOrElse(0L))
@deprecated("Use self.isTerminated instead", "2.0")
def isShutdown: Boolean = self.isTerminated
@deprecated("Use sender instead", "2.0")
def channel() = context.sender
@deprecated("Use sender instead", "2.0")
def sender() = Some(context.sender)
@deprecated("Use sender ! instead", "2.0")
def reply(message: Any) = context.sender.!(message)(context.self)
@deprecated("Use sender ! instead", "2.0")
def tryReply(message: Any): Boolean = {
reply(message)
true
}
@deprecated("Use sender ! instead", "2.0")
def tryTell(message: Any)(implicit sender: ActorRef = context.self): Boolean = {
actorRef.!(message)(sender)
true
}
@deprecated("Use sender ! akka.actor.Status.Failure(e) instead", "2.0")
def sendException(ex: Throwable): Boolean = {
context.sender.!(akka.actor.Status.Failure(ex))(context.self)
true
}
}
}
class OldRemoteSupport {
@deprecated("remote.start is not needed", "2.0")
def start() {}
@deprecated("remote.start is not needed, use configuration to specify RemoteActorRefProvider, host and port", "2.0")
def start(host: String, port: Int) {}
@deprecated("remote.start is not needed, use configuration to specify RemoteActorRefProvider, host and port", "2.0")
def start(host: String, port: Int, loader: ClassLoader) {}
@deprecated("remote.shutdown is not needed", "2.0")
def shutdown() {}
@deprecated("use actorFor in ActorRefProvider (ActorSystem or ActorContext) instead", "2.0")
def actorFor(classNameOrServiceId: String, hostname: String, port: Int): ActorRef =
GlobalActorSystem.actorFor("akka://%s@%s:%s/user/%s".format(GlobalActorSystem.name, hostname, port, classNameOrServiceId))
@deprecated("use actorFor in ActorRefProvider (ActorSystem or ActorContext) instead", "2.0")
def actorFor(classNameOrServiceId: String, hostname: String, port: Int, loader: ClassLoader): ActorRef =
actorFor(classNameOrServiceId, hostname, port)
@deprecated("use actorFor in ActorRefProvider (ActorSystem or ActorContext) instead", "2.0")
def actorFor(serviceId: String, className: String, hostname: String, port: Int): ActorRef =
actorFor(serviceId, hostname, port)
@deprecated("use actorFor in ActorRefProvider (ActorSystem or ActorContext) instead", "2.0")
def actorFor(serviceId: String, className: String, hostname: String, port: Int, loader: ClassLoader): ActorRef =
actorFor(serviceId, hostname, port)
@deprecated("use actorFor in ActorRefProvider (ActorSystem or ActorContext) instead", "2.0")
def actorFor(classNameOrServiceId: String, timeout: Long, hostname: String, port: Int): ActorRef =
actorFor(classNameOrServiceId, hostname, port)
@deprecated("use actorFor in ActorRefProvider (ActorSystem or ActorContext) instead", "2.0")
def actorFor(classNameOrServiceId: String, timeout: Long, hostname: String, port: Int, loader: ClassLoader): ActorRef =
actorFor(classNameOrServiceId, hostname, port)
@deprecated("use actorFor in ActorRefProvider (ActorSystem or ActorContext) instead", "2.0")
def actorFor(serviceId: String, className: String, timeout: Long, hostname: String, port: Int): ActorRef =
actorFor(serviceId, hostname, port)
}
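
A minimal migration sketch (illustrative, not part of the diff), assuming a hypothetical MyActor class: the 1.x-style creation that OldActor.actorOf wraps, next to the ActorSystem-based replacement named in the deprecation messages.

import akka.actor.{ Actor, ActorSystem, Props }

class MyActor extends Actor {                 // illustrative actor
  def receive = { case msg ⇒ sender ! msg }
}

object ActorOfMigrationSketch extends App {
  // 1.x style (deprecated): val ref = Actor.actorOf[MyActor].start()
  // 2.0 style, as suggested by the deprecation messages:
  val system = ActorSystem("MigrationSketch") // illustrative system name
  val ref = system.actorOf(Props[MyActor])    // started automatically, no start() needed
  ref ! "hello"
  system.shutdown()
}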

View file

@ -0,0 +1,75 @@
/**
* Copyright (C) 2009-2012 Typesafe Inc. <http://www.typesafe.com>
*/
package akka.actor
import java.util.concurrent.TimeUnit
import akka.util.Duration
/**
* Migration replacement for `object akka.actor.Scheduler`.
*/
@deprecated("use ActorSystem.scheduler instead", "2.0")
object OldScheduler {
/**
* Schedules to send the specified message to the receiver after initialDelay and then repeated after delay
*/
@deprecated("use ActorSystem.scheduler instead", "2.0")
def schedule(receiver: ActorRef, message: Any, initialDelay: Long, delay: Long, timeUnit: TimeUnit): Cancellable =
GlobalActorSystem.scheduler.schedule(
Duration(initialDelay, timeUnit),
Duration(delay, timeUnit),
receiver,
message)
/**
* Schedules the specified function to run after initialDelay and then repeatedly after delay
*/
@deprecated("use ActorSystem.scheduler instead", "2.0")
def schedule(f: () ⇒ Unit, initialDelay: Long, delay: Long, timeUnit: TimeUnit): Cancellable =
GlobalActorSystem.scheduler.schedule(
Duration(initialDelay, timeUnit),
Duration(delay, timeUnit),
new Runnable { def run = f() })
/**
* Schedules the specified runnable to run after initialDelay and then repeatedly after delay.
*/
@deprecated("use ActorSystem.scheduler instead", "2.0")
def schedule(runnable: Runnable, initialDelay: Long, delay: Long, timeUnit: TimeUnit): Cancellable =
GlobalActorSystem.scheduler.schedule(
Duration(initialDelay, timeUnit),
Duration(delay, timeUnit),
runnable)
/**
* Schedules to send the specified message to the receiver after delay
*/
@deprecated("use ActorSystem.scheduler instead", "2.0")
def scheduleOnce(receiver: ActorRef, message: Any, delay: Long, timeUnit: TimeUnit): Cancellable =
GlobalActorSystem.scheduler.scheduleOnce(
Duration(delay, timeUnit),
receiver,
message)
/**
* Schedules a function to be run after delay.
*/
@deprecated("use ActorSystem.scheduler instead", "2.0")
def scheduleOnce(f: () ⇒ Unit, delay: Long, timeUnit: TimeUnit): Cancellable =
GlobalActorSystem.scheduler.scheduleOnce(
Duration(delay, timeUnit),
new Runnable { def run = f() })
/**
* Schedules a runnable to be run after delay.
*/
@deprecated("use ActorSystem.scheduler instead", "2.0")
def scheduleOnce(runnable: Runnable, delay: Long, timeUnit: TimeUnit): Cancellable =
GlobalActorSystem.scheduler.scheduleOnce(
Duration(delay, timeUnit),
runnable)
}
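
A short sketch (illustrative, not part of the diff) of the migration these wrappers encode: replacing 1.x Scheduler.schedule calls with system.scheduler and explicit Durations, which is exactly what OldScheduler delegates to. The system name, receiver and message are made up.

import java.util.concurrent.TimeUnit
import akka.actor.{ Actor, ActorSystem, Props }
import akka.util.Duration

object SchedulerMigrationSketch extends App {
  val system = ActorSystem("SchedulerSketch")   // illustrative name
  val receiver = system.actorOf(Props(new Actor {
    def receive = { case msg ⇒ println(msg) }
  }))

  // 1.x style (deprecated): Scheduler.schedule(receiver, "tick", 0, 1000, TimeUnit.MILLISECONDS)
  // 2.0 style, as used by OldScheduler above:
  val cancellable = system.scheduler.schedule(
    Duration(0, TimeUnit.MILLISECONDS),
    Duration(1000, TimeUnit.MILLISECONDS),
    receiver,
    "tick")

  cancellable.cancel()   // cancel when no longer needed
  system.shutdown()
}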

View file

@ -0,0 +1,162 @@
/**
* Copyright (C) 2009-2012 Typesafe Inc. <http://www.typesafe.com>
*/
package akka.config
import akka.actor.GlobalActorSystem
import com.typesafe.config.Config
/**
* Migration replacement for `object akka.config.Config`.
*/
@deprecated("use ActorSystem.settings.config instead", "2.0")
object OldConfig {
val config = new OldConfiguration(GlobalActorSystem.settings.config)
}
/**
* Migration adapter for `akka.config.Configuration`
*/
@deprecated("use ActorSystem.settings.config (com.typesafe.config.Config) instead", "2.0")
class OldConfiguration(config: Config) {
import scala.collection.JavaConverters._
@deprecated("use new com.typesafe.config.Config API instead", "2.0")
def contains(key: String): Boolean = config.hasPath(key)
@deprecated("use new com.typesafe.config.Config API instead", "2.0")
def keys: Iterable[String] = config.root.keySet.asScala
@deprecated("use new com.typesafe.config.Config API instead", "2.0")
def getAny(key: String): Option[Any] = {
try {
Option(config.getAnyRef(key))
} catch {
case _ ⇒ None
}
}
@deprecated("use new com.typesafe.config.Config API instead", "2.0")
def getAny(key: String, defaultValue: Any): Any = getAny(key).getOrElse(defaultValue)
@deprecated("use new com.typesafe.config.Config API instead", "2.0")
def getSeqAny(key: String): Seq[Any] = {
try {
config.getAnyRefList(key).asScala
} catch {
case _ ⇒ Seq.empty[Any]
}
}
@deprecated("use new com.typesafe.config.Config API instead", "2.0")
def getString(key: String): Option[String] =
try {
Option(config.getString(key))
} catch {
case _ ⇒ None
}
@deprecated("use new com.typesafe.config.Config API instead", "2.0")
def getString(key: String, defaultValue: String): String = getString(key).getOrElse(defaultValue)
@deprecated("use new com.typesafe.config.Config API instead", "2.0")
def getList(key: String): Seq[String] = {
try {
config.getStringList(key).asScala
} catch {
case _ ⇒ Seq.empty[String]
}
}
@deprecated("use new com.typesafe.config.Config API instead", "2.0")
def getInt(key: String): Option[Int] = {
try {
Option(config.getInt(key))
} catch {
case _ ⇒ None
}
}
@deprecated("use new com.typesafe.config.Config API instead", "2.0")
def getInt(key: String, defaultValue: Int): Int = getInt(key).getOrElse(defaultValue)
@deprecated("use new com.typesafe.config.Config API instead", "2.0")
def getLong(key: String): Option[Long] = {
try {
Option(config.getLong(key))
} catch {
case _ ⇒ None
}
}
@deprecated("use new com.typesafe.config.Config API instead", "2.0")
def getLong(key: String, defaultValue: Long): Long = getLong(key).getOrElse(defaultValue)
@deprecated("use new com.typesafe.config.Config API instead", "2.0")
def getFloat(key: String): Option[Float] = {
try {
Option(config.getDouble(key).toFloat)
} catch {
case _ ⇒ None
}
}
@deprecated("use new com.typesafe.config.Config API instead", "2.0")
def getFloat(key: String, defaultValue: Float): Float = getFloat(key).getOrElse(defaultValue)
@deprecated("use new com.typesafe.config.Config API instead", "2.0")
def getDouble(key: String): Option[Double] = {
try {
Option(config.getDouble(key))
} catch {
case _ ⇒ None
}
}
@deprecated("use new com.typesafe.config.Config API instead", "2.0")
def getDouble(key: String, defaultValue: Double): Double = getDouble(key).getOrElse(defaultValue)
@deprecated("use new com.typesafe.config.Config API instead", "2.0")
def getBoolean(key: String): Option[Boolean] = {
try {
Option(config.getBoolean(key))
} catch {
case _ ⇒ None
}
}
@deprecated("use new com.typesafe.config.Config API instead", "2.0")
def getBoolean(key: String, defaultValue: Boolean): Boolean = getBoolean(key).getOrElse(defaultValue)
@deprecated("use new com.typesafe.config.Config API instead", "2.0")
def getBool(key: String): Option[Boolean] = getBoolean(key)
@deprecated("use new com.typesafe.config.Config API instead", "2.0")
def getBool(key: String, defaultValue: Boolean): Boolean = getBoolean(key, defaultValue)
@deprecated("use new com.typesafe.config.Config API instead", "2.0")
def apply(key: String): String = getString(key) match {
case None ⇒ throw new ConfigurationException("undefined config: " + key)
case Some(v) ⇒ v
}
@deprecated("use new com.typesafe.config.Config API instead", "2.0")
def apply(key: String, defaultValue: String) = getString(key, defaultValue)
@deprecated("use new com.typesafe.config.Config API instead", "2.0")
def apply(key: String, defaultValue: Int) = getInt(key, defaultValue)
@deprecated("use new com.typesafe.config.Config API instead", "2.0")
def apply(key: String, defaultValue: Long) = getLong(key, defaultValue)
@deprecated("use new com.typesafe.config.Config API instead", "2.0")
def apply(key: String, defaultValue: Boolean) = getBool(key, defaultValue)
@deprecated("use new com.typesafe.config.Config API instead", "2.0")
def getSection(name: String): Option[OldConfiguration] = {
try {
Option(new OldConfiguration(config.getConfig(name)))
} catch {
case _ ⇒ None
}
}
}
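
A brief sketch (illustrative, not part of the diff) contrasting the deprecated OldConfiguration accessors with the com.typesafe.config.Config calls they wrap, obtained via ActorSystem.settings.config as the deprecation message suggests; the system name and config key are made up.

import akka.actor.ActorSystem
import com.typesafe.config.Config

object ConfigAccessSketch extends App {
  val system = ActorSystem("ConfigSketch")      // illustrative name
  val config: Config = system.settings.config   // what the deprecation message points to

  // Old style (deprecated): OldConfig.config.getInt("my-app.pool-size", 4)
  // New style, directly on com.typesafe.config.Config (key is illustrative):
  val poolSize =
    if (config.hasPath("my-app.pool-size")) config.getInt("my-app.pool-size")
    else 4

  println(poolSize)
  system.shutdown()
}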

View file

@ -0,0 +1,65 @@
/**
* Copyright (C) 2009-2012 Typesafe Inc. <http://www.typesafe.com>
*/
package akka.dispatch
import java.util.concurrent.TimeoutException
import akka.util.duration._
import akka.AkkaException
import akka.util.BoxedType
import akka.util.Duration
import akka.actor.GlobalActorSystem
/**
* Some old methods made available through implicit conversion in
* [[akka.migration]].
*/
@deprecated("use new Future api instead", "2.0")
class OldFuture[T](future: Future[T]) {
@deprecated("use akka.dispatch.Await.result instead", "2.0")
def get: T = try {
Await.result(future, GlobalActorSystem.settings.ActorTimeout.duration)
} catch {
case e: TimeoutException ⇒ throw new FutureTimeoutException(e.getMessage, e)
}
@deprecated("use akka.dispatch.Await.ready instead", "2.0")
def await: Future[T] = await(GlobalActorSystem.settings.ActorTimeout.duration)
@deprecated("use akka.dispatch.Await.ready instead", "2.0")
def await(atMost: Duration) = try {
Await.ready(future, atMost)
future
} catch {
case e: TimeoutException ⇒ throw new FutureTimeoutException(e.getMessage, e)
}
@deprecated("use new Future api instead", "2.0")
def as[A](implicit m: Manifest[A]): Option[A] = {
try await catch { case _: FutureTimeoutException ⇒ }
future.value match {
case None ⇒ None
case Some(Left(ex)) ⇒ throw ex
case Some(Right(v)) ⇒ Some(BoxedType(m.erasure).cast(v).asInstanceOf[A])
}
}
@deprecated("use new Future api instead", "2.0")
def asSilently[A](implicit m: Manifest[A]): Option[A] = {
try await catch { case _: FutureTimeoutException ⇒ }
future.value match {
case None ⇒ None
case Some(Left(ex)) ⇒ throw ex
case Some(Right(v)) ⇒
try Some(BoxedType(m.erasure).cast(v).asInstanceOf[A])
catch { case _: ClassCastException ⇒ None }
}
}
}
@deprecated("Await throws java.util.concurrent.TimeoutException", "2.0")
class FutureTimeoutException(message: String, cause: Throwable = null) extends AkkaException(message, cause) {
def this(message: String) = this(message, null)
}
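
A minimal sketch (illustrative, not part of the diff) of the replacement the deprecation messages name: Await.result with an explicit timeout instead of future.get. The echo actor, system name and timeout are made up.

import akka.actor.{ Actor, ActorSystem, Props }
import akka.dispatch.Await
import akka.pattern.ask
import akka.util.Timeout
import akka.util.duration._

object AwaitSketch extends App {
  val system = ActorSystem("AwaitSketch")        // illustrative name
  val echo = system.actorOf(Props(new Actor {
    def receive = { case msg ⇒ sender ! msg }
  }))

  implicit val timeout = Timeout(5 seconds)      // illustrative timeout
  val future = echo ? "hello"

  // 1.x style (deprecated): future.get / future.await
  // 2.0 style: block explicitly with Await
  val result = Await.result(future, timeout.duration)
  println(result)
  system.shutdown()
}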

View file

@ -0,0 +1,81 @@
/**
* Copyright (C) 2009-2012 Typesafe Inc. <http://www.typesafe.com>
*/
package akka.event
import akka.actor.GlobalActorSystem
/**
* Migration replacement for `akka.event.EventHandler`
*/
@deprecated("use Logging instead", "2.0")
object OldEventHandler {
@deprecated("use Logging instead", "2.0")
def error(cause: Throwable, instance: AnyRef, message: String) {
val log = Logging.getLogger(GlobalActorSystem, instance)
if (log.isErrorEnabled) log.error(cause, message)
}
@deprecated("use Logging instead", "2.0")
def error(cause: Throwable, instance: AnyRef, message: Any) {
val log = Logging.getLogger(GlobalActorSystem, instance)
if (log.isErrorEnabled) log.error(cause, message.toString)
}
@deprecated("use Logging instead", "2.0")
def error(instance: AnyRef, message: String) {
val log = Logging.getLogger(GlobalActorSystem, instance)
if (log.isErrorEnabled) log.error(message.toString)
}
@deprecated("use Logging instead", "2.0")
def error(instance: AnyRef, message: Any) {
val log = Logging.getLogger(GlobalActorSystem, instance)
if (log.isErrorEnabled) log.error(message.toString)
}
@deprecated("use Logging instead", "2.0")
def warning(instance: AnyRef, message: String) {
val log = Logging.getLogger(GlobalActorSystem, instance)
if (log.isWarningEnabled) log.warning(message)
}
@deprecated("use Logging instead", "2.0")
def warning(instance: AnyRef, message: Any) {
val log = Logging.getLogger(GlobalActorSystem, instance)
if (log.isWarningEnabled) log.warning(message.toString)
}
@deprecated("use Logging instead", "2.0")
def info(instance: AnyRef, message: String) {
val log = Logging.getLogger(GlobalActorSystem, instance)
if (log.isInfoEnabled) log.info(message)
}
@deprecated("use Logging instead", "2.0")
def info(instance: AnyRef, message: Any) {
val log = Logging.getLogger(GlobalActorSystem, instance)
if (log.isInfoEnabled) log.info(message.toString)
}
@deprecated("use Logging instead", "2.0")
def debug(instance: AnyRef, message: String) {
val log = Logging.getLogger(GlobalActorSystem, instance)
if (log.isDebugEnabled) log.debug(message)
}
@deprecated("use Logging instead", "2.0")
def debug(instance: AnyRef, message: Any) {
val log = Logging.getLogger(GlobalActorSystem, instance)
if (log.isDebugEnabled) log.debug(message.toString)
}
@deprecated("use Logging instead", "2.0")
def isInfoEnabled = Logging.getLogger(GlobalActorSystem, this).isInfoEnabled
@deprecated("use Logging instead", "2.0")
def isDebugEnabled = Logging.getLogger(GlobalActorSystem, this).isDebugEnabled
}
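
A short sketch (illustrative, not part of the diff) of the Logging-based replacement these wrappers delegate to; the system name, log source (this) and message are made up.

import akka.actor.ActorSystem
import akka.event.Logging

object LoggingSketch extends App {
  val system = ActorSystem("LoggingSketch")   // illustrative name
  val log = Logging.getLogger(system, this)

  // 1.x style (deprecated): EventHandler.info(this, "started")
  // 2.0 style:
  if (log.isInfoEnabled) log.info("started")
  system.shutdown()
}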

View file

@ -0,0 +1,34 @@
/**
* Copyright (C) 2009-2012 Typesafe Inc. <http://www.typesafe.com>
*/
package akka
import akka.dispatch.Future
import akka.dispatch.OldFuture
import akka.util.Timeout
import akka.actor.GlobalActorSystem
import akka.dispatch.MessageDispatcher
import akka.actor.ActorRef
package object migration {
implicit def future2OldFuture[T](future: Future[T]): OldFuture[T] = new OldFuture[T](future)
implicit def askTimeout: Timeout = GlobalActorSystem.settings.ActorTimeout
implicit def defaultDispatcher: MessageDispatcher = GlobalActorSystem.dispatcher
implicit def actorRef2OldActorRef(actorRef: ActorRef) = new OldActorRef(actorRef)
class OldActorRef(actorRef: ActorRef) {
@deprecated("Actors are automatically started when created, i.e. remove old call to start()", "2.0")
def start(): ActorRef = actorRef
@deprecated("Stop with ActorSystem or ActorContext instead", "2.0")
def exit() = stop()
@deprecated("Stop with ActorSystem or ActorContext instead", "2.0")
def stop(): Unit = GlobalActorSystem.stop(actorRef)
}
}
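
A tiny sketch (illustrative, not part of the diff) of how the migration package object's implicits are meant to be pulled in during porting; everything around the import is made up for the example.

import akka.actor.{ Actor, ActorSystem, Props }
import akka.pattern.ask
import akka.migration._   // brings in askTimeout, future2OldFuture, actorRef2OldActorRef

object MigrationImplicitsSketch extends App {
  val system = ActorSystem("MigrationImplicitsSketch")   // illustrative name
  val ref = system.actorOf(Props(new Actor {
    def receive = { case msg ⇒ sender ! msg }
  }))

  ref.start()                     // deprecated no-op provided by actorRef2OldActorRef
  val reply = (ref ? "hello").get // deprecated blocking get provided by future2OldFuture
  println(reply)
  system.shutdown()
}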

View file

@ -15,12 +15,6 @@ import akka.pattern.{ ask, AskTimeoutException }
@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner])
class ActorTimeoutSpec extends AkkaSpec with BeforeAndAfterAll with DefaultTimeout {
def actorWithTimeout(t: Timeout): ActorRef = system.actorOf(Props(creator = () ⇒ new Actor {
def receive = {
case x ⇒
}
}, timeout = t))
val defaultTimeout = system.settings.ActorTimeout.duration
val testTimeout = if (system.settings.ActorTimeout.duration < 400.millis) 500 millis else 100 millis
@ -28,7 +22,7 @@ class ActorTimeoutSpec extends AkkaSpec with BeforeAndAfterAll with DefaultTimeo
"use the global default timeout if no implicit in scope" in {
within(defaultTimeout - 100.millis, defaultTimeout + 400.millis) {
val echo = actorWithTimeout(Timeout(12))
val echo = system.actorOf(Props.empty)
try {
val d = system.settings.ActorTimeout.duration
val f = echo ? "hallo"
@ -40,7 +34,7 @@ class ActorTimeoutSpec extends AkkaSpec with BeforeAndAfterAll with DefaultTimeo
"use implicitly supplied timeout" in {
implicit val timeout = Timeout(testTimeout)
within(testTimeout - 100.millis, testTimeout + 300.millis) {
val echo = actorWithTimeout(Props.defaultTimeout)
val echo = system.actorOf(Props.empty)
try {
val f = (echo ? "hallo").mapTo[String]
intercept[AskTimeoutException] { Await.result(f, testTimeout + testTimeout) }
@ -50,7 +44,7 @@ class ActorTimeoutSpec extends AkkaSpec with BeforeAndAfterAll with DefaultTimeo
"use explicitly supplied timeout" in {
within(testTimeout - 100.millis, testTimeout + 300.millis) {
val echo = actorWithTimeout(Props.defaultTimeout)
val echo = system.actorOf(Props.empty)
val f = echo.?("hallo", testTimeout)
try {
intercept[AskTimeoutException] { Await.result(f, testTimeout + 300.millis) }

View file

@ -34,19 +34,26 @@ trait DeathWatchSpec { this: AkkaSpec with ImplicitSender with DefaultTimeout
}
"notify with one Terminated message when an Actor is stopped" in {
val terminal = system.actorOf(Props(context ⇒ { case _ ⇒ }))
startWatching(terminal)
testActor ! "ping"
expectMsg("ping")
val terminal = system.actorOf(Props.empty)
startWatching(terminal) ! "hallo"
expectMsg("hallo") // this ensures that the DaemonMsgWatch has been received before we send the PoisonPill
terminal ! PoisonPill
expectTerminationOf(terminal)
}
"notify with one Terminated message when an Actor is already dead" in {
val terminal = system.actorOf(Props.empty)
terminal ! PoisonPill
startWatching(terminal)
expectTerminationOf(terminal)
}
"notify with all monitors with one Terminated message when an Actor is stopped" in {
val terminal = system.actorOf(Props(context ⇒ { case _ ⇒ }))
val terminal = system.actorOf(Props.empty)
val monitor1, monitor2, monitor3 = startWatching(terminal)
terminal ! PoisonPill
@ -61,7 +68,7 @@ trait DeathWatchSpec { this: AkkaSpec with ImplicitSender with DefaultTimeout
}
"notify with _current_ monitors with one Terminated message when an Actor is stopped" in {
val terminal = system.actorOf(Props(context ⇒ { case _ ⇒ }))
val terminal = system.actorOf(Props.empty)
val monitor1, monitor3 = startWatching(terminal)
val monitor2 = system.actorOf(Props(new Actor {
context.watch(terminal)

View file

@ -13,31 +13,33 @@ import akka.util.duration._
object DeployerSpec {
val deployerConf = ConfigFactory.parseString("""
akka.actor.deployment {
/user/service1 {
/service1 {
}
/user/service3 {
create-as {
class = "akka.actor.DeployerSpec$RecipeActor"
}
}
/user/service-direct {
/service-direct {
router = from-code
}
/user/service-direct2 {
/service-direct2 {
router = from-code
# nr-of-instances ignored when router = direct
nr-of-instances = 2
}
/user/service-round-robin {
/service-round-robin {
router = round-robin
}
/user/service-random {
/service-random {
router = random
}
/user/service-scatter-gather {
/service-scatter-gather {
router = scatter-gather
within = 2 seconds
}
/service-resizer {
router = round-robin
resizer {
lower-bound = 1
upper-bound = 10
}
}
}
""", ConfigParseOptions.defaults)
@ -53,7 +55,7 @@ class DeployerSpec extends AkkaSpec(DeployerSpec.deployerConf) {
"A Deployer" must {
"be able to parse 'akka.actor.deployment._' with all default values" in {
val service = "/user/service1"
val service = "/service1"
val deployment = system.asInstanceOf[ActorSystemImpl].provider.deployer.lookup(service)
deployment must be('defined)
@ -61,36 +63,21 @@ class DeployerSpec extends AkkaSpec(DeployerSpec.deployerConf) {
Deploy(
service,
deployment.get.config,
None,
NoRouter,
LocalScope)))
}
"use None deployment for undefined service" in {
val service = "/user/undefined"
val service = "/undefined"
val deployment = system.asInstanceOf[ActorSystemImpl].provider.deployer.lookup(service)
deployment must be(None)
}
"be able to parse 'akka.actor.deployment._' with recipe" in {
val service = "/user/service3"
val deployment = system.asInstanceOf[ActorSystemImpl].provider.deployer.lookup(service)
deployment must be('defined)
deployment must be(Some(
Deploy(
service,
deployment.get.config,
Some(ActorRecipe(classOf[DeployerSpec.RecipeActor])),
NoRouter,
LocalScope)))
}
"detect invalid number-of-instances" in {
intercept[com.typesafe.config.ConfigException.WrongType] {
val invalidDeployerConf = ConfigFactory.parseString("""
akka.actor.deployment {
/user/service-invalid-number-of-instances {
/service-invalid-number-of-instances {
router = round-robin
nr-of-instances = boom
}
@ -102,37 +89,37 @@ class DeployerSpec extends AkkaSpec(DeployerSpec.deployerConf) {
}
"be able to parse 'akka.actor.deployment._' with direct router" in {
assertRouting(NoRouter, "/user/service-direct")
assertRouting(NoRouter, "/service-direct")
}
"ignore nr-of-instances with direct router" in {
assertRouting(NoRouter, "/user/service-direct2")
assertRouting(NoRouter, "/service-direct2")
}
"be able to parse 'akka.actor.deployment._' with round-robin router" in {
assertRouting(RoundRobinRouter(1), "/user/service-round-robin")
assertRouting(RoundRobinRouter(1), "/service-round-robin")
}
"be able to parse 'akka.actor.deployment._' with random router" in {
assertRouting(RandomRouter(1), "/user/service-random")
assertRouting(RandomRouter(1), "/service-random")
}
"be able to parse 'akka.actor.deployment._' with scatter-gather router" in {
assertRouting(ScatterGatherFirstCompletedRouter(nrOfInstances = 1, within = 2 seconds), "/user/service-scatter-gather")
assertRouting(ScatterGatherFirstCompletedRouter(nrOfInstances = 1, within = 2 seconds), "/service-scatter-gather")
}
"be able to parse 'akka.actor.deployment._' with router resizer" in {
val resizer = DefaultResizer()
assertRouting(RoundRobinRouter(resizer = Some(resizer)), "/service-resizer")
}
def assertRouting(expected: RouterConfig, service: String) {
val deployment = system.asInstanceOf[ActorSystemImpl].provider.deployer.lookup(service)
deployment must be('defined)
deployment must be(Some(
Deploy(
service,
deployment.get.config,
None,
expected,
LocalScope)))
deployment.get.path must be(service)
deployment.get.routing.getClass must be(expected.getClass)
deployment.get.routing.resizer must be(expected.resizer)
deployment.get.scope must be(LocalScope)
}
}

View file

@ -171,7 +171,7 @@ class FSMActorSpec extends AkkaSpec(Map("akka.actor.debug.fsm" -> true)) with Im
system.eventStream.subscribe(testActor, classOf[Logging.Error])
fsm ! "go"
expectMsgPF(1 second, hint = "Next state 2 does not exist") {
case Logging.Error(_, `name`, "Next state 2 does not exist") ⇒ true
case Logging.Error(_, `name`, _, "Next state 2 does not exist") ⇒ true
}
system.eventStream.unsubscribe(testActor)
}
@ -218,18 +218,19 @@ class FSMActorSpec extends AkkaSpec(Map("akka.actor.debug.fsm" -> true)) with Im
}
})
val name = fsm.path.toString
val fsmClass = fsm.underlyingActor.getClass
system.eventStream.subscribe(testActor, classOf[Logging.Debug])
fsm ! "go"
expectMsgPF(1 second, hint = "processing Event(go,null)") {
case Logging.Debug(`name`, s: String) if s.startsWith("processing Event(go,null) from Actor[") ⇒ true
case Logging.Debug(`name`, `fsmClass`, s: String) if s.startsWith("processing Event(go,null) from Actor[") ⇒ true
}
expectMsg(1 second, Logging.Debug(name, "setting timer 't'/1500 milliseconds: Shutdown"))
expectMsg(1 second, Logging.Debug(name, "transition 1 -> 2"))
expectMsg(1 second, Logging.Debug(name, fsmClass, "setting timer 't'/1500 milliseconds: Shutdown"))
expectMsg(1 second, Logging.Debug(name, fsmClass, "transition 1 -> 2"))
fsm ! "stop"
expectMsgPF(1 second, hint = "processing Event(stop,null)") {
case Logging.Debug(`name`, s: String) if s.startsWith("processing Event(stop,null) from Actor[") ⇒ true
case Logging.Debug(`name`, `fsmClass`, s: String) if s.startsWith("processing Event(stop,null) from Actor[") ⇒ true
}
expectMsgAllOf(1 second, Logging.Debug(name, "canceling timer 't'"), Normal)
expectMsgAllOf(1 second, Logging.Debug(name, fsmClass, "canceling timer 't'"), Normal)
expectNoMsg(1 second)
system.eventStream.unsubscribe(testActor)
}

View file

@ -9,8 +9,21 @@ import akka.util.duration._
import akka.util.Timeout
import akka.dispatch.{ Await, Future }
object LocalActorRefProviderSpec {
val config = """
akka {
actor {
default-dispatcher {
core-pool-size-min = 8
core-pool-size-max = 16
}
}
}
"""
}
@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner])
class LocalActorRefProviderSpec extends AkkaSpec {
class LocalActorRefProviderSpec extends AkkaSpec(LocalActorRefProviderSpec.config) {
"An LocalActorRefProvider" must {
"find actor refs using actorFor" in {

View file

@ -1,14 +1,12 @@
package akka.actor
import org.scalatest.BeforeAndAfterEach
import akka.testkit.AkkaSpec
import akka.testkit.EventFilter
import akka.util.duration._
import java.util.concurrent.{ CountDownLatch, ConcurrentLinkedQueue, TimeUnit }
import akka.testkit.DefaultTimeout
import akka.testkit.TestLatch
import akka.testkit._
import akka.dispatch.Await
import akka.pattern.ask
import java.util.concurrent.atomic.AtomicInteger
@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner])
class SchedulerSpec extends AkkaSpec with BeforeAndAfterEach with DefaultTimeout {
@ -95,6 +93,36 @@ class SchedulerSpec extends AkkaSpec with BeforeAndAfterEach with DefaultTimeout
assert(ticks.await(3, TimeUnit.SECONDS) == false) //No counting down should've been made
}
"be cancellable during initial delay" in {
val ticks = new AtomicInteger
val initialDelay = 200.milliseconds.dilated
val delay = 10.milliseconds.dilated
val timeout = collectCancellable(system.scheduler.schedule(initialDelay, delay) {
ticks.incrementAndGet()
})
10.milliseconds.dilated.sleep()
timeout.cancel()
(initialDelay + 100.milliseconds.dilated).sleep()
ticks.get must be(0)
}
"be cancellable after initial delay" in {
val ticks = new AtomicInteger
val initialDelay = 20.milliseconds.dilated
val delay = 200.milliseconds.dilated
val timeout = collectCancellable(system.scheduler.schedule(initialDelay, delay) {
ticks.incrementAndGet()
})
(initialDelay + 100.milliseconds.dilated).sleep()
timeout.cancel()
(delay + 100.milliseconds.dilated).sleep()
ticks.get must be(1)
}
/**
* ticket #307
*/

View file

@ -73,7 +73,7 @@ class SupervisorSpec extends AkkaSpec with BeforeAndAfterEach with ImplicitSende
// Creating actors and supervisors
// =====================================================
private def child(supervisor: ActorRef, props: Props): ActorRef = Await.result((supervisor ? props).mapTo[ActorRef], props.timeout.duration)
private def child(supervisor: ActorRef, props: Props): ActorRef = Await.result((supervisor ? props).mapTo[ActorRef], timeout.duration)
def temporaryActorAllForOne = {
val supervisor = system.actorOf(Props[Supervisor].withFaultHandler(AllForOneStrategy(List(classOf[Exception]), Some(0))))

View file

@ -180,13 +180,14 @@ class TypedActorSpec extends AkkaSpec(TypedActorSpec.config)
def newFooBar: Foo = newFooBar(Duration(2, "s"))
def newFooBar(d: Duration): Foo =
newFooBar(Props().withTimeout(Timeout(d)))
TypedActor(system).typedActorOf(TypedProps[Bar](classOf[Foo], classOf[Bar]).withTimeout(Timeout(d)))
def newFooBar(props: Props): Foo =
TypedActor(system).typedActorOf(classOf[Foo], classOf[Bar], props)
def newFooBar(dispatcher: String, d: Duration): Foo =
TypedActor(system).typedActorOf(TypedProps[Bar](classOf[Foo], classOf[Bar]).withTimeout(Timeout(d)).withDispatcher(dispatcher))
def newStacked(props: Props = Props().withTimeout(Timeout(2000))): Stacked =
TypedActor(system).typedActorOf(classOf[Stacked], classOf[StackedImpl], props)
def newStacked(): Stacked =
TypedActor(system).typedActorOf(
TypedProps[StackedImpl](classOf[Stacked], classOf[StackedImpl]).withTimeout(Timeout(2000)))
def mustStop(typedActor: AnyRef) = TypedActor(system).stop(typedActor) must be(true)
@ -299,11 +300,11 @@ class TypedActorSpec extends AkkaSpec(TypedActorSpec.config)
"be able to handle exceptions when calling methods" in {
filterEvents(EventFilter[IllegalStateException]("expected")) {
val boss = system.actorOf(Props(context {
case p: Props ⇒ context.sender ! TypedActor(context).typedActorOf(classOf[Foo], classOf[Bar], p)
case p: TypedProps[_] ⇒ context.sender ! TypedActor(context).typedActorOf(p)
}).withFaultHandler(OneForOneStrategy {
case e: IllegalStateException if e.getMessage == "expected" ⇒ FaultHandlingStrategy.Resume
}))
val t = Await.result((boss ? Props().withTimeout(2 seconds)).mapTo[Foo], timeout.duration)
val t = Await.result((boss ? TypedProps[Bar](classOf[Foo], classOf[Bar]).withTimeout(2 seconds)).mapTo[Foo], timeout.duration)
t.incr()
t.failingPigdog()
@ -331,7 +332,7 @@ class TypedActorSpec extends AkkaSpec(TypedActorSpec.config)
}
"be able to support implementation only typed actors" in {
val t = TypedActor(system).typedActorOf[Foo, Bar](Props())
val t: Foo = TypedActor(system).typedActorOf(TypedProps[Bar]())
val f = t.futurePigdog(200)
val f2 = t.futurePigdog(0)
f2.isCompleted must be(false)
@ -341,16 +342,14 @@ class TypedActorSpec extends AkkaSpec(TypedActorSpec.config)
}
"be able to support implementation only typed actors with complex interfaces" in {
val t = TypedActor(system).typedActorOf[Stackable1 with Stackable2, StackedImpl]()
val t: Stackable1 with Stackable2 = TypedActor(system).typedActorOf(TypedProps[StackedImpl]())
t.stackable1 must be("foo")
t.stackable2 must be("bar")
mustStop(t)
}
"be able to use balancing dispatcher" in {
val props = Props(timeout = Timeout(6600), dispatcher = "pooled-dispatcher")
val thais = for (i ← 1 to 60) yield newFooBar(props)
val thais = for (i ← 1 to 60) yield newFooBar("pooled-dispatcher", 6 seconds)
val iterator = new CyclicIterator(thais)
val results = for (i ← 1 to 120) yield (i, iterator.next.futurePigdog(200L, i))
@ -406,7 +405,7 @@ class TypedActorSpec extends AkkaSpec(TypedActorSpec.config)
"be able to override lifecycle callbacks" in {
val latch = new CountDownLatch(16)
val ta = TypedActor(system)
val t: LifeCycles = ta.typedActorOf(classOf[LifeCycles], new Creator[LifeCyclesImpl] { def create = new LifeCyclesImpl(latch) }, Props())
val t: LifeCycles = ta.typedActorOf(TypedProps[LifeCyclesImpl](classOf[LifeCycles], new LifeCyclesImpl(latch)))
EventFilter[IllegalStateException]("Crash!", occurrences = 1) intercept {
t.crash()
}

View file

@ -152,7 +152,7 @@ object ActorModelSpec {
await(deadline)(stops == dispatcher.stops.get)
} catch {
case e ⇒
system.eventStream.publish(Error(e, dispatcher.toString, "actual: stops=" + dispatcher.stops.get +
system.eventStream.publish(Error(e, dispatcher.toString, dispatcher.getClass, "actual: stops=" + dispatcher.stops.get +
" required: stops=" + stops))
throw e
}
@ -209,9 +209,12 @@ object ActorModelSpec {
await(deadline)(stats.restarts.get() == restarts)
} catch {
case e ⇒
system.eventStream.publish(Error(e, Option(dispatcher).toString, "actual: " + stats + ", required: InterceptorStats(susp=" + suspensions +
",res=" + resumes + ",reg=" + registers + ",unreg=" + unregisters +
",recv=" + msgsReceived + ",proc=" + msgsProcessed + ",restart=" + restarts))
system.eventStream.publish(Error(e,
Option(dispatcher).toString,
(Option(dispatcher) getOrElse this).getClass,
"actual: " + stats + ", required: InterceptorStats(susp=" + suspensions +
",res=" + resumes + ",reg=" + registers + ",unreg=" + unregisters +
",recv=" + msgsReceived + ",proc=" + msgsProcessed + ",restart=" + restarts))
throw e
}
}
@ -312,7 +315,7 @@ abstract class ActorModelSpec(config: String) extends AkkaSpec(config) with Defa
try {
f
} catch {
case e ⇒ system.eventStream.publish(Error(e, "spawn", "error in spawned thread"))
case e ⇒ system.eventStream.publish(Error(e, "spawn", this.getClass, "error in spawned thread"))
}
}
}

View file

@ -26,8 +26,8 @@ class ConfigSpec extends AkkaSpec(ConfigFactory.defaultReference) {
getString("akka.actor.default-dispatcher.type") must equal("Dispatcher")
getString("akka.actor.default-dispatcher.name") must equal("default-dispatcher")
getMilliseconds("akka.actor.default-dispatcher.keep-alive-time") must equal(60 * 1000)
getDouble("akka.actor.default-dispatcher.core-pool-size-factor") must equal(8.0)
getDouble("akka.actor.default-dispatcher.max-pool-size-factor") must equal(8.0)
getDouble("akka.actor.default-dispatcher.core-pool-size-factor") must equal(3.0)
getDouble("akka.actor.default-dispatcher.max-pool-size-factor") must equal(3.0)
getInt("akka.actor.default-dispatcher.task-queue-size") must equal(-1)
getString("akka.actor.default-dispatcher.task-queue-type") must equal("linked")
getBoolean("akka.actor.default-dispatcher.allow-core-timeout") must equal(true)

View file

@ -50,9 +50,17 @@ class FutureSpec extends AkkaSpec with Checkers with BeforeAndAfterAll with Defa
"never completed" must {
behave like emptyFuture(_(Promise()))
"return supplied value on timeout" in {
val failure = Promise.failed[String](new RuntimeException("br0ken"))
val otherFailure = Promise.failed[String](new RuntimeException("last"))
val empty = Promise[String]()
val timedOut = Promise.successful[String]("Timedout")
val promise = Promise[String]() orElse timedOut
Await.result(promise, timeout.duration) must be("Timedout")
Await.result(failure or timedOut, timeout.duration) must be("Timedout")
Await.result(timedOut or empty, timeout.duration) must be("Timedout")
Await.result(failure or failure or timedOut, timeout.duration) must be("Timedout")
intercept[RuntimeException] {
Await.result(failure or otherFailure, timeout.duration)
}.getMessage must be("last")
}
}
"completed with a result" must {

View file

@ -108,7 +108,7 @@ class EventStreamSpec extends AkkaSpec(EventStreamSpec.config) {
private def verifyLevel(bus: LoggingBus, level: Logging.LogLevel) {
import Logging._
val allmsg = Seq(Debug("", "debug"), Info("", "info"), Warning("", "warning"), Error("", "error"))
val allmsg = Seq(Debug("", null, "debug"), Info("", null, "info"), Warning("", null, "warning"), Error("", null, "error"))
val msg = allmsg filter (_.level <= level)
allmsg foreach bus.publish
msg foreach (x ⇒ expectMsg(x))

View file

@ -59,7 +59,7 @@ class LoggingReceiveSpec extends WordSpec with BeforeAndAfterEach with BeforeAnd
}
val log = LoggingReceive("funky")(r)
log.isDefinedAt("hallo")
expectMsg(1 second, Logging.Debug("funky", "received unhandled message hallo"))
expectMsg(1 second, Logging.Debug("funky", classOf[DummyClassForStringSources], "received unhandled message hallo"))
}
}
@ -83,7 +83,7 @@ class LoggingReceiveSpec extends WordSpec with BeforeAndAfterEach with BeforeAnd
val name = actor.path.toString
actor ! "buh"
within(1 second) {
expectMsg(Logging.Debug(name, "received handled message buh"))
expectMsg(Logging.Debug(name, actor.underlyingActor.getClass, "received handled message buh"))
expectMsg("x")
}
@ -109,7 +109,7 @@ class LoggingReceiveSpec extends WordSpec with BeforeAndAfterEach with BeforeAnd
})
actor ! "buh"
within(1 second) {
expectMsg(Logging.Debug(actor.path.toString, "received handled message buh"))
expectMsg(Logging.Debug(actor.path.toString, actor.underlyingActor.getClass, "received handled message buh"))
expectMsg("x")
}
}
@ -130,7 +130,7 @@ class LoggingReceiveSpec extends WordSpec with BeforeAndAfterEach with BeforeAnd
val name = actor.path.toString
actor ! PoisonPill
expectMsgPF() {
case Logging.Debug(`name`, msg: String) if msg startsWith "received AutoReceiveMessage Envelope(PoisonPill" ⇒ true
case Logging.Debug(`name`, _, msg: String) if msg startsWith "received AutoReceiveMessage Envelope(PoisonPill" ⇒ true
}
awaitCond(actor.isTerminated, 100 millis)
}
@ -142,7 +142,7 @@ class LoggingReceiveSpec extends WordSpec with BeforeAndAfterEach with BeforeAnd
val sys = impl.systemGuardian.path.toString
ignoreMute(this)
ignoreMsg {
case Logging.Debug(s, _) ⇒ s.contains("MainBusReaper") || s == sys
case Logging.Debug(`sys`, _, _) ⇒ true
}
system.eventStream.subscribe(testActor, classOf[Logging.Debug])
system.eventStream.subscribe(testActor, classOf[Logging.Error])
@ -151,51 +151,53 @@ class LoggingReceiveSpec extends WordSpec with BeforeAndAfterEach with BeforeAnd
val lname = lifecycleGuardian.path.toString
val supervisor = TestActorRef[TestLogActor](Props[TestLogActor].withFaultHandler(OneForOneStrategy(List(classOf[Throwable]), 5, 5000)))
val sname = supervisor.path.toString
val sclass = classOf[TestLogActor]
val supervisorSet = receiveWhile(messages = 2) {
case Logging.Debug(`lname`, msg: String) if msg startsWith "now supervising" ⇒ 1
case Logging.Debug(`sname`, msg: String) if msg startsWith "started" ⇒ 2
case Logging.Debug(`lname`, _, msg: String) if msg startsWith "now supervising" ⇒ 1
case Logging.Debug(`sname`, `sclass`, msg: String) if msg startsWith "started" ⇒ 2
}.toSet
expectNoMsg(Duration.Zero)
assert(supervisorSet == Set(1, 2), supervisorSet + " was not Set(1, 2)")
val actor = TestActorRef[TestLogActor](Props[TestLogActor], supervisor, "none")
val aname = actor.path.toString
val aclass = classOf[TestLogActor]
val set = receiveWhile(messages = 2) {
case Logging.Debug(`sname`, msg: String) if msg startsWith "now supervising" ⇒ 1
case Logging.Debug(`aname`, msg: String) if msg startsWith "started" ⇒ 2
case Logging.Debug(`sname`, _, msg: String) if msg startsWith "now supervising" ⇒ 1
case Logging.Debug(`aname`, `aclass`, msg: String) if msg startsWith "started" ⇒ 2
}.toSet
expectNoMsg(Duration.Zero)
assert(set == Set(1, 2), set + " was not Set(1, 2)")
supervisor watch actor
expectMsgPF(hint = "now monitoring") {
case Logging.Debug(ref, msg: String) ⇒
case Logging.Debug(ref, `sclass`, msg: String) ⇒
ref == supervisor.underlyingActor && msg.startsWith("now monitoring")
}
supervisor unwatch actor
expectMsgPF(hint = "stopped monitoring") {
case Logging.Debug(ref, msg: String) ⇒
case Logging.Debug(ref, `sclass`, msg: String) ⇒
ref == supervisor.underlyingActor && msg.startsWith("stopped monitoring")
}
EventFilter[ActorKilledException](occurrences = 1) intercept {
actor ! Kill
val set = receiveWhile(messages = 3) {
case Logging.Error(_: ActorKilledException, `aname`, "Kill") ⇒ 1
case Logging.Debug(`aname`, "restarting") ⇒ 2
case Logging.Debug(`aname`, "restarted") ⇒ 3
case Logging.Error(_: ActorKilledException, `aname`, `aclass`, "Kill") ⇒ 1
case Logging.Debug(`aname`, `aclass`, "restarting") ⇒ 2
case Logging.Debug(`aname`, `aclass`, "restarted") ⇒ 3
}.toSet
expectNoMsg(Duration.Zero)
assert(set == Set(1, 2, 3), set + " was not Set(1, 2, 3)")
}
system.stop(supervisor)
expectMsg(Logging.Debug(sname, "stopping"))
expectMsg(Logging.Debug(aname, "stopped"))
expectMsg(Logging.Debug(sname, "stopped"))
expectMsg(Logging.Debug(sname, `sclass`, "stopping"))
expectMsg(Logging.Debug(aname, `aclass`, "stopped"))
expectMsg(Logging.Debug(sname, `sclass`, "stopped"))
}
}
}

View file

@ -0,0 +1,52 @@
/**
* Copyright (C) 2009-2012 Typesafe Inc. <http://www.typesafe.com>
*/
package akka.pattern
import akka.testkit.AkkaSpec
import akka.actor.Props
import akka.actor.Actor
import akka.actor.ActorTimeoutException
import akka.dispatch.Await
import akka.util.Duration
import akka.util.duration._
object PatternSpec {
case class Work(duration: Duration)
class TargetActor extends Actor {
def receive = {
case Work(duration) ⇒ duration.sleep()
}
}
}
@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner])
class PatternSpec extends AkkaSpec {
import PatternSpec._
"pattern.gracefulStop" must {
"provide Future for stopping an actor" in {
val target = system.actorOf(Props[TargetActor])
val result = gracefulStop(target, 5 seconds)
Await.result(result, 6 seconds) must be(true)
}
"complete Future when actor already terminated" in {
val target = system.actorOf(Props[TargetActor])
Await.ready(gracefulStop(target, 5 seconds), 6 seconds)
Await.ready(gracefulStop(target, 1 millis), 1 second)
}
"complete Future with ActorTimeoutException when actor not terminated within timeout" in {
val target = system.actorOf(Props[TargetActor])
target ! Work(250 millis)
val result = gracefulStop(target, 10 millis)
intercept[ActorTimeoutException] {
Await.result(result, 200 millis)
}
}
}
}
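
For context (illustrative, not part of the diff): a minimal sketch of the gracefulStop pattern this spec exercises, written as a standalone app; the system name, actor and timeouts are made up, and the import path is assumed to match what the spec uses unqualified.

import akka.actor.{ Actor, ActorSystem, Props }
import akka.dispatch.Await
import akka.pattern.gracefulStop
import akka.util.duration._

object GracefulStopSketch extends App {
  implicit val system = ActorSystem("GracefulStopSketch") // illustrative name
  val worker = system.actorOf(Props(new Actor {
    def receive = { case msg ⇒ sender ! msg }
  }))

  // Ask the actor to stop and wait (at most 5 seconds) for its termination,
  // mirroring the first test case above.
  val stopped = gracefulStop(worker, 5 seconds)
  Await.result(stopped, 6 seconds) // true when the actor terminated in time
  system.shutdown()
}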

View file

@ -1,362 +0,0 @@
package akka.routing
import akka.actor._
import akka.testkit._
import akka.util.duration._
import java.util.concurrent.atomic.{ AtomicBoolean, AtomicInteger }
import akka.testkit.AkkaSpec
import akka.dispatch.{ Await, Promise, Future }
import akka.pattern.ask
object ActorPoolSpec {
trait Foo {
def sq(x: Int, sleep: Long): Future[Int]
}
class FooImpl extends Foo {
import TypedActor.dispatcher
def sq(x: Int, sleep: Long): Future[Int] = {
if (sleep > 0) Thread.sleep(sleep)
Promise.successful(x * x)
}
}
val faultHandler = OneForOneStrategy(List(classOf[Exception]), 5, 1000)
}
@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner])
class TypedActorPoolSpec extends AkkaSpec with DefaultTimeout {
import ActorPoolSpec._
"Actor Pool (2)" must {
"support typed actors" in {
val ta = TypedActor(system)
val pool = ta.createProxy[Foo](new Actor with DefaultActorPool with BoundedCapacityStrategy with MailboxPressureCapacitor with SmallestMailboxSelector with Filter with RunningMeanBackoff with BasicRampup {
val typedActor = TypedActor(context)
def lowerBound = 1
def upperBound = 5
def pressureThreshold = 1
def partialFill = true
def selectionCount = 1
def rampupRate = 0.1
def backoffRate = 0.50
def backoffThreshold = 0.50
def instance(p: Props) = typedActor.getActorRefFor(typedActor.typedActorOf[Foo, FooImpl](props = p.withTimeout(10 seconds)))
def receive = _route
}, Props().withTimeout(10 seconds).withFaultHandler(faultHandler))
val results = for (i ← 1 to 100) yield (i, pool.sq(i, 0))
for ((i, r) ← results)
Await.result(r, timeout.duration) must equal(i * i)
ta.stop(pool)
}
}
}
@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner])
class ActorPoolSpec extends AkkaSpec with DefaultTimeout {
import ActorPoolSpec._
"Actor Pool" must {
"have expected capacity" in {
val latch = TestLatch(2)
val count = new AtomicInteger(0)
val pool = system.actorOf(
Props(new Actor with DefaultActorPool with FixedCapacityStrategy with SmallestMailboxSelector {
def instance(p: Props) = system.actorOf(p.withCreator(new Actor {
def receive = {
case _ ⇒
count.incrementAndGet
latch.countDown()
sender.tell("success")
}
}))
def limit = 2
def selectionCount = 1
def partialFill = true
def receive = _route
}).withFaultHandler(faultHandler))
val successes = TestLatch(2)
val successCounter = system.actorOf(Props(new Actor {
def receive = {
case "success" ⇒ successes.countDown()
}
}))
implicit val replyTo = successCounter
pool ! "a"
pool ! "b"
Await.ready(latch, TestLatch.DefaultTimeout)
Await.ready(successes, TestLatch.DefaultTimeout)
count.get must be(2)
Await.result((pool ? ActorPool.Stat).mapTo[ActorPool.Stats], timeout.duration).size must be(2)
system.stop(pool)
}
"pass ticket #705" in {
val pool = system.actorOf(
Props(new Actor with DefaultActorPool with BoundedCapacityStrategy with MailboxPressureCapacitor with SmallestMailboxSelector with BasicFilter {
def lowerBound = 2
def upperBound = 20
def rampupRate = 0.1
def backoffRate = 0.1
def backoffThreshold = 0.5
def partialFill = true
def selectionCount = 1
def receive = _route
def pressureThreshold = 1
def instance(p: Props) = system.actorOf(p.withCreator(new Actor {
def receive = {
case req: String ⇒ {
(10 millis).dilated.sleep
sender.tell("Response")
}
}
}))
}).withFaultHandler(faultHandler))
try {
(for (count ← 1 to 500) yield pool.?("Test", 20 seconds)) foreach {
Await.result(_, 20 seconds) must be("Response")
}
} finally {
system.stop(pool)
}
}
"grow as needed under pressure" in {
// make sure the pool starts at the expected lower limit and grows to the upper as needed
// as influenced by the backlog of blocking pooled actors
var latch = TestLatch(3)
val count = new AtomicInteger(0)
val pool = system.actorOf(
Props(new Actor with DefaultActorPool with BoundedCapacityStrategy with ActiveActorsPressureCapacitor with SmallestMailboxSelector with BasicNoBackoffFilter {
def instance(p: Props) = system.actorOf(p.withCreator(new Actor {
def receive = {
case n: Int ⇒
(n millis).dilated.sleep
count.incrementAndGet
latch.countDown()
}
}))
def lowerBound = 2
def upperBound = 4
def rampupRate = 0.1
def partialFill = true
def selectionCount = 1
def receive = _route
}).withFaultHandler(faultHandler))
// first message should create the minimum number of delegates
pool ! 1
Await.result((pool ? ActorPool.Stat).mapTo[ActorPool.Stats], timeout.duration).size must be(2)
var loops = 0
def loop(t: Int) = {
latch = TestLatch(loops)
count.set(0)
for (m ← 0 until loops) {
pool ? t
(50 millis).dilated.sleep
}
}
// 2 more should go thru without triggering more
loops = 2
loop(500)
Await.ready(latch, TestLatch.DefaultTimeout)
count.get must be(loops)
Await.result((pool ? ActorPool.Stat).mapTo[ActorPool.Stats], timeout.duration).size must be(2)
// a whole bunch should max it out
loops = 10
loop(500)
Await.ready(latch, TestLatch.DefaultTimeout)
count.get must be(loops)
Await.result((pool ? ActorPool.Stat).mapTo[ActorPool.Stats], timeout.duration).size must be(4)
system.stop(pool)
}
"grow as needed under mailbox pressure" in {
// make sure the pool starts at the expected lower limit and grows to the upper as needed
// as influenced by the backlog of messages in the delegate mailboxes
var latch = TestLatch(3)
val count = new AtomicInteger(0)
val pool = system.actorOf(
Props(new Actor with DefaultActorPool with BoundedCapacityStrategy with MailboxPressureCapacitor with SmallestMailboxSelector with BasicNoBackoffFilter {
def instance(p: Props) = system.actorOf(p.withCreator(new Actor {
def receive = {
case n: Int ⇒
(n millis).dilated.sleep
count.incrementAndGet
latch.countDown()
}
}))
def lowerBound = 2
def upperBound = 4
def pressureThreshold = 3
def rampupRate = 0.1
def partialFill = true
def selectionCount = 1
def receive = _route
}).withFaultHandler(faultHandler))
var loops = 0
def loop(t: Int) = {
latch = TestLatch(loops)
count.set(0)
for (m ← 0 until loops) {
pool ! t
}
}
// send a few messages and observe pool at its lower bound
loops = 3
loop(500)
Await.ready(latch, TestLatch.DefaultTimeout)
count.get must be(loops)
Await.result((pool ? ActorPool.Stat).mapTo[ActorPool.Stats], timeout.duration).size must be(2)
// send a bunch over the threshold and observe an increment
loops = 15
loop(500)
Await.ready(latch, 10 seconds)
count.get must be(loops)
Await.result((pool ? ActorPool.Stat).mapTo[ActorPool.Stats], timeout.duration).size must be >= (3)
system.stop(pool)
}
"round robin" in {
val latch1 = TestLatch(2)
val delegates = new java.util.concurrent.ConcurrentHashMap[String, String]
val pool1 = system.actorOf(
Props(new Actor with DefaultActorPool with FixedCapacityStrategy with RoundRobinSelector with BasicNoBackoffFilter {
def instance(p: Props): ActorRef = system.actorOf(p.withCreator(new Actor {
def receive = {
case _ ⇒
delegates put (self.path.toString, "")
latch1.countDown()
}
}))
def limit = 1
def selectionCount = 1
def rampupRate = 0.1
def partialFill = true
def receive = _route
}).withFaultHandler(faultHandler))
pool1 ! "a"
pool1 ! "b"
Await.ready(latch1, TestLatch.DefaultTimeout)
delegates.size must be(1)
system.stop(pool1)
val latch2 = TestLatch(2)
delegates.clear()
val pool2 = system.actorOf(
Props(new Actor with DefaultActorPool with FixedCapacityStrategy with RoundRobinSelector with BasicNoBackoffFilter {
def instance(p: Props) = system.actorOf(p.withCreator(new Actor {
def receive = {
case _ ⇒
delegates put (self.path.toString, "")
latch2.countDown()
}
}))
def limit = 2
def selectionCount = 1
def rampupRate = 0.1
def partialFill = false
def receive = _route
}).withFaultHandler(faultHandler))
pool2 ! "a"
pool2 ! "b"
Await.ready(latch2, TestLatch.DefaultTimeout)
delegates.size must be(2)
system.stop(pool2)
}
"backoff" in {
val latch = TestLatch(10)
val pool = system.actorOf(
Props(new Actor with DefaultActorPool with BoundedCapacityStrategy with MailboxPressureCapacitor with SmallestMailboxSelector with Filter with RunningMeanBackoff with BasicRampup {
def instance(p: Props) = system.actorOf(p.withCreator(new Actor {
def receive = {
case n: Int ⇒
(n millis).dilated.sleep
latch.countDown()
}
}))
def lowerBound = 1
def upperBound = 5
def pressureThreshold = 1
def partialFill = true
def selectionCount = 1
def rampupRate = 0.1
def backoffRate = 0.50
def backoffThreshold = 0.50
def receive = _route
}).withFaultHandler(faultHandler))
// put some pressure on the pool
for (m ← 0 to 10) pool ! 250
(5 millis).dilated.sleep
val z = Await.result((pool ? ActorPool.Stat).mapTo[ActorPool.Stats], timeout.duration).size
z must be >= (2)
// let it cool down
for (m ← 0 to 3) {
pool ! 1
(500 millis).dilated.sleep
}
Await.result((pool ? ActorPool.Stat).mapTo[ActorPool.Stats], timeout.duration).size must be <= (z)
system.stop(pool)
}
}
}

View file

@ -8,15 +8,28 @@ import akka.util.duration._
import akka.dispatch.Await
import akka.pattern.ask
object ConfiguredLocalRoutingSpec {
val config = """
akka {
actor {
default-dispatcher {
core-pool-size-min = 8
core-pool-size-max = 16
}
}
}
"""
}
@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner])
class ConfiguredLocalRoutingSpec extends AkkaSpec with DefaultTimeout with ImplicitSender {
class ConfiguredLocalRoutingSpec extends AkkaSpec(ConfiguredLocalRoutingSpec.config) with DefaultTimeout with ImplicitSender {
val deployer = system.asInstanceOf[ActorSystemImpl].provider.deployer
"RouterConfig" must {
"be overridable in config" in {
deployer.deploy(Deploy("/config", null, None, RandomRouter(4), LocalScope))
deployer.deploy(Deploy("/config", null, RandomRouter(4), LocalScope))
val actor = system.actorOf(Props(new Actor {
def receive = {
case "get" ⇒ sender ! context.props

View file

@ -0,0 +1,250 @@
/**
* Copyright (C) 2009-2011 Typesafe Inc. <http://www.typesafe.com>
*/
package akka.routing
import akka.actor.Actor
import akka.testkit._
import akka.actor.Props
import akka.dispatch.Await
import akka.util.duration._
import akka.actor.ActorRef
import java.util.concurrent.atomic.AtomicInteger
import akka.pattern.ask
object ResizerSpec {
val config = """
akka.actor.deployment {
/router1 {
router = round-robin
resizer {
lower-bound = 2
upper-bound = 3
}
}
}
"""
class TestActor extends Actor {
def receive = {
case latch: TestLatch ⇒ latch.countDown()
}
}
class BusyActor extends Actor {
def receive = {
case (latch: TestLatch, busy: TestLatch) ⇒
latch.countDown()
Await.ready(busy, 5 seconds)
}
}
}
@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner])
class ResizerSpec extends AkkaSpec(ResizerSpec.config) with DefaultTimeout with ImplicitSender {
import akka.routing.ResizerSpec._
"DefaultResizer" must {
"use settings to evaluate capacity" in {
val resizer = DefaultResizer(
lowerBound = 2,
upperBound = 3)
val c1 = resizer.capacity(IndexedSeq.empty[ActorRef])
c1 must be(2)
val current = IndexedSeq(system.actorOf(Props[TestActor]), system.actorOf(Props[TestActor]))
val c2 = resizer.capacity(current)
c2 must be(0)
}
"use settings to evaluate rampUp" in {
val resizer = DefaultResizer(
lowerBound = 2,
upperBound = 10,
rampupRate = 0.2)
resizer.rampup(pressure = 9, capacity = 10) must be(0)
resizer.rampup(pressure = 5, capacity = 5) must be(1)
resizer.rampup(pressure = 6, capacity = 6) must be(2)
}
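The expectations above are consistent with a ceiling-based rampup, presumably ceil(rampupRate * capacity) requested only when all routees are under pressure. A sketch of that assumed arithmetic (inferred from the assertions, not a copy of the implementation):

// pressure = routees considered busy, capacity = current number of routees
def rampup(pressure: Int, capacity: Int, rampupRate: Double = 0.2): Int =
if (pressure < capacity) 0 else math.ceil(rampupRate * capacity).toInt

rampup(9, 10) // 0: not all routees are busy
rampup(5, 5)  // 1: ceil(0.2 * 5)
rampup(6, 6)  // 2: ceil(0.2 * 6) = ceil(1.2)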
"use settings to evaluate backoff" in {
val resizer = DefaultResizer(
lowerBound = 2,
upperBound = 10,
backoffThreshold = 0.3,
backoffRate = 0.1)
resizer.backoff(pressure = 10, capacity = 10) must be(0)
resizer.backoff(pressure = 4, capacity = 10) must be(0)
resizer.backoff(pressure = 3, capacity = 10) must be(0)
resizer.backoff(pressure = 2, capacity = 10) must be(-1)
resizer.backoff(pressure = 0, capacity = 10) must be(-1)
resizer.backoff(pressure = 1, capacity = 9) must be(-1)
resizer.backoff(pressure = 0, capacity = 9) must be(-1)
}
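Likewise, the backoff expectations fit a floor-based reduction applied only while the busy fraction stays strictly below backoffThreshold. A sketch under that assumption:

def backoff(pressure: Int, capacity: Int,
backoffThreshold: Double = 0.3, backoffRate: Double = 0.1): Int =
if (backoffThreshold > 0.0 && backoffRate > 0.0 && capacity > 0 &&
pressure.toDouble / capacity < backoffThreshold)
math.floor(-1.0 * backoffRate * capacity).toInt
else 0

backoff(3, 10) // 0: busy fraction 0.3 is not below the threshold
backoff(2, 10) // -1: floor(-0.1 * 10)
backoff(0, 9)  // -1: floor(-0.9)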
"be possible to define programatically" in {
val latch = new TestLatch(3)
val resizer = DefaultResizer(
lowerBound = 2,
upperBound = 3)
val router = system.actorOf(Props[TestActor].withRouter(RoundRobinRouter(resizer = Some(resizer))))
router ! latch
router ! latch
router ! latch
Await.ready(latch, 5 seconds)
val current = Await.result(router ? CurrentRoutees, 5 seconds).asInstanceOf[RouterRoutees]
current.routees.size must be(2)
}
"be possible to define in configuration" in {
val latch = new TestLatch(3)
val router = system.actorOf(Props[TestActor].withRouter(FromConfig()), "router1")
router ! latch
router ! latch
router ! latch
Await.ready(latch, 5 seconds)
val current = Await.result(router ? CurrentRoutees, 5 seconds).asInstanceOf[RouterRoutees]
current.routees.size must be(2)
}
"resize when busy" in {
val busy = new TestLatch(1)
val resizer = DefaultResizer(
lowerBound = 1,
upperBound = 3,
pressureThreshold = 0,
messagesPerResize = 1)
val router = system.actorOf(Props[BusyActor].withRouter(RoundRobinRouter(resizer = Some(resizer))))
val latch1 = new TestLatch(1)
router ! (latch1, busy)
Await.ready(latch1, 2 seconds)
val latch2 = new TestLatch(1)
router ! (latch2, busy)
Await.ready(latch2, 2 seconds)
val latch3 = new TestLatch(1)
router ! (latch3, busy)
Await.ready(latch3, 2 seconds)
Await.result(router ? CurrentRoutees, 5 seconds).asInstanceOf[RouterRoutees].routees.size must be(3)
busy.countDown()
}
"grow as needed under pressure" in {
// make sure the pool starts at the expected lower limit and grows to the upper as needed
// as influenced by the backlog of blocking pooled actors
val resizer = DefaultResizer(
lowerBound = 2,
upperBound = 4,
rampupRate = 0.1,
pressureThreshold = 1,
messagesPerResize = 1,
backoffThreshold = 0.0)
val router = system.actorOf(Props(new Actor {
def receive = {
case (n: Int, latch: TestLatch, count: AtomicInteger) ⇒
(n millis).dilated.sleep
count.incrementAndGet
latch.countDown()
}
}).withRouter(RoundRobinRouter(resizer = Some(resizer))))
// first message should create the minimum number of routees
router ! 1
Await.result(router ? CurrentRoutees, 5 seconds).asInstanceOf[RouterRoutees].routees.size must be(2)
def loop(loops: Int, t: Int, latch: TestLatch, count: AtomicInteger) = {
(10 millis).dilated.sleep
for (m ← 0 until loops) {
router.!((t, latch, count))
(10 millis).dilated.sleep
}
}
// 2 more should go thru without triggering more
val count1 = new AtomicInteger
val latch1 = TestLatch(2)
loop(2, 200, latch1, count1)
Await.ready(latch1, TestLatch.DefaultTimeout)
count1.get must be(2)
Await.result(router ? CurrentRoutees, 5 seconds).asInstanceOf[RouterRoutees].routees.size must be(2)
// a whole bunch should max it out
val count2 = new AtomicInteger
val latch2 = TestLatch(10)
loop(10, 200, latch2, count2)
Await.ready(latch2, TestLatch.DefaultTimeout)
count2.get must be(10)
Await.result(router ? CurrentRoutees, 5 seconds).asInstanceOf[RouterRoutees].routees.size must be(4)
}
"backoff" in {
val resizer = DefaultResizer(
lowerBound = 1,
upperBound = 5,
rampupRate = 1.0,
backoffRate = 1.0,
backoffThreshold = 0.20,
pressureThreshold = 1,
messagesPerResize = 1)
val router = system.actorOf(Props(new Actor {
def receive = {
case n: Int ⇒
(n millis).dilated.sleep
}
}).withRouter(RoundRobinRouter(resizer = Some(resizer))))
// put some pressure on the router
for (m ← 0 to 5) {
router ! 100
(5 millis).dilated.sleep
}
val z = Await.result(router ? CurrentRoutees, 5 seconds).asInstanceOf[RouterRoutees].routees.size
z must be >= (2)
(300 millis).dilated.sleep
// let it cool down
for (m ← 0 to 3) {
router ! 1
(200 millis).dilated.sleep
}
Await.result(router ? CurrentRoutees, 5 seconds).asInstanceOf[RouterRoutees].routees.size must be < (z)
}
}
}

View file

@ -5,7 +5,7 @@ package akka.routing
import java.util.concurrent.atomic.AtomicInteger
import akka.actor._
import collection.mutable.LinkedList
import scala.collection.mutable.LinkedList
import akka.testkit._
import akka.util.duration._
import akka.dispatch.Await
@ -13,9 +13,24 @@ import akka.util.Duration
import akka.config.ConfigurationException
import com.typesafe.config.ConfigFactory
import akka.pattern.ask
import java.util.concurrent.ConcurrentHashMap
import com.typesafe.config.Config
object RoutingSpec {
val config = """
akka.actor.deployment {
/router1 {
router = round-robin
nr-of-instances = 3
}
/myrouter {
router = "akka.routing.RoutingSpec$MyRouter"
foo = bar
}
}
"""
class TestActor extends Actor {
def receive = {
case _ ⇒
@ -29,12 +44,22 @@ object RoutingSpec {
}
}
class MyRouter(config: Config) extends RouterConfig {
val foo = config.getString("foo")
def createRoute(routeeProps: Props, routeeProvider: RouteeProvider): Route = {
val routees = IndexedSeq(routeeProvider.context.actorOf(Props[Echo]))
routeeProvider.registerRoutees(routees)
{
case (sender, message) ⇒ Nil
}
}
}
}
@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner])
class RoutingSpec extends AkkaSpec with DefaultTimeout with ImplicitSender {
val impl = system.asInstanceOf[ActorSystemImpl]
class RoutingSpec extends AkkaSpec(RoutingSpec.config) with DefaultTimeout with ImplicitSender {
import akka.routing.RoutingSpec._
@ -90,6 +115,18 @@ class RoutingSpec extends AkkaSpec with DefaultTimeout with ImplicitSender {
Await.ready(doneLatch, 1 seconds)
}
"use configured nr-of-instances when FromConfig" in {
val router = system.actorOf(Props[TestActor].withRouter(FromConfig), "router1")
Await.result(router ? CurrentRoutees, 5 seconds).asInstanceOf[RouterRoutees].routees.size must be(3)
system.stop(router)
}
"use configured nr-of-instances when router is specified" in {
val router = system.actorOf(Props[TestActor].withRouter(RoundRobinRouter(nrOfInstances = 2)), "router1")
Await.result(router ? CurrentRoutees, 5 seconds).asInstanceOf[RouterRoutees].routees.size must be(3)
system.stop(router)
}
}
"no router" must {
@ -238,6 +275,61 @@ class RoutingSpec extends AkkaSpec with DefaultTimeout with ImplicitSender {
}
}
"smallest mailbox router" must {
"be started when constructed" in {
val routedActor = system.actorOf(Props[TestActor].withRouter(SmallestMailboxRouter(nrOfInstances = 1)))
routedActor.isTerminated must be(false)
}
"deliver messages to idle actor" in {
val usedActors = new ConcurrentHashMap[Int, String]()
val router = system.actorOf(Props(new Actor {
def receive = {
case (busy: TestLatch, receivedLatch: TestLatch) ⇒
usedActors.put(0, self.path.toString)
self ! "another in busy mailbox"
receivedLatch.countDown()
Await.ready(busy, TestLatch.DefaultTimeout)
case (msg: Int, receivedLatch: TestLatch) ⇒
usedActors.put(msg, self.path.toString)
receivedLatch.countDown()
case s: String ⇒
}
}).withRouter(SmallestMailboxRouter(3)))
val busy = TestLatch(1)
val received0 = TestLatch(1)
router ! (busy, received0)
Await.ready(received0, TestLatch.DefaultTimeout)
val received1 = TestLatch(1)
router ! (1, received1)
Await.ready(received1, TestLatch.DefaultTimeout)
val received2 = TestLatch(1)
router ! (2, received2)
Await.ready(received2, TestLatch.DefaultTimeout)
val received3 = TestLatch(1)
router ! (3, received3)
Await.ready(received3, TestLatch.DefaultTimeout)
busy.countDown()
val busyPath = usedActors.get(0)
busyPath must not be (null)
val path1 = usedActors.get(1)
val path2 = usedActors.get(2)
val path3 = usedActors.get(3)
path1 must not be (busyPath)
path2 must not be (busyPath)
path3 must not be (busyPath)
}
}
"broadcast router" must {
"be started when constructed" in {
val routedActor = system.actorOf(Props[TestActor].withRouter(BroadcastRouter(nrOfInstances = 1)))
@ -391,16 +483,20 @@ class RoutingSpec extends AkkaSpec with DefaultTimeout with ImplicitSender {
sys.shutdown()
}
}
"support custom router" in {
val myrouter = system.actorOf(Props().withRouter(FromConfig), "myrouter")
myrouter.isTerminated must be(false)
}
}
"custom router" must {
"be started when constructed" in {
val routedActor = system.actorOf(Props[TestActor].withRouter(VoteCountRouter))
val routedActor = system.actorOf(Props[TestActor].withRouter(VoteCountRouter()))
routedActor.isTerminated must be(false)
}
"count votes as intended - not as in Florida" in {
val routedActor = system.actorOf(Props[TestActor].withRouter(VoteCountRouter))
val routedActor = system.actorOf(Props().withRouter(VoteCountRouter()))
routedActor ! DemocratVote
routedActor ! DemocratVote
routedActor ! RepublicanVote
@ -444,18 +540,16 @@ class RoutingSpec extends AkkaSpec with DefaultTimeout with ImplicitSender {
//#crActors
//#crRouter
object VoteCountRouter extends RouterConfig {
case class VoteCountRouter() extends RouterConfig {
//#crRoute
def createRoute(props: Props,
actorContext: ActorContext,
ref: RoutedActorRef): Route = {
val democratActor = actorContext.actorOf(Props(new DemocratActor()), "d")
val republicanActor = actorContext.actorOf(Props(new RepublicanActor()), "r")
def createRoute(routeeProps: Props, routeeProvider: RouteeProvider): Route = {
val democratActor = routeeProvider.context.actorOf(Props(new DemocratActor()), "d")
val republicanActor = routeeProvider.context.actorOf(Props(new RepublicanActor()), "r")
val routees = Vector[ActorRef](democratActor, republicanActor)
//#crRegisterRoutees
registerRoutees(actorContext, routees)
routeeProvider.registerRoutees(routees)
//#crRegisterRoutees
//#crRoutingLogic

View file

@ -1,35 +0,0 @@
package akka.ticket
import akka.actor._
import akka.routing._
import akka.testkit.AkkaSpec
import akka.dispatch.Await
import akka.util.duration._
import akka.pattern.ask
@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner])
class Ticket703Spec extends AkkaSpec {
"A ? call to an actor pool" should {
"reuse the proper timeout" in {
val actorPool = system.actorOf(
Props(new Actor with DefaultActorPool with BoundedCapacityStrategy with MailboxPressureCapacitor with SmallestMailboxSelector with BasicNoBackoffFilter {
def lowerBound = 2
def upperBound = 20
def rampupRate = 0.1
def partialFill = true
def selectionCount = 1
def receive = _route
def pressureThreshold = 1
def instance(p: Props) = system.actorOf(p.withCreator(new Actor {
def receive = {
case req: String ⇒
Thread.sleep(6000L)
sender.tell("Response")
}
}))
}).withFaultHandler(OneForOneStrategy(List(classOf[Exception]), 5, 1000)))
Await.result(actorPool.?("Ping", 10000), 10 seconds) must be === "Response"
}
}
}

View file

@ -54,8 +54,7 @@ import com.eaio.util.lang.Hex;
* @author <a href="mailto:jb@eaio.de">Johann Burkard</a>
* @version $Id: UUID.java 1888 2009-03-15 12:43:24Z johann $
*/
public class UUID implements Comparable<UUID>, Serializable, Cloneable,
IDLEntity {
public class UUID implements Comparable<UUID>, Serializable, Cloneable {
/**
* Hasn't ever changed between versions.

View file

@ -1,55 +0,0 @@
/*
* uuid.idl
*
* Created 19:49 16.12.2003
*
* eaio: UUID - an implementation of the UUID specification
* Copyright (c) 2003-2009 Johann Burkard (jb@eaio.com) http://eaio.com.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
* NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
module com {
module eaio {
module uuid {
/**
* The UUID struct.
*/
struct UUID {
/**
* The time field of the UUID.
*/
long long time;
/**
* The clock sequence and node field of the UUID.
*/
long long clockSeqAndNode;
};
};
};
};

View file

@ -30,7 +30,7 @@ akka {
# List FQCN of extensions which shall be loaded at actor system startup.
# Should be on the format: 'extensions = ["foo", "bar"]' etc.
# FIXME: clarify "extensions" here, "Akka Extensions (<link to docs>)"
# See the Akka Documentation for more info about Extensions
extensions = []
actor {
@ -64,18 +64,20 @@ akka {
default {
# routing (load-balance) scheme to use
# available: "from-code", "round-robin", "random", "scatter-gather", "broadcast"
# or: fully qualified class name of the router class
# available: "from-code", "round-robin", "random", "smallest-mailbox", "scatter-gather", "broadcast"
# or: Fully qualified class name of the router class.
# The router class must extend akka.routing.CustomRouterConfig and have a constructor
# with a com.typesafe.config.Config parameter.
# default is "from-code";
# Whether or not an actor is transformed to a Router is decided in code only (Props.withRouter).
# The type of router can be overridden in the configuration; specifying "from-code" means
# that the values specified in the code shall be used.
# In case of routing, the actors to be routed to can be specified
# in several ways:
# - nr-of-instances: will create that many children given the actor factory
# supplied in the source code (overridable using create-as below)
# - nr-of-instances: will create that many children
# - routees.paths: will look the paths up using actorFor and route to
# them, i.e. will not create children
# - resizer: dynamically resizable number of routees as specified in resizer below
router = "from-code"
# number of children to create in case of a non-direct router; this setting
@ -85,18 +87,62 @@ akka {
# within is the timeout used for routers containing future calls
within = 5 seconds
# FIXME document 'create-as', ticket 1511
create-as {
# fully qualified class name of recipe implementation
class = ""
}
routees {
# Alternatively to giving nr-of-instances you can specify the full
# paths of those actors which should be routed to. This setting takes
# precedence over nr-of-instances
paths = []
}
# Routers with dynamically resizable number of routees
resizer {
# The fewest number of routees the router should ever have.
lower-bound = 1
# The maximum number of routees the router should ever have.
# Must be greater than or equal to lower-bound.
upper-bound = 10
# Threshold to evaluate if a routee is considered to be busy (under pressure).
# Implementation depends on this value (default is 1).
# 0: number of routees currently processing a message.
# 1: number of routees currently processing a message or with
# some messages in their mailbox.
# > 1: number of routees with at least the configured pressure-threshold
# messages in their mailbox. Note that estimating the mailbox size of the
# default UnboundedMailbox is an O(N) operation.
pressure-threshold = 1
# Percentage to increase capacity whenever all routees are busy.
# For example, 0.2 would increase 20% (rounded up), i.e. if current
# capacity is 6 it will request an increase of 2 more routees.
rampup-rate = 0.2
# Minimum fraction of busy routees before backing off.
# For example, if this is 0.3, then we'll remove some routees only when
# less than 30% of routees are busy, i.e. if current capacity is 10 and
# 3 are busy then the capacity is unchanged, but if 2 or less are busy
# the capacity is decreased.
# Use 0.0 or negative to avoid removal of routees.
backoff-threshold = 0.3
# Fraction of routees to be removed when the resizer reaches the
# backoffThreshold.
# For example, 0.1 would decrease 10% (rounded up), i.e. if current
# capacity is 9 it will request a decrease of 1 routee.
backoff-rate = 0.1
# When the resizer reduces the capacity the abandoned routee actors are stopped
# with PoisonPill after this delay. The reason for the delay is to give concurrent
# messages a chance to be placed in the mailbox before sending PoisonPill.
# Use 0s to skip delay.
stop-delay = 1s
# Number of messages between resize operations.
# Use 1 to resize before each message.
messages-per-resize = 10
}
}
}
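Taken together, a deployment section exercising these resizer settings would presumably look like the following, written here as a Scala configuration string in the same style the specs in this commit use (the /my-service path and values are illustrative only):

val exampleConfig = """
akka.actor.deployment {
  /my-service {
    router = round-robin
    resizer {
      lower-bound = 2
      upper-bound = 8
      pressure-threshold = 1
      rampup-rate = 0.2
      backoff-threshold = 0.3
      backoff-rate = 0.1
      messages-per-resize = 10
    }
  }
}
"""

An actor created with Props[...].withRouter(FromConfig) at path /my-service would then pick these values up, as ResizerSpec does for /router1.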
@ -119,23 +165,23 @@ akka {
keep-alive-time = 60s
# minimum number of threads to cap factor-based core number to
core-pool-size-min = 8
core-pool-size-min = 6
# No of core threads ... ceil(available processors * factor)
core-pool-size-factor = 8.0
core-pool-size-factor = 3.0
# maximum number of threads to cap factor-based number to
core-pool-size-max = 4096
core-pool-size-max = 64
# Hint: max-pool-size is only used for bounded task queues
# minimum number of threads to cap factor-based max number to
max-pool-size-min = 8
max-pool-size-min = 6
# Max no of threads ... ceil(available processors * factor)
max-pool-size-factor = 8.0
max-pool-size-factor = 3.0
# maximum number of threads to cap factor-based max number to
max-pool-size-max = 4096
max-pool-size-max = 64
# Specifies the bounded capacity of the task queue (< 1 == unbounded)
task-queue-size = -1
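For reference, the factor-based sizing presumably resolves to ceil(availableProcessors * factor) clamped between the min and max caps; with the new defaults that gives a core pool of 24 threads on an assumed 8-core machine. A sketch of that assumed arithmetic, not the dispatcher code itself:

def scaledPoolSize(floor: Int, factor: Double, ceiling: Int): Int = {
val processors = Runtime.getRuntime.availableProcessors
math.min(math.max(math.ceil(processors * factor).toInt, floor), ceiling)
}

scaledPoolSize(6, 3.0, 64) // 24 on an 8-core box, never below 6 or above 64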
@ -162,11 +208,13 @@ akka {
# property
# NOTE: setting a mailbox to 'blocking' can be a bit dangerous, could lead to
# deadlock, use with care
# The following are only used for Dispatcher and only if mailbox-capacity > 0
# The following mailbox-push-timeout-time is only used for type=Dispatcher and
# only if mailbox-capacity > 0
mailbox-capacity = -1
# Specifies the timeout to add a new message to a mailbox that is full -
# negative number means infinite timeout
# negative number means infinite timeout. It is only used for type=Dispatcher
# and only if mailbox-capacity > 0
mailbox-push-timeout-time = 10s
# FQCN of the MailboxType, if not specified the default bounded or unbounded

View file

@ -112,7 +112,7 @@ object Status {
}
trait ActorLogging { this: Actor ⇒
val log = akka.event.Logging(context.system.eventStream, context.self)
val log = akka.event.Logging(context.system, context.self)
}
object Actor {
@ -248,7 +248,7 @@ trait Actor {
* up of resources before Actor is terminated.
*/
def preRestart(reason: Throwable, message: Option[Any]) {
context.children foreach (context.stop(_))
context.children foreach context.stop
postStop()
}
@ -279,7 +279,6 @@ trait Actor {
// =========================================
private[akka] final def apply(msg: Any) = {
// FIXME this should all go into ActorCell
val behaviorStack = context.asInstanceOf[ActorCell].hotswap
msg match {
case msg if behaviorStack.nonEmpty && behaviorStack.head.isDefinedAt(msg) ⇒ behaviorStack.head.apply(msg)

View file

@ -268,8 +268,8 @@ private[akka] class ActorCell(
//Not thread safe, so should only be used inside the actor that inhabits this ActorCell
final protected def randomName(): String = {
val n = nextNameSequence + 1
nextNameSequence = n
val n = nextNameSequence
nextNameSequence = n + 1
Helpers.base64(n)
}
@ -289,6 +289,9 @@ private[akka] class ActorCell(
// ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅
parent.sendSystemMessage(akka.dispatch.Supervise(self))
// ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅
dispatcher.systemDispatch(this, Create())
dispatcher.attach(this)
}
@ -358,12 +361,12 @@ private[akka] class ActorCell(
actor = created
created.preStart()
checkReceiveTimeout
if (system.settings.DebugLifecycle) system.eventStream.publish(Debug(self.path.toString, "started (" + actor + ")"))
if (system.settings.DebugLifecycle) system.eventStream.publish(Debug(self.path.toString, clazz(created), "started (" + created + ")"))
} catch {
// FIXME catching all and continue isn't good for OOME, ticket #1418
// TODO catching all and continue isn't good for OOME, ticket #1418
case e ⇒
try {
system.eventStream.publish(Error(e, self.path.toString, "error while creating actor"))
system.eventStream.publish(Error(e, self.path.toString, clazz(actor), "error while creating actor"))
// prevent any further messages to be processed until the actor has been restarted
dispatcher.suspend(this)
} finally {
@ -373,7 +376,7 @@ private[akka] class ActorCell(
def recreate(cause: Throwable): Unit = try {
val failedActor = actor
if (system.settings.DebugLifecycle) system.eventStream.publish(Debug(self.path.toString, "restarting"))
if (system.settings.DebugLifecycle) system.eventStream.publish(Debug(self.path.toString, clazz(failedActor), "restarting"))
val freshActor = newActor()
if (failedActor ne null) {
val c = currentMessage //One read only plz
@ -388,15 +391,15 @@ private[akka] class ActorCell(
actor = freshActor // assign it here so if preStart fails, we can null out the sef-refs next call
hotswap = Props.noHotSwap // Reset the behavior
freshActor.postRestart(cause)
if (system.settings.DebugLifecycle) system.eventStream.publish(Debug(self.path.toString, "restarted"))
if (system.settings.DebugLifecycle) system.eventStream.publish(Debug(self.path.toString, clazz(freshActor), "restarted"))
dispatcher.resume(this) //FIXME should this be moved down?
props.faultHandler.handleSupervisorRestarted(cause, self, children)
} catch {
// FIXME catching all and continue isn't good for OOME, ticket #1418
// TODO catching all and continue isn't good for OOME, ticket #1418
case e ⇒ try {
system.eventStream.publish(Error(e, self.path.toString, "error while creating actor"))
system.eventStream.publish(Error(e, self.path.toString, clazz(actor), "error while creating actor"))
// prevent any further messages to be processed until the actor has been restarted
dispatcher.suspend(this)
} finally {
@ -417,7 +420,7 @@ private[akka] class ActorCell(
else {
// do not process normal messages while waiting for all children to terminate
dispatcher suspend this
if (system.settings.DebugLifecycle) system.eventStream.publish(Debug(self.path.toString, "stopping"))
if (system.settings.DebugLifecycle) system.eventStream.publish(Debug(self.path.toString, clazz(actor), "stopping"))
// do not use stop(child) because that would dissociate the children from us, but we still want to wait for them
for (child ← c) child.asInstanceOf[InternalActorRef].stop()
stopping = true
@ -428,12 +431,12 @@ private[akka] class ActorCell(
childrenRefs.get(child.path.name) match {
case None ⇒
childrenRefs = childrenRefs.updated(child.path.name, ChildRestartStats(child))
if (system.settings.DebugLifecycle) system.eventStream.publish(Debug(self.path.toString, "now supervising " + child))
if (system.settings.DebugLifecycle) system.eventStream.publish(Debug(self.path.toString, clazz(actor), "now supervising " + child))
case Some(ChildRestartStats(`child`, _, _)) ⇒
// this is the nominal case where we created the child and entered it in actorCreated() above
if (system.settings.DebugLifecycle) system.eventStream.publish(Debug(self.path.toString, "now supervising " + child))
if (system.settings.DebugLifecycle) system.eventStream.publish(Debug(self.path.toString, clazz(actor), "now supervising " + child))
case Some(ChildRestartStats(c, _, _)) ⇒
system.eventStream.publish(Warning(self.path.toString, "Already supervising other child with same name '" + child.path.name + "', old: " + c + " new: " + child))
system.eventStream.publish(Warning(self.path.toString, clazz(actor), "Already supervising other child with same name '" + child.path.name + "', old: " + c + " new: " + child))
}
}
@ -448,10 +451,10 @@ private[akka] class ActorCell(
case Recreate(cause) ⇒ recreate(cause)
case Link(subject) ⇒
system.deathWatch.subscribe(self, subject)
if (system.settings.DebugLifecycle) system.eventStream.publish(Debug(self.path.toString, "now monitoring " + subject))
if (system.settings.DebugLifecycle) system.eventStream.publish(Debug(self.path.toString, clazz(actor), "now monitoring " + subject))
case Unlink(subject) ⇒
system.deathWatch.unsubscribe(self, subject)
if (system.settings.DebugLifecycle) system.eventStream.publish(Debug(self.path.toString, "stopped monitoring " + subject))
if (system.settings.DebugLifecycle) system.eventStream.publish(Debug(self.path.toString, clazz(actor), "stopped monitoring " + subject))
case Suspend() ⇒ suspend()
case Resume() ⇒ resume()
case Terminate() ⇒ terminate()
@ -460,7 +463,7 @@ private[akka] class ActorCell(
}
} catch {
case e ⇒ //Should we really catch everything here?
system.eventStream.publish(Error(e, self.path.toString, "error while processing " + message))
system.eventStream.publish(Error(e, self.path.toString, clazz(actor), "error while processing " + message))
//TODO FIXME How should problems here be handled???
throw e
}
@ -480,7 +483,7 @@ private[akka] class ActorCell(
currentMessage = null // reset current message after successful invocation
} catch {
case e ⇒
system.eventStream.publish(Error(e, self.path.toString, e.getMessage))
system.eventStream.publish(Error(e, self.path.toString, clazz(actor), e.getMessage))
// prevent any further messages to be processed until the actor has been restarted
dispatcher.suspend(this)
@ -500,7 +503,7 @@ private[akka] class ActorCell(
}
} catch {
case e ⇒
system.eventStream.publish(Error(e, self.path.toString, e.getMessage))
system.eventStream.publish(Error(e, self.path.toString, clazz(actor), e.getMessage))
throw e
}
}
@ -530,7 +533,8 @@ private[akka] class ActorCell(
}
def autoReceiveMessage(msg: Envelope) {
if (system.settings.DebugAutoReceive) system.eventStream.publish(Debug(self.path.toString, "received AutoReceiveMessage " + msg))
if (system.settings.DebugAutoReceive)
system.eventStream.publish(Debug(self.path.toString, clazz(actor), "received AutoReceiveMessage " + msg))
msg.message match {
case Failed(cause) ⇒ handleFailure(sender, cause)
@ -554,7 +558,8 @@ private[akka] class ActorCell(
try {
parent.sendSystemMessage(ChildTerminated(self))
system.deathWatch.publish(Terminated(self))
if (system.settings.DebugLifecycle) system.eventStream.publish(Debug(self.path.toString, "stopped"))
if (system.settings.DebugLifecycle)
system.eventStream.publish(Debug(self.path.toString, clazz(actor), "stopped")) // FIXME: can actor be null?
} finally {
currentMessage = null
clearActorFields()
@ -565,8 +570,8 @@ private[akka] class ActorCell(
final def handleFailure(child: ActorRef, cause: Throwable): Unit = childrenRefs.get(child.path.name) match {
case Some(stats) if stats.child == child ⇒ if (!props.faultHandler.handleFailure(this, child, cause, stats, childrenRefs.values)) throw cause
case Some(stats) ⇒ system.eventStream.publish(Warning(self.path.toString, "dropping Failed(" + cause + ") from unknown child " + child + " matching names but not the same, was: " + stats.child))
case None ⇒ system.eventStream.publish(Warning(self.path.toString, "dropping Failed(" + cause + ") from unknown child " + child))
case Some(stats) ⇒ system.eventStream.publish(Warning(self.path.toString, clazz(actor), "dropping Failed(" + cause + ") from unknown child " + child + " matching names but not the same, was: " + stats.child))
case None ⇒ system.eventStream.publish(Warning(self.path.toString, clazz(actor), "dropping Failed(" + cause + ") from unknown child " + child))
}
final def handleChildTerminated(child: ActorRef): Unit = {
@ -625,4 +630,9 @@ private[akka] class ActorCell(
lookupAndSetField(a.getClass, a, "self", self)
}
}
private def clazz(o: AnyRef): Class[_] = {
if (o eq null) this.getClass
else o.getClass
}
}

View file

@ -389,7 +389,10 @@ object DeadLetterActorRef {
val serialized = new SerializedDeadLetterActorRef
}
class DeadLetterActorRef(val eventStream: EventStream) extends MinimalActorRef {
trait DeadLetterActorRefLike extends MinimalActorRef {
def eventStream: EventStream
@volatile
private var brokenPromise: Future[Any] = _
@volatile
@ -411,6 +414,10 @@ class DeadLetterActorRef(val eventStream: EventStream) extends MinimalActorRef {
case _ ⇒ eventStream.publish(DeadLetter(message, sender, this))
}
// FIXME reimplement behavior of brokenPromise on ask
}
class DeadLetterActorRef(val eventStream: EventStream) extends DeadLetterActorRefLike {
@throws(classOf[java.io.ObjectStreamException])
override protected def writeReplace(): AnyRef = DeadLetterActorRef.serialized
}
@ -419,8 +426,8 @@ class DeadLetterActorRef(val eventStream: EventStream) extends MinimalActorRef {
* This special dead letter reference has a name: it is that which is returned
* by a local look-up which is unsuccessful.
*/
class EmptyLocalActorRef(_eventStream: EventStream, _dispatcher: MessageDispatcher, _path: ActorPath)
extends DeadLetterActorRef(_eventStream) {
class EmptyLocalActorRef(val eventStream: EventStream, _dispatcher: MessageDispatcher, _path: ActorPath)
extends DeadLetterActorRefLike {
init(_dispatcher, _path)
override def !(message: Any)(implicit sender: ActorRef = null): Unit = message match {
case d: DeadLetter ⇒ // do NOT form endless loops

View file

@ -5,15 +5,12 @@
package akka.actor
import java.util.concurrent.atomic.AtomicLong
import org.jboss.netty.akka.util.{ TimerTask, HashedWheelTimer }
import akka.util.Timeout.intToTimeout
import akka.config.ConfigurationException
import akka.dispatch._
import akka.routing._
import akka.AkkaException
import akka.util.{ Duration, Switch, Helpers, Timeout }
import akka.event._
import java.io.Closeable
/**
* Interface for all ActorRef providers to implement.
@ -42,12 +39,6 @@ trait ActorRefProvider {
*/
def deathWatch: DeathWatch
// FIXME: remove/replace???
def nodename: String
// FIXME: remove/replace???
def clustername: String
/**
* The root path for all actors within this actor system, including remote
* address if enabled.
@ -308,11 +299,7 @@ class LocalActorRefProvider(
new RootActorPath(LocalAddress(_systemName)),
new Deployer(settings))
// FIXME remove both
val nodename: String = "local"
val clustername: String = "local"
val log = Logging(eventStream, "LocalActorRefProvider")
val log = Logging(eventStream, "LocalActorRefProvider(" + rootPath.address + ")")
/*
* generate name for temporary actor refs
@ -534,114 +521,3 @@ class LocalDeathWatch(val mapSize: Int) extends DeathWatch with ActorClassificat
}
}
/**
* Scheduled tasks (Runnable and functions) are executed with the supplied dispatcher.
* Note that dispatcher is by-name parameter, because dispatcher might not be initialized
* when the scheduler is created.
*
* The HashedWheelTimer used by this class MUST throw an IllegalStateException
* if it does not enqueue a task. Once a task is queued, it MUST be executed or
* returned from stop().
*/
class DefaultScheduler(hashedWheelTimer: HashedWheelTimer, log: LoggingAdapter, dispatcher: MessageDispatcher) extends Scheduler with Closeable {
import org.jboss.netty.akka.util.{ Timeout HWTimeout }
def schedule(initialDelay: Duration, delay: Duration, receiver: ActorRef, message: Any): Cancellable =
new DefaultCancellable(hashedWheelTimer.newTimeout(createContinuousTask(delay, receiver, message), initialDelay))
def schedule(initialDelay: Duration, delay: Duration)(f: Unit): Cancellable =
new DefaultCancellable(hashedWheelTimer.newTimeout(createContinuousTask(delay, f), initialDelay))
def schedule(initialDelay: Duration, delay: Duration, runnable: Runnable): Cancellable =
new DefaultCancellable(hashedWheelTimer.newTimeout(createContinuousTask(delay, runnable), initialDelay))
def scheduleOnce(delay: Duration, runnable: Runnable): Cancellable =
new DefaultCancellable(hashedWheelTimer.newTimeout(createSingleTask(runnable), delay))
def scheduleOnce(delay: Duration, receiver: ActorRef, message: Any): Cancellable =
new DefaultCancellable(hashedWheelTimer.newTimeout(createSingleTask(receiver, message), delay))
def scheduleOnce(delay: Duration)(f: Unit): Cancellable =
new DefaultCancellable(hashedWheelTimer.newTimeout(createSingleTask(f), delay))
private def createSingleTask(runnable: Runnable): TimerTask =
new TimerTask() {
def run(timeout: org.jboss.netty.akka.util.Timeout) { dispatcher.execute(runnable) }
}
private def createSingleTask(receiver: ActorRef, message: Any): TimerTask =
new TimerTask {
def run(timeout: org.jboss.netty.akka.util.Timeout) {
receiver ! message
}
}
private def createSingleTask(f: Unit): TimerTask =
new TimerTask {
def run(timeout: org.jboss.netty.akka.util.Timeout) {
dispatcher.execute(new Runnable { def run = f })
}
}
private def createContinuousTask(delay: Duration, receiver: ActorRef, message: Any): TimerTask = {
new TimerTask {
def run(timeout: org.jboss.netty.akka.util.Timeout) {
// Check if the receiver is still alive and kicking before sending it a message and reschedule the task
if (!receiver.isTerminated) {
receiver ! message
try timeout.getTimer.newTimeout(this, delay) catch {
case _: IllegalStateException // stop recurring if timer is stopped
}
} else {
log.warning("Could not reschedule message to be sent because receiving actor has been terminated.")
}
}
}
}
private def createContinuousTask(delay: Duration, f: Unit): TimerTask = {
new TimerTask {
def run(timeout: org.jboss.netty.akka.util.Timeout) {
dispatcher.execute(new Runnable { def run = f })
try timeout.getTimer.newTimeout(this, delay) catch {
case _: IllegalStateException // stop recurring if timer is stopped
}
}
}
}
private def createContinuousTask(delay: Duration, runnable: Runnable): TimerTask = {
new TimerTask {
def run(timeout: org.jboss.netty.akka.util.Timeout) {
dispatcher.execute(runnable)
try timeout.getTimer.newTimeout(this, delay) catch {
case _: IllegalStateException // stop recurring if timer is stopped
}
}
}
}
private def execDirectly(t: HWTimeout): Unit = {
try t.getTask.run(t) catch {
case e: InterruptedException throw e
case e: Exception log.error(e, "exception while executing timer task")
}
}
def close() = {
import scala.collection.JavaConverters._
hashedWheelTimer.stop().asScala foreach execDirectly
}
}
class DefaultCancellable(val timeout: org.jboss.netty.akka.util.Timeout) extends Cancellable {
def cancel() {
timeout.cancel()
}
def isCancelled: Boolean = {
timeout.isCancelled
}
}

View file

@ -119,12 +119,12 @@ object ActorSystem {
* system.actorOf(props)
*
* // Scala
* system.actorOf(Props[MyActor]("name")
* system.actorOf(Props[MyActor]
* system.actorOf(Props(new MyActor(...))
* system.actorOf(Props[MyActor], "name")
* system.actorOf(Props[MyActor])
* system.actorOf(Props(new MyActor(...)))
*
* // Java
* system.actorOf(classOf[MyActor]);
* system.actorOf(MyActor.class);
* system.actorOf(Props(new Creator<MyActor>() {
* public MyActor create() { ... }
* });
@ -154,16 +154,6 @@ abstract class ActorSystem extends ActorRefFactory {
*/
def logConfiguration(): Unit
/**
* The logical node name where this actor system resides.
*/
def nodename: String
/**
* The logical name of the cluster this actor system belongs to.
*/
def clustername: String
/**
* Construct a path below the application guardian to be used with [[ActorSystem.actorFor]].
*/
@ -235,13 +225,15 @@ abstract class ActorSystem extends ActorRefFactory {
/**
* Register a block of code to run after all actors in this actor system have
* been stopped.
* been stopped. Multiple code blocks may be registered by calling this method multiple times; there is no
* guarantee that they will be executed in a particular order.
*/
def registerOnTermination[T](code: T)
/**
* Register a block of code to run after all actors in this actor system have
* been stopped (Java API).
* been stopped. Multiple code blocks may be registered by calling this method multiple times; there is no
* guarantee that they will be executed in a particular order (Java API).
*/
def registerOnTermination(code: Runnable)
@ -329,7 +321,11 @@ class ActorSystemImpl(val name: String, applicationConfig: Config) extends Actor
// this provides basic logging (to stdout) until .start() is called below
val eventStream = new EventStream(DebugEventStream)
eventStream.startStdoutLogger(settings)
val log = new BusLogging(eventStream, "ActorSystem") // this used only for .getClass in tagging messages
// unfortunately we need logging before we know the rootpath address, which wants to be inserted here
@volatile
private var _log = new BusLogging(eventStream, "ActorSystem(" + name + ")", this.getClass)
def log = _log
val scheduler = createScheduler()
@ -373,8 +369,6 @@ class ActorSystemImpl(val name: String, applicationConfig: Config) extends Actor
def guardian: InternalActorRef = provider.guardian
def systemGuardian: InternalActorRef = provider.systemGuardian
def deathWatch: DeathWatch = provider.deathWatch
def nodename: String = provider.nodename
def clustername: String = provider.clustername
def /(actorName: String): ActorPath = guardian.path / actorName
def /(path: Iterable[String]): ActorPath = guardian.path / path
@ -382,6 +376,7 @@ class ActorSystemImpl(val name: String, applicationConfig: Config) extends Actor
private lazy val _start: this.type = {
// the provider is expected to start default loggers, LocalActorRefProvider does this
provider.init(this)
_log = new BusLogging(eventStream, "ActorSystem(" + lookupRoot.path.address + ")", this.getClass)
deadLetters.init(dispatcher, lookupRoot.path / "deadLetters")
// this starts the reaper actor and the user-configured logging subscribers, which are also actors
registerOnTermination(stopScheduler())
@ -497,4 +492,6 @@ class ActorSystemImpl(val name: String, applicationConfig: Config) extends Actor
}
}
override def toString = lookupRoot.path.root.address.toString
}

View file

@ -13,8 +13,9 @@ import akka.event.EventStream
import com.typesafe.config._
import akka.routing._
import java.util.concurrent.{ TimeUnit, ConcurrentHashMap }
import akka.util.ReflectiveAccess
case class Deploy(path: String, config: Config, recipe: Option[ActorRecipe] = None, routing: RouterConfig = NoRouter, scope: Scope = LocalScope)
case class Deploy(path: String, config: Config, routing: RouterConfig = NoRouter, scope: Scope = LocalScope)
case class ActorRecipe(implementationClass: Class[_ <: Actor]) //TODO Add ActorConfiguration here
@ -55,25 +56,33 @@ class Deployer(val settings: ActorSystem.Settings) {
val within = Duration(deployment.getMilliseconds("within"), TimeUnit.MILLISECONDS)
val router: RouterConfig = deployment.getString("router") match {
case "from-code" NoRouter
case "round-robin" RoundRobinRouter(nrOfInstances, routees)
case "random" RandomRouter(nrOfInstances, routees)
case "scatter-gather" ScatterGatherFirstCompletedRouter(nrOfInstances, routees, within)
case "broadcast" BroadcastRouter(nrOfInstances, routees)
case x throw new ConfigurationException("unknown router type " + x + " for path " + key)
val resizer: Option[Resizer] = if (config.hasPath("resizer")) {
Some(DefaultResizer(deployment.getConfig("resizer")))
} else {
None
}
val recipe: Option[ActorRecipe] =
deployment.getString("create-as.class") match {
case "" None
case impl
val implementationClass = getClassFor[Actor](impl).fold(e throw new ConfigurationException(
"Config option [akka.actor.deployment." + key + ".create-as.class] load failed", e), identity)
Some(ActorRecipe(implementationClass))
}
val router: RouterConfig = deployment.getString("router") match {
case "from-code" NoRouter
case "round-robin" RoundRobinRouter(nrOfInstances, routees, resizer)
case "random" RandomRouter(nrOfInstances, routees, resizer)
case "smallest-mailbox" SmallestMailboxRouter(nrOfInstances, routees, resizer)
case "scatter-gather" ScatterGatherFirstCompletedRouter(nrOfInstances, routees, within, resizer)
case "broadcast" BroadcastRouter(nrOfInstances, routees, resizer)
case fqn
val constructorSignature = Array[Class[_]](classOf[Config])
ReflectiveAccess.createInstance[RouterConfig](fqn, constructorSignature, Array[AnyRef](deployment)) match {
case Right(router) ⇒ router
case Left(exception) ⇒
throw new IllegalArgumentException(
("Cannot instantiate router [%s], defined in [%s], " +
"make sure it extends [akka.routing.RouterConfig] and has constructor with " +
"[com.typesafe.config.Config] parameter")
.format(fqn, key), exception)
}
}
Some(Deploy(key, deployment, recipe, router, LocalScope))
Some(Deploy(key, deployment, router, LocalScope))
}
}
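The FQCN branch above is what the MyRouter class in RoutingSpec (earlier in this diff) exercises: a router class with a (config: Config) constructor is resolved reflectively from the deployment section. A minimal wiring sketch, with an illustrative deployment path:

// application.conf (illustrative path, class taken from RoutingSpec):
//   akka.actor.deployment./custom.router = "akka.routing.RoutingSpec$MyRouter"
//   akka.actor.deployment./custom.foo    = bar
val routed = system.actorOf(Props[TestActor].withRouter(FromConfig), "custom")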

View file

@ -8,6 +8,7 @@ import akka.util._
import scala.collection.mutable
import akka.event.Logging
import akka.util.Duration._
import akka.routing.{ Deafen, Listen, Listeners }
object FSM {
@ -179,7 +180,7 @@ object FSM {
* timerActive_? ("tock")
* </pre>
*/
trait FSM[S, D] extends ListenerManagement {
trait FSM[S, D] extends Listeners {
this: Actor ⇒
import FSM._
@ -189,7 +190,7 @@ trait FSM[S, D] extends ListenerManagement {
type Timeout = Option[Duration]
type TransitionHandler = PartialFunction[(S, S), Unit]
val log = Logging(context.system, context.self)
val log = Logging(context.system, this)
/**
* ****************************************
@ -447,9 +448,6 @@ trait FSM[S, D] extends ListenerManagement {
for (te ← transitionEvent) { if (te.isDefinedAt(tuple)) te(tuple) }
}
// ListenerManagement shall not start() or stop() listener actors
override protected val manageLifeCycleOfListeners = false
/*
* *******************************************
* Main actor receive() method
@ -474,11 +472,18 @@ trait FSM[S, D] extends ListenerManagement {
}
case SubscribeTransitionCallBack(actorRef) ⇒
// TODO use DeathWatch to clean up list
addListener(actorRef)
listeners.add(actorRef)
// send current state back as reference point
actorRef ! CurrentState(self, currentState.stateName)
case Listen(actorRef) ⇒
// TODO use DeathWatch to clean up list
listeners.add(actorRef)
// send current state back as reference point
actorRef ! CurrentState(self, currentState.stateName)
case UnsubscribeTransitionCallBack(actorRef) ⇒
removeListener(actorRef)
listeners.remove(actorRef)
case Deafen(actorRef) ⇒
listeners.remove(actorRef)
case value ⇒ {
if (timeoutFuture.isDefined) {
timeoutFuture.get.cancel()
@ -523,7 +528,7 @@ trait FSM[S, D] extends ListenerManagement {
if (currentState.stateName != nextState.stateName) {
this.nextState = nextState
handleTransition(currentState.stateName, nextState.stateName)
notifyListeners(Transition(self, currentState.stateName, nextState.stateName))
gossip(Transition(self, currentState.stateName, nextState.stateName))
}
currentState = nextState
val timeout = if (currentState.timeout.isDefined) currentState.timeout else stateTimeouts(currentState.stateName)
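A brief usage sketch of the listener wiring retained above: subscribers receive CurrentState immediately and then Transition messages on each state change, via either the FSM-specific or the generic Listeners protocol (fsm and listener are assumed ActorRefs):

import akka.actor.FSM.{ SubscribeTransitionCallBack, UnsubscribeTransitionCallBack }
import akka.routing.{ Listen, Deafen }

fsm ! SubscribeTransitionCallBack(listener) // replies with CurrentState(fsm, currentStateName)
fsm ! Listen(listener)                      // equivalent, via the generic Listeners protocol
// state changes are then gossiped as Transition(fsm, oldState, newState)
fsm ! Deafen(listener)                      // stop receiving notifications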

View file

@ -13,10 +13,10 @@ case class ChildRestartStats(val child: ActorRef, var maxNrOfRetriesCount: Int =
def requestRestartPermission(retriesWindow: (Option[Int], Option[Int])): Boolean =
retriesWindow match {
case (Some(retries), _) if retries < 1 ⇒ false
case (Some(retries), None) ⇒ maxNrOfRetriesCount += 1; maxNrOfRetriesCount <= retries
case (x @ (Some(_) | None), Some(window)) ⇒ retriesInWindowOkay(if (x.isDefined) x.get else 1, window)
case (None, _) ⇒ true
case (Some(retries), _) if retries < 1 ⇒ false
case (Some(retries), None) ⇒ maxNrOfRetriesCount += 1; maxNrOfRetriesCount <= retries
case (x, Some(window)) ⇒ retriesInWindowOkay(if (x.isDefined) x.get else 1, window)
case (None, _) ⇒ true
}
private def retriesInWindowOkay(retries: Int, window: Int): Boolean = {

View file

@ -21,7 +21,6 @@ object Props {
import FaultHandlingStrategy._
final val defaultCreator: () ⇒ Actor = () ⇒ throw new UnsupportedOperationException("No actor creator specified!")
final val defaultTimeout: Timeout = Timeout(Duration.MinusInf)
final val defaultDecider: Decider = {
case _: ActorInitializationException ⇒ Stop
case _: ActorKilledException ⇒ Stop
@ -95,12 +94,10 @@ object Props {
* val props = Props(
* creator = ..,
* dispatcher = ..,
* timeout = ..,
* faultHandler = ..,
* routerConfig = ..
* )
* val props = Props().withCreator(new MyActor)
* val props = Props[MyActor].withTimeout(timeout)
* val props = Props[MyActor].withRouter(RoundRobinRouter(..))
* val props = Props[MyActor].withFaultHandler(OneForOneStrategy {
case e: IllegalStateException ⇒ Resume
@ -117,7 +114,6 @@ object Props {
* }
* });
* Props props = new Props().withCreator(new UntypedActorFactory() { ... });
* Props props = new Props(MyActor.class).withTimeout(timeout);
* Props props = new Props(MyActor.class).withFaultHandler(new OneForOneStrategy(...));
* Props props = new Props(MyActor.class).withRouter(new RoundRobinRouter(..));
* }}}
@ -125,7 +121,6 @@ object Props {
case class Props(
creator: () ⇒ Actor = Props.defaultCreator,
dispatcher: String = Dispatchers.DefaultDispatcherId,
timeout: Timeout = Props.defaultTimeout,
faultHandler: FaultHandlingStrategy = Props.defaultFaultHandler,
routerConfig: RouterConfig = Props.defaultRoutedProps) {
@ -135,7 +130,6 @@ case class Props(
def this() = this(
creator = Props.defaultCreator,
dispatcher = Dispatchers.DefaultDispatcherId,
timeout = Props.defaultTimeout,
faultHandler = Props.defaultFaultHandler)
/**
@ -144,7 +138,6 @@ case class Props(
def this(factory: UntypedActorFactory) = this(
creator = () factory.create(),
dispatcher = Dispatchers.DefaultDispatcherId,
timeout = Props.defaultTimeout,
faultHandler = Props.defaultFaultHandler)
/**
@ -153,7 +146,6 @@ case class Props(
def this(actorClass: Class[_ <: Actor]) = this(
creator = () actorClass.newInstance,
dispatcher = Dispatchers.DefaultDispatcherId,
timeout = Props.defaultTimeout,
faultHandler = Props.defaultFaultHandler,
routerConfig = Props.defaultRoutedProps)
@ -183,11 +175,6 @@ case class Props(
*/
def withDispatcher(d: String) = copy(dispatcher = d)
/**
* Returns a new Props with the specified timeout set.
*/
def withTimeout(t: Timeout) = copy(timeout = t)
/**
* Returns a new Props with the specified faulthandler set.
*/
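With the timeout field gone from Props, an ask timeout is presumably supplied at the call site instead, e.g. via an implicit Timeout together with the akka.pattern.ask import seen throughout this diff. A sketch, with an illustrative actor reference and message:

import akka.pattern.ask
import akka.util.Timeout
import akka.util.duration._
import akka.dispatch.Await

implicit val timeout = Timeout(5 seconds)
// someActor: ActorRef is assumed; the reply is awaited with the same timeout
val reply = Await.result(someActor ? "request", timeout.duration)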

View file

@ -13,6 +13,11 @@
package akka.actor
import akka.util.Duration
import org.jboss.netty.akka.util.{ Timer, TimerTask, HashedWheelTimer, Timeout ⇒ HWTimeout }
import akka.event.LoggingAdapter
import akka.dispatch.MessageDispatcher
import java.io.Closeable
//#scheduler
/**
* An Akka scheduler service. This one needs one special behavior: if
@ -108,3 +113,149 @@ trait Cancellable {
def isCancelled: Boolean
}
//#cancellable
/**
* Scheduled tasks (Runnable and functions) are executed with the supplied dispatcher.
* Note that dispatcher is a by-name parameter, because the dispatcher might not be initialized
* when the scheduler is created.
*
* The HashedWheelTimer used by this class MUST throw an IllegalStateException
* if it does not enqueue a task. Once a task is queued, it MUST be executed or
* returned from stop().
*/
class DefaultScheduler(hashedWheelTimer: HashedWheelTimer, log: LoggingAdapter, dispatcher: MessageDispatcher) extends Scheduler with Closeable {
def schedule(initialDelay: Duration, delay: Duration, receiver: ActorRef, message: Any): Cancellable = {
val continuousCancellable = new ContinuousCancellable
val task = new TimerTask with ContinuousScheduling {
def run(timeout: HWTimeout) {
receiver ! message
// Check if the receiver is still alive and kicking before reschedule the task
if (receiver.isTerminated) {
log.warning("Could not reschedule message to be sent because receiving actor has been terminated.")
} else {
scheduleNext(timeout, delay, continuousCancellable)
}
}
}
continuousCancellable.init(hashedWheelTimer.newTimeout(task, initialDelay))
continuousCancellable
}
def schedule(initialDelay: Duration, delay: Duration)(f: ⇒ Unit): Cancellable = {
val continuousCancellable = new ContinuousCancellable
val task = new TimerTask with ContinuousScheduling with Runnable {
def run = f
def run(timeout: HWTimeout) {
dispatcher execute this
scheduleNext(timeout, delay, continuousCancellable)
}
}
continuousCancellable.init(hashedWheelTimer.newTimeout(task, initialDelay))
continuousCancellable
}
def schedule(initialDelay: Duration, delay: Duration, runnable: Runnable): Cancellable = {
val continuousCancellable = new ContinuousCancellable
val task = new TimerTask with ContinuousScheduling {
def run(timeout: HWTimeout) {
dispatcher.execute(runnable)
scheduleNext(timeout, delay, continuousCancellable)
}
}
continuousCancellable.init(hashedWheelTimer.newTimeout(task, initialDelay))
continuousCancellable
}
def scheduleOnce(delay: Duration, runnable: Runnable): Cancellable = {
val task = new TimerTask() {
def run(timeout: HWTimeout) { dispatcher.execute(runnable) }
}
new DefaultCancellable(hashedWheelTimer.newTimeout(task, delay))
}
def scheduleOnce(delay: Duration, receiver: ActorRef, message: Any): Cancellable = {
val task = new TimerTask {
def run(timeout: HWTimeout) {
receiver ! message
}
}
new DefaultCancellable(hashedWheelTimer.newTimeout(task, delay))
}
def scheduleOnce(delay: Duration)(f: ⇒ Unit): Cancellable = {
val task = new TimerTask {
def run(timeout: HWTimeout) {
dispatcher.execute(new Runnable { def run = f })
}
}
new DefaultCancellable(hashedWheelTimer.newTimeout(task, delay))
}
private trait ContinuousScheduling { this: TimerTask ⇒
def scheduleNext(timeout: HWTimeout, delay: Duration, delegator: ContinuousCancellable) {
try {
delegator.swap(timeout.getTimer.newTimeout(this, delay))
} catch {
case _: IllegalStateException ⇒ // stop recurring if timer is stopped
}
}
}
private def execDirectly(t: HWTimeout): Unit = {
try t.getTask.run(t) catch {
case e: InterruptedException ⇒ throw e
case e: Exception ⇒ log.error(e, "exception while executing timer task")
}
}
def close() = {
import scala.collection.JavaConverters._
hashedWheelTimer.stop().asScala foreach execDirectly
}
}
/**
* Wrapper of a [[org.jboss.netty.akka.util.Timeout]] that delegates all
* methods. Needed to be able to cancel continuous tasks,
* since they create new Timeout for each tick.
*/
private[akka] class ContinuousCancellable extends Cancellable {
@volatile
private var delegate: HWTimeout = _
@volatile
private var cancelled = false
private[akka] def init(initialTimeout: HWTimeout): Unit = {
delegate = initialTimeout
}
private[akka] def swap(newTimeout: HWTimeout): Unit = {
val wasCancelled = isCancelled
delegate = newTimeout
if (wasCancelled || isCancelled) cancel()
}
def isCancelled(): Boolean = {
// delegate is initially null, but this object will not be exposed to the world until after init
cancelled || delegate.isCancelled()
}
def cancel(): Unit = {
// the underlying Timeout will not become cancelled once the task has been started to run,
// therefore we keep a flag here to make sure that rescheduling doesn't occur when cancelled
cancelled = true
// delegate is initially null, but this object will not be exposed to the world until after init
delegate.cancel()
}
}
class DefaultCancellable(val timeout: HWTimeout) extends Cancellable {
def cancel() {
timeout.cancel()
}
def isCancelled: Boolean = {
timeout.isCancelled
}
}
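Usage-wise, the ContinuousCancellable above is what lets a recurring schedule be cancelled through a single handle even though every tick allocates a fresh HWTimeout. A small sketch (system, receiver and the message are assumed):

import akka.util.duration._

val tick = system.scheduler.schedule(1 second, 1 second, receiver, "tick")
// later: stops the recurring task; further reschedules are suppressed by the cancelled flag
tick.cancel()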

View file

@ -8,10 +8,10 @@ import akka.japi.{ Creator, Option ⇒ JOption }
import java.lang.reflect.{ InvocationTargetException, Method, InvocationHandler, Proxy }
import akka.util.{ Duration, Timeout }
import java.util.concurrent.atomic.{ AtomicReference ⇒ AtomVar }
import akka.serialization.{ Serializer, Serialization }
import akka.serialization.{ Serializer, Serialization, SerializationExtension }
import akka.dispatch._
import akka.serialization.SerializationExtension
import java.util.concurrent.TimeoutException
import java.lang.IllegalStateException
trait TypedActorFactory {
@ -48,100 +48,31 @@ trait TypedActorFactory {
def getActorRefFor(proxy: AnyRef): ActorRef
/**
* Creates a new TypedActor proxy using the supplied Props,
* the interfaces usable by the returned proxy are the supplied interface class (if the class represents an interface) or
* all interfaces (Class.getInterfaces) if it's not an interface class
*
* Java API
* Creates a new TypedActor with the specified properties
*/
def typedActorOf[R <: AnyRef, T <: R](interface: Class[R], impl: Class[T], props: Props): R =
typedActor.createProxyAndTypedActor(actorFactory, interface, impl.newInstance, props, None, interface.getClassLoader)
/**
* Creates a new TypedActor proxy using the supplied Props,
* the interfaces usable by the returned proxy is the supplied interface class (if the class represents an interface) or
* all interfaces (Class.getInterfaces) if it's not an interface class
*
* Java API
*/
def typedActorOf[R <: AnyRef, T <: R](interface: Class[R], impl: Class[T], props: Props, name: String): R =
typedActor.createProxyAndTypedActor(actorFactory, interface, impl.newInstance, props, Some(name), interface.getClassLoader)
/**
* Creates a new TypedActor proxy using the supplied Props,
* the interfaces usable by the returned proxy is the supplied interface class (if the class represents an interface) or
* all interfaces (Class.getInterfaces) if it's not an interface class
*
* Java API
*/
def typedActorOf[R <: AnyRef, T <: R](interface: Class[R], impl: Creator[T], props: Props): R =
typedActor.createProxyAndTypedActor(actorFactory, interface, impl.create, props, None, interface.getClassLoader)
/**
* Creates a new TypedActor proxy using the supplied Props,
* the interfaces usable by the returned proxy is the supplied interface class (if the class represents an interface) or
* all interfaces (Class.getInterfaces) if it's not an interface class
*
* Java API
*/
def typedActorOf[R <: AnyRef, T <: R](interface: Class[R], impl: Creator[T], props: Props, name: String): R =
typedActor.createProxyAndTypedActor(actorFactory, interface, impl.create, props, Some(name), interface.getClassLoader)
/**
* Creates a new TypedActor proxy using the supplied Props,
* the interfaces usable by the returned proxy is the supplied interface class (if the class represents an interface) or
* all interfaces (Class.getInterfaces) if it's not an interface class
*
* Scala API
*/
def typedActorOf[R <: AnyRef, T <: R](interface: Class[R], impl: T, props: Props, name: String): R =
typedActor.createProxyAndTypedActor(actorFactory, interface, impl, props, Some(name), interface.getClassLoader)
/**
* Creates a new TypedActor proxy using the supplied Props,
* the interfaces usable by the returned proxy is the supplied implementation class' interfaces (Class.getInterfaces)
*
* Scala API
*/
def typedActorOf[R <: AnyRef, T <: R: ClassManifest](props: Props = Props(), name: String = null): R = {
val clazz = implicitly[ClassManifest[T]].erasure.asInstanceOf[Class[T]]
typedActor.createProxyAndTypedActor(actorFactory, clazz, clazz.newInstance, props, Option(name), clazz.getClassLoader)
def typedActorOf[R <: AnyRef, T <: R](props: TypedProps[T]): R = {
val proxyVar = new AtomVar[R] //Chicken'n'egg-resolver
val c = props.creator //Cache this to avoid closing over the Props
val ap = props.actorProps.withCreator(new TypedActor.TypedActor[R, T](proxyVar, c()))
typedActor.createActorRefProxy(props, proxyVar, actorFactory.actorOf(ap))
}
/**
* Creates a proxy given the supplied Props, this is not a TypedActor, so you'll need to implement the MethodCall handling yourself,
* to create TypedActor proxies, use typedActorOf
* Creates a new TypedActor with the specified properties
*/
def createProxy[R <: AnyRef](constructor: Actor, props: Props = Props(), name: String = null, loader: ClassLoader = null)(implicit m: Manifest[R]): R =
typedActor.createProxy[R](actorFactory, typedActor.extractInterfaces(m.erasure), (ref: AtomVar[R]) constructor, props, Option(name), if (loader eq null) m.erasure.getClassLoader else loader)
def typedActorOf[R <: AnyRef, T <: R](props: TypedProps[T], name: String): R = {
val proxyVar = new AtomVar[R] //Chicken'n'egg-resolver
val c = props.creator //Cache this to avoid closing over the Props
val ap = props.actorProps.withCreator(new akka.actor.TypedActor.TypedActor[R, T](proxyVar, c()))
typedActor.createActorRefProxy(props, proxyVar, actorFactory.actorOf(ap, name))
}
/**
* Creates a proxy given the supplied Props, this is not a TypedActor, so you'll need to implement the MethodCall handling yourself,
* to create TypedActor proxies, use typedActorOf
* Creates a TypedActor that intercepts the calls and forwards them as [[akka.actor.TypedActor.MethodCall]]
* to the provided ActorRef.
*/
def createProxy[R <: AnyRef](interfaces: Array[Class[_]], constructor: Creator[Actor], props: Props, loader: ClassLoader): R =
typedActor.createProxy(actorFactory, interfaces, (ref: AtomVar[R]) constructor.create, props, None, loader)
/**
* Creates a proxy given the supplied Props, this is not a TypedActor, so you'll need to implement the MethodCall handling yourself,
* to create TypedActor proxies, use typedActorOf
*/
def createProxy[R <: AnyRef](interfaces: Array[Class[_]], constructor: Creator[Actor], props: Props, name: String, loader: ClassLoader): R =
typedActor.createProxy(actorFactory, interfaces, (ref: AtomVar[R]) constructor.create, props, Some(name), loader)
/**
* Creates a proxy given the supplied Props, this is not a TypedActor, so you'll need to implement the MethodCall handling yourself,
* to create TypedActor proxies, use typedActorOf
*/
def createProxy[R <: AnyRef](interfaces: Array[Class[_]], constructor: Actor, props: Props, loader: ClassLoader): R =
typedActor.createProxy[R](actorFactory, interfaces, (ref: AtomVar[R]) constructor, props, None, loader)
/**
* Creates a proxy given the supplied Props, this is not a TypedActor, so you'll need to implement the MethodCall handling yourself,
* to create TypedActor proxies, use typedActorOf
*/
def createProxy[R <: AnyRef](interfaces: Array[Class[_]], constructor: Actor, props: Props, name: String, loader: ClassLoader): R =
typedActor.createProxy[R](actorFactory, interfaces, (ref: AtomVar[R]) constructor, props, Some(name), loader)
def typedActorOf[R <: AnyRef, T <: R](props: TypedProps[T], actorRef: ActorRef): R =
typedActor.createActorRefProxy(props, null: AtomVar[R], actorRef)
}
@ -192,22 +123,25 @@ object TypedActor extends ExtensionId[TypedActorExtension] with ExtensionIdProvi
} catch { case i: InvocationTargetException throw i.getTargetException }
private def writeReplace(): AnyRef = parameters match {
case null SerializedMethodCall(method.getDeclaringClass, method.getName, method.getParameterTypes, null, null)
case ps if ps.length == 0 SerializedMethodCall(method.getDeclaringClass, method.getName, method.getParameterTypes, Array[Int](), Array[Array[Byte]]())
case null SerializedMethodCall(method.getDeclaringClass, method.getName, method.getParameterTypes, null)
case ps if ps.length == 0 SerializedMethodCall(method.getDeclaringClass, method.getName, method.getParameterTypes, Array())
case ps
val serializers: Array[Serializer] = ps map SerializationExtension(Serialization.currentSystem.value).findSerializerFor
val serializedParameters: Array[Array[Byte]] = Array.ofDim[Array[Byte]](serializers.length)
for (i 0 until serializers.length)
serializedParameters(i) = serializers(i) toBinary parameters(i) //Mutable for the sake of sanity
val serializedParameters = Array.ofDim[(Int, Class[_], Array[Byte])](ps.length)
for (i 0 until ps.length) {
val p = ps(i)
val s = SerializationExtension(Serialization.currentSystem.value).findSerializerFor(p)
val m = if (s.includeManifest) p.getClass else null
serializedParameters(i) = (s.identifier, m, s toBinary parameters(i)) //Mutable for the sake of sanity
}
SerializedMethodCall(method.getDeclaringClass, method.getName, method.getParameterTypes, serializers.map(_.identifier), serializedParameters)
SerializedMethodCall(method.getDeclaringClass, method.getName, method.getParameterTypes, serializedParameters)
}
}
/**
* Represents the serialized form of a MethodCall, uses readResolve and writeReplace to marshall the call
*/
case class SerializedMethodCall(ownerType: Class[_], methodName: String, parameterTypes: Array[Class[_]], serializerIdentifiers: Array[Int], serializedParameters: Array[Array[Byte]]) {
case class SerializedMethodCall(ownerType: Class[_], methodName: String, parameterTypes: Array[Class[_]], serializedParameters: Array[(Int, Class[_], Array[Byte])]) {
//TODO implement writeObject and readObject to serialize
//TODO Possible optimization is to special encode the parameter-types to conserve space
@ -222,8 +156,10 @@ object TypedActor extends ExtensionId[TypedActorExtension] with ExtensionIdProvi
case a if a.length == 0 Array[AnyRef]()
case a
val deserializedParameters: Array[AnyRef] = Array.ofDim[AnyRef](a.length) //Mutable for the sake of sanity
for (i 0 until a.length)
deserializedParameters(i) = serialization.serializerByIdentity(serializerIdentifiers(i)).fromBinary(serializedParameters(i))
for (i 0 until a.length) {
val (sId, manifest, bytes) = a(i)
deserializedParameters(i) = serialization.serializerByIdentity(sId).fromBinary(bytes, Option(manifest))
}
deserializedParameters
})
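// Illustrative sketch of the (serializerId, manifest, bytes) round-trip used by
// writeReplace/readResolve above. Assumes an ActorSystem named `system` is in scope;
// `param` is a hypothetical method-call argument.
import akka.actor.ActorSystem
import akka.serialization.SerializationExtension

val system = ActorSystem("sketch")
val serialization = SerializationExtension(system)

val param: AnyRef = "some argument"
val s = serialization.findSerializerFor(param)
val manifestClass: Class[_] = if (s.includeManifest) param.getClass else null
val stored = (s.identifier, manifestClass, s.toBinary(param))   // what writeReplace keeps per parameter

val (sId, m, bytes) = stored
val restored = serialization.serializerByIdentity(sId).fromBinary(bytes, Option(m))  // what readResolve does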
@ -259,7 +195,7 @@ object TypedActor extends ExtensionId[TypedActorExtension] with ExtensionIdProvi
/**
* Returns the ActorContext (for a TypedActor) when inside a method call in a TypedActor.
*/
def context = currentContext.get match {
def context: ActorContext = currentContext.get match {
case null throw new IllegalStateException("Calling TypedActor.context outside of a TypedActor implementation method!")
case some some
}
@ -413,6 +349,174 @@ object TypedActor extends ExtensionId[TypedActorExtension] with ExtensionIdProvi
}
}
/**
* TypedProps is a TypedActor configuration object, that is thread safe and fully sharable.
* It's used in TypedActorFactory.typedActorOf to configure a TypedActor instance.
*/
object TypedProps {
val defaultDispatcherId: String = Dispatchers.DefaultDispatcherId
val defaultFaultHandler: FaultHandlingStrategy = akka.actor.Props.defaultFaultHandler
val defaultTimeout: Option[Timeout] = None
val defaultLoader: Option[ClassLoader] = None
/**
 * @return a sequence of interfaces that the specified class implements,
 * or a sequence containing only the class itself, if it is an interface.
*/
def extractInterfaces(clazz: Class[_]): Seq[Class[_]] =
if (clazz.isInterface) Seq[Class[_]](clazz) else clazz.getInterfaces.toList
/**
* Uses the supplied class as the factory for the TypedActor implementation,
* proxying all the interfaces it implements.
*
* Scala API
*/
def apply[T <: AnyRef](implementation: Class[T]): TypedProps[T] =
new TypedProps[T](implementation)
/**
* Uses the supplied class as the factory for the TypedActor implementation,
* and that has the specified interface,
* or if the interface class is not an interface, all the interfaces it implements,
* appended in the sequence of interfaces.
*
* Scala API
*/
def apply[T <: AnyRef](interface: Class[_ >: T], implementation: Class[T]): TypedProps[T] =
new TypedProps[T](extractInterfaces(interface), () implementation.newInstance())
/**
* Uses the supplied thunk as the factory for the TypedActor implementation,
* and that has the specified interface,
* or if the interface class is not an interface, all the interfaces it implements,
* appended in the sequence of interfaces.
*
* Scala API
*/
def apply[T <: AnyRef](interface: Class[_ >: T], creator: T): TypedProps[T] =
new TypedProps[T](extractInterfaces(interface), () creator)
/**
* Uses the supplied class as the factory for the TypedActor implementation,
* proxying all the interfaces it implements.
*
* Scala API
*/
def apply[T <: AnyRef: ClassManifest](): TypedProps[T] =
new TypedProps[T](implicitly[ClassManifest[T]].erasure.asInstanceOf[Class[T]])
}
/**
* TypedProps is a TypedActor configuration object, that is thread safe and fully sharable.
* It's used in TypedActorFactory.typedActorOf to configure a TypedActor instance.
*/
case class TypedProps[T <: AnyRef] protected[TypedProps] (
interfaces: Seq[Class[_]],
creator: () T,
dispatcher: String = TypedProps.defaultDispatcherId,
faultHandler: FaultHandlingStrategy = TypedProps.defaultFaultHandler,
timeout: Option[Timeout] = TypedProps.defaultTimeout,
loader: Option[ClassLoader] = TypedProps.defaultLoader) {
/**
 * Uses the supplied class as the factory for the TypedActor implementation,
 * proxying all the interfaces it implements.
*/
def this(implementation: Class[T]) =
this(interfaces = TypedProps.extractInterfaces(implementation),
creator = () implementation.newInstance())
/**
* Uses the supplied Creator as the factory for the TypedActor implementation,
* and that has the specified interface,
* or if the interface class is not an interface, all the interfaces it implements,
* appended in the sequence of interfaces.
*
* Java API.
*/
def this(interface: Class[_ >: T], implementation: Creator[T]) =
this(interfaces = TypedProps.extractInterfaces(interface),
creator = () implementation.create())
/**
* Uses the supplied class as the factory for the TypedActor implementation,
* and that has the specified interface,
* or if the interface class is not an interface, all the interfaces it implements,
* appended in the sequence of interfaces.
*
* Java API.
*/
def this(interface: Class[_ >: T], implementation: Class[T]) =
this(interfaces = TypedProps.extractInterfaces(interface),
creator = () implementation.newInstance())
/**
 * Returns a new TypedProps with the specified dispatcher set.
*/
def withDispatcher(d: String) = copy(dispatcher = d)
/**
 * Returns a new TypedProps with the specified fault handler set.
*/
def withFaultHandler(f: FaultHandlingStrategy) = copy(faultHandler = f)
/**
 * @return a new TypedProps that will use the specified ClassLoader to create its proxy class in.
 * If loader is null, it will use the bootstrap classloader.
*
* Java API
*/
def withLoader(loader: ClassLoader): TypedProps[T] = withLoader(Option(loader))
/**
 * @return a new TypedProps that will use the specified ClassLoader to create its proxy class in.
 * If loader is null, it will use the bootstrap classloader.
*
* Scala API
*/
def withLoader(loader: Option[ClassLoader]): TypedProps[T] = this.copy(loader = loader)
/**
 * @return a new TypedProps that will use the specified Timeout for its non-void-returning methods;
 * if null is specified, it will use the default ActorTimeout as specified in the configuration.
*
* Java API
*/
def withTimeout(timeout: Timeout): TypedProps[T] = this.copy(timeout = Option(timeout))
/**
 * @return a new TypedProps that will use the specified Timeout for its non-void-returning methods;
 * if None is specified, it will use the default ActorTimeout as specified in the configuration.
*
* Scala API
*/
def withTimeout(timeout: Option[Timeout]): TypedProps[T] = this.copy(timeout = timeout)
/**
 * Returns a new TypedProps that additionally has the specified interface,
 * or, if the given class is not an interface, all the interfaces it implements,
 * appended to the sequence of interfaces.
*/
def withInterface(interface: Class[_ >: T]): TypedProps[T] =
this.copy(interfaces = interfaces ++ TypedProps.extractInterfaces(interface))
/**
 * Returns a new TypedProps without the specified interface,
 * or, if the given class is not an interface, without all the interfaces it implements.
*/
def withoutInterface(interface: Class[_ >: T]): TypedProps[T] =
this.copy(interfaces = interfaces diff TypedProps.extractInterfaces(interface))
import akka.actor.{ Props ActorProps }
def actorProps(): ActorProps =
if (dispatcher == ActorProps().dispatcher && faultHandler == ActorProps().faultHandler) ActorProps()
else ActorProps(dispatcher = dispatcher, faultHandler = faultHandler)
}
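// Illustrative sketch of the new TypedProps API: configure and create a typed actor.
// `Squarer`, `SquarerImpl` and "my-dispatcher" are hypothetical names; TypedProps and
// TypedActor are assumed to live in akka.actor, as in this file.
import java.util.concurrent.TimeUnit
import akka.actor.{ ActorSystem, TypedActor, TypedProps }
import akka.util.{ Duration, Timeout }

trait Squarer { def square(i: Int): Int }
class SquarerImpl extends Squarer { def square(i: Int) = i * i }

val system = ActorSystem("sketch")

val props = TypedProps[SquarerImpl]()                    // proxy all interfaces of SquarerImpl
  .withDispatcher("my-dispatcher")                       // run on a specific dispatcher
  .withTimeout(Timeout(Duration(5, TimeUnit.SECONDS)))   // timeout for non-void methods

val squarer: Squarer = TypedActor(system).typedActorOf(props)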
case class ContextualTypedActorFactory(typedActor: TypedActorExtension, actorFactory: ActorContext) extends TypedActorFactory {
override def getActorRefFor(proxy: AnyRef): ActorRef = typedActor.getActorRefFor(proxy)
override def isTypedActor(proxyOrNot: AnyRef): Boolean = typedActor.isTypedActor(proxyOrNot)
@ -441,29 +545,28 @@ class TypedActorExtension(system: ActorSystemImpl) extends TypedActorFactory wit
// Private API
private[akka] def createProxy[R <: AnyRef](supervisor: ActorRefFactory, interfaces: Array[Class[_]], constructor: (AtomVar[R]) Actor, props: Props, name: Option[String], loader: ClassLoader): R = {
val proxyVar = new AtomVar[R]
configureAndProxyLocalActorRef[R](supervisor, interfaces, proxyVar, props.withCreator(constructor(proxyVar)), name, loader)
}
private[akka] def createProxyAndTypedActor[R <: AnyRef, T <: R](supervisor: ActorRefFactory, interface: Class[_], constructor: T, props: Props, name: Option[String], loader: ClassLoader): R =
createProxy[R](supervisor, extractInterfaces(interface), (ref: AtomVar[R]) new TypedActor[R, T](ref, constructor), props, name, loader)
private[akka] def configureAndProxyLocalActorRef[T <: AnyRef](supervisor: ActorRefFactory, interfaces: Array[Class[_]], proxyVar: AtomVar[T], props: Props, name: Option[String], loader: ClassLoader): T = {
private[akka] def createActorRefProxy[R <: AnyRef, T <: R](props: TypedProps[T], proxyVar: AtomVar[R], actorRef: ActorRef): R = {
//Warning, do not change order of the following statements, it's some elaborate chicken-n-egg handling
val actorVar = new AtomVar[ActorRef](null)
val timeout = props.timeout match {
case Props.`defaultTimeout` settings.ActorTimeout
case x x
}
val proxy: T = Proxy.newProxyInstance(loader, interfaces, new TypedActorInvocationHandler(this, actorVar, timeout)).asInstanceOf[T]
proxyVar.set(proxy) // Chicken and egg situation we needed to solve, set the proxy so that we can set the self-reference inside each receive
val ref = if (name.isDefined) supervisor.actorOf(props, name.get) else supervisor.actorOf(props)
actorVar.set(ref) //Make sure the InvocationHandler gets ahold of the actor reference, this is not a problem since the proxy hasn't escaped this method yet
proxyVar.get
}
val classLoader: ClassLoader = if (props.loader.nonEmpty) props.loader.get else props.interfaces.headOption.map(_.getClassLoader).orNull //If we have no loader, we arbitrarily take the loader of the first interface
val proxy = Proxy.newProxyInstance(
classLoader,
props.interfaces.toArray,
new TypedActorInvocationHandler(
this,
actorVar,
if (props.timeout.isDefined) props.timeout.get else this.settings.ActorTimeout)).asInstanceOf[R]
private[akka] def extractInterfaces(clazz: Class[_]): Array[Class[_]] = if (clazz.isInterface) Array[Class[_]](clazz) else clazz.getInterfaces
proxyVar match {
case null
actorVar.set(actorRef)
proxy
case _
proxyVar.set(proxy) // Chicken and egg situation we needed to solve, set the proxy so that we can set the self-reference inside each receive
actorVar.set(actorRef) //Make sure the InvocationHandler gets ahold of the actor reference, this is not a problem since the proxy hasn't escaped this method yet
proxyVar.get
}
}
private[akka] def invocationHandlerFor(typedActor_? : AnyRef): TypedActorInvocationHandler =
if ((typedActor_? ne null) && Proxy.isProxyClass(typedActor_?.getClass)) typedActor_? match {
@ -79,8 +79,8 @@ final case class TaskInvocation(eventStream: EventStream, runnable: Runnable, cl
try {
runnable.run()
} catch {
// FIXME catching all and continue isn't good for OOME, ticket #1418
case e eventStream.publish(Error(e, "TaskInvocation", e.getMessage))
// TODO catching all and continue isn't good for OOME, ticket #1418
case e eventStream.publish(Error(e, "TaskInvocation", this.getClass, e.getMessage))
} finally {
cleanup()
}
@ -208,8 +208,6 @@ abstract class MessageDispatcher(val prerequisites: DispatcherPrerequisites) ext
*/
protected[akka] def register(actor: ActorCell) {
inhabitantsUpdater.incrementAndGet(this)
// ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅
systemDispatch(actor, Create()) //FIXME should this be here or moved into ActorCell.start perhaps?
}
/**
@ -59,7 +59,7 @@ class Dispatcher(
executorService.get() execute invocation
} catch {
case e2: RejectedExecutionException
prerequisites.eventStream.publish(Warning("Dispatcher", e2.toString))
prerequisites.eventStream.publish(Warning("Dispatcher", this.getClass, e2.toString))
throw e2
}
}
@ -77,7 +77,7 @@ class Dispatchers(val settings: ActorSystem.Settings, val prerequisites: Dispatc
} else {
// Note that the configurator of the default dispatcher will be registered for this id,
// so this will only be logged once, which is crucial.
prerequisites.eventStream.publish(Warning("Dispatchers",
prerequisites.eventStream.publish(Warning("Dispatchers", this.getClass,
"Dispatcher [%s] not configured, using default-dispatcher".format(id)))
lookupConfigurator(DefaultDispatcherId)
}
@ -148,7 +148,7 @@ object Future {
try {
Right(body)
} catch {
// FIXME catching all and continue isn't good for OOME, ticket #1418
// TODO catching all and continue isn't good for OOME, ticket #1418
case e Left(e)
}
}
@ -294,47 +294,44 @@ object Future {
*/
def blocking(implicit executor: ExecutionContext): Unit =
_taskStack.get match {
case Some(taskStack) if taskStack.nonEmpty
val tasks = taskStack.elems
taskStack.clear()
_taskStack set None
dispatchTask(() _taskStack.get.get.elems = tasks, true)
case Some(_) _taskStack set None
case _ // already None
case stack if (stack ne null) && stack.nonEmpty
val tasks = stack.elems
stack.clear()
_taskStack.remove()
dispatchTask(() _taskStack.get.elems = tasks, true)
case _ _taskStack.remove()
}
private val _taskStack = new ThreadLocal[Option[Stack[() Unit]]]() {
override def initialValue = None
}
private val _taskStack = new ThreadLocal[Stack[() Unit]]()
/**
* Internal API, do not call
*/
private[akka] def dispatchTask(task: () Unit, force: Boolean = false)(implicit executor: ExecutionContext): Unit =
_taskStack.get match {
case Some(taskStack) if !force taskStack push task
case stack if (stack ne null) && !force stack push task
case _ executor.execute(
new Runnable {
def run =
try {
val taskStack = Stack[() Unit](task)
_taskStack set Some(taskStack)
_taskStack set taskStack
while (taskStack.nonEmpty) {
val next = taskStack.pop()
try {
next.apply()
} catch {
case e
// FIXME catching all and continue isn't good for OOME, ticket #1418
// TODO catching all and continue isn't good for OOME, ticket #1418
executor match {
case m: MessageDispatcher
m.prerequisites.eventStream.publish(Error(e, "Future.dispatchTask", e.getMessage))
m.prerequisites.eventStream.publish(Error(e, "Future.dispatchTask", this.getClass, e.getMessage))
case other
e.printStackTrace()
}
}
}
} finally { _taskStack set None }
} finally { _taskStack.remove() }
})
}
}
@ -423,11 +420,17 @@ sealed trait Future[+T] extends japi.Future[T] with Await.Awaitable[T] {
}
/**
* Creates a Future that will be the result of the first completed Future of this and the Future that was passed into this.
* This is semantically the same as: Future.firstCompletedOf(Seq(this, that))
* Returns a new Future that will either hold the successful value of this Future,
 * or, if this Future fails, it will hold the result of "that" Future.
*/
//FIXME implement as The result of any of the Futures, or if oth failed, the first failure
def orElse[A >: T](that: Future[A]): Future[A] = Future.firstCompletedOf(List(this, that)) //TODO Optimize
def or[U >: T](that: Future[U]): Future[U] = {
val p = Promise[U]()
onComplete {
case r @ Right(_) p complete r
case _ p completeWith that
}
p
}
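// Illustrative sketch of the new `or` combinator: prefer this Future's successful
// result, fall back to `that` on failure. The system and computations are hypothetical.
import akka.actor.ActorSystem
import akka.dispatch.Future

val system = ActorSystem("sketch")
implicit val dispatcher = system.dispatcher

val primary: Future[Int] = Future[Int] { throw new RuntimeException("primary failed") }
val fallback: Future[Int] = Future { 42 }

val result: Future[Int] = primary or fallback   // completes with 42 in this case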
/**
* Creates a new Future that will handle any matching Throwable that this
@ -569,7 +572,7 @@ sealed trait Future[+T] extends japi.Future[T] with Await.Awaitable[T] {
protected def logError(msg: String, problem: Throwable): Unit = {
executor match {
case m: MessageDispatcher m.prerequisites.eventStream.publish(Error(problem, msg, problem.getMessage))
case m: MessageDispatcher m.prerequisites.eventStream.publish(Error(problem, msg, this.getClass, problem.getMessage))
case other problem.printStackTrace()
}
}
@ -796,7 +799,7 @@ class DefaultPromise[T](implicit val executor: ExecutionContext) extends Abstrac
final class KeptPromise[T](suppliedValue: Either[Throwable, T])(implicit val executor: ExecutionContext) extends Promise[T] {
val value = Some(resolve(suppliedValue))
def tryComplete(value: Either[Throwable, T]): Boolean = true
def tryComplete(value: Either[Throwable, T]): Boolean = false
def onComplete(func: Either[Throwable, T] Unit): this.type = {
val completedAs = value.get
Future dispatchTask (() func(completedAs))
@ -214,7 +214,7 @@ private[akka] abstract class Mailbox(val actor: ActorCell) extends MessageQueue
}
} catch {
case e
actor.system.eventStream.publish(Error(e, actor.self.path.toString, "exception during processing system messages, dropping " + SystemMessage.size(nextMessage) + " messages!"))
actor.system.eventStream.publish(Error(e, actor.self.path.toString, this.getClass, "exception during processing system messages, dropping " + SystemMessage.size(nextMessage) + " messages!"))
throw e
}
}
@ -227,27 +227,28 @@ private[akka] abstract class Mailbox(val actor: ActorCell) extends MessageQueue
* called when an actor is unregistered.
* By default it dequeues all system messages + messages and ships them to the owning actors' systems' DeadLetterMailbox
*/
protected[dispatch] def cleanUp(): Unit = if (actor ne null) {
val dlq = actor.systemImpl.deadLetterMailbox
if (hasSystemMessages) {
var message = systemDrain()
while (message ne null) {
// message must be virgin before being able to systemEnqueue again
val next = message.next
message.next = null
dlq.systemEnqueue(actor.self, message)
message = next
protected[dispatch] def cleanUp(): Unit =
if (actor ne null) { // actor is null for the deadLetterMailbox
val dlq = actor.systemImpl.deadLetterMailbox
if (hasSystemMessages) {
var message = systemDrain()
while (message ne null) {
// message must be virgin before being able to systemEnqueue again
val next = message.next
message.next = null
dlq.systemEnqueue(actor.self, message)
message = next
}
}
}
if (hasMessages) {
var envelope = dequeue
while (envelope ne null) {
dlq.enqueue(actor.self, envelope)
envelope = dequeue
if (hasMessages) {
var envelope = dequeue
while (envelope ne null) {
dlq.enqueue(actor.self, envelope)
envelope = dequeue
}
}
}
}
}
trait MessageQueue {
@ -38,19 +38,19 @@ class EventStream(private val debug: Boolean = false) extends LoggingBus with Su
}
override def subscribe(subscriber: ActorRef, channel: Class[_]): Boolean = {
if (debug) publish(Logging.Debug(simpleName(this), "subscribing " + subscriber + " to channel " + channel))
if (debug) publish(Logging.Debug(simpleName(this), this.getClass, "subscribing " + subscriber + " to channel " + channel))
super.subscribe(subscriber, channel)
}
override def unsubscribe(subscriber: ActorRef, channel: Class[_]): Boolean = {
val ret = super.unsubscribe(subscriber, channel)
if (debug) publish(Logging.Debug(simpleName(this), "unsubscribing " + subscriber + " from channel " + channel))
if (debug) publish(Logging.Debug(simpleName(this), this.getClass, "unsubscribing " + subscriber + " from channel " + channel))
ret
}
override def unsubscribe(subscriber: ActorRef) {
super.unsubscribe(subscriber)
if (debug) publish(Logging.Debug(simpleName(this), "unsubscribing " + subscriber + " from all channels"))
if (debug) publish(Logging.Debug(simpleName(this), this.getClass, "unsubscribing " + subscriber + " from all channels"))
}
}
@ -16,10 +16,6 @@ import scala.util.control.NoStackTrace
import java.util.concurrent.TimeoutException
import akka.dispatch.Await
object LoggingBus {
implicit def fromActorSystem(system: ActorSystem): LoggingBus = system.eventStream
}
/**
* This trait brings log level handling to the EventStream: it reads the log
* levels for the initial logging (StandardOutLogger) and the loggers & level
@ -75,7 +71,7 @@ trait LoggingBus extends ActorEventBus {
*/
private[akka] def startStdoutLogger(config: Settings) {
val level = levelFor(config.StdoutLogLevel) getOrElse {
StandardOutLogger.print(Error(new EventHandlerException, simpleName(this), "unknown akka.stdout-loglevel " + config.StdoutLogLevel))
StandardOutLogger.print(Error(new EventHandlerException, simpleName(this), this.getClass, "unknown akka.stdout-loglevel " + config.StdoutLogLevel))
ErrorLevel
}
AllLogLevels filter (level >= _) foreach (l subscribe(StandardOutLogger, classFor(l)))
@ -83,15 +79,16 @@ trait LoggingBus extends ActorEventBus {
loggers = Seq(StandardOutLogger)
_logLevel = level
}
publish(Debug(simpleName(this), "StandardOutLogger started"))
publish(Debug(simpleName(this), this.getClass, "StandardOutLogger started"))
}
/**
* Internal Akka use only
*/
private[akka] def startDefaultLoggers(system: ActorSystemImpl) {
val logName = simpleName(this) + "(" + system + ")"
val level = levelFor(system.settings.LogLevel) getOrElse {
StandardOutLogger.print(Error(new EventHandlerException, simpleName(this), "unknown akka.stdout-loglevel " + system.settings.LogLevel))
StandardOutLogger.print(Error(new EventHandlerException, logName, this.getClass, "unknown akka.stdout-loglevel " + system.settings.LogLevel))
ErrorLevel
}
try {
@ -105,7 +102,7 @@ trait LoggingBus extends ActorEventBus {
} yield {
try {
ReflectiveAccess.getClassFor[Actor](loggerName) match {
case Right(actorClass) addLogger(system, actorClass, level)
case Right(actorClass) addLogger(system, actorClass, level, logName)
case Left(exception) throw exception
}
} catch {
@ -119,7 +116,7 @@ trait LoggingBus extends ActorEventBus {
loggers = myloggers
_logLevel = level
}
publish(Debug(simpleName(this), "Default Loggers started"))
publish(Debug(logName, this.getClass, "Default Loggers started"))
if (!(defaultLoggers contains StandardOutLoggerName)) {
unsubscribe(StandardOutLogger)
}
@ -138,7 +135,7 @@ trait LoggingBus extends ActorEventBus {
val level = _logLevel // volatile access before reading loggers
if (!(loggers contains StandardOutLogger)) {
AllLogLevels filter (level >= _) foreach (l subscribe(StandardOutLogger, classFor(l)))
publish(Debug(simpleName(this), "shutting down: StandardOutLogger started"))
publish(Debug(simpleName(this), this.getClass, "shutting down: StandardOutLogger started"))
}
for {
logger loggers
@ -151,34 +148,106 @@ trait LoggingBus extends ActorEventBus {
case _
}
}
publish(Debug(simpleName(this), "all default loggers stopped"))
publish(Debug(simpleName(this), this.getClass, "all default loggers stopped"))
}
private def addLogger(system: ActorSystemImpl, clazz: Class[_ <: Actor], level: LogLevel): ActorRef = {
private def addLogger(system: ActorSystemImpl, clazz: Class[_ <: Actor], level: LogLevel, logName: String): ActorRef = {
val name = "log" + Extension(system).id() + "-" + simpleName(clazz)
val actor = system.systemActorOf(Props(clazz), name)
implicit val timeout = Timeout(3 seconds)
import akka.pattern.ask
val response = try Await.result(actor ? InitializeLogger(this), timeout.duration) catch {
case _: TimeoutException
publish(Warning(simpleName(this), "Logger " + name + " did not respond within " + timeout + " to InitializeLogger(bus)"))
publish(Warning(logName, this.getClass, "Logger " + name + " did not respond within " + timeout + " to InitializeLogger(bus)"))
}
if (response != LoggerInitialized)
throw new LoggerInitializationException("Logger " + name + " did not respond with LoggerInitialized, sent instead " + response)
AllLogLevels filter (level >= _) foreach (l subscribe(actor, classFor(l)))
publish(Debug(simpleName(this), "logger " + name + " started"))
publish(Debug(logName, this.getClass, "logger " + name + " started"))
actor
}
}
/**
* This trait defines the interface to be provided by a log source formatting
* rule as used by [[akka.event.Logging]]s `apply`/`create` method.
*
* See the companion object for default implementations.
*
* Example:
* {{{
* trait MyType { // as an example
* def name: String
* }
*
* implicit val myLogSourceType: LogSource[MyType] = new LogSource {
* def genString(a: MyType) = a.name
* }
*
* class MyClass extends MyType {
* val log = Logging(eventStream, this) // will use "hallo" as logSource
* def name = "hallo"
* }
* }}}
*
* The second variant is used for including the actor systems address:
* {{{
* trait MyType { // as an example
* def name: String
* }
*
* implicit val myLogSourceType: LogSource[MyType] = new LogSource {
* def genString(a: MyType) = a.name
* def genString(a: MyType, s: ActorSystem) = a.name + "," + s
* }
*
* class MyClass extends MyType {
 * val sys = ActorSystem("sys")
* val log = Logging(sys, this) // will use "hallo,akka://sys" as logSource
* def name = "hallo"
* }
* }}}
*
* The default implementation of the second variant will just call the first.
*/
trait LogSource[-T] {
def genString(t: T): String
def genString(t: T, system: ActorSystem): String = genString(t)
def getClazz(t: T): Class[_] = t.getClass
}
/**
* This is a marker class which is inserted as originator class into
* [[akka.event.LogEvent]] when the string representation was supplied
* directly.
*/
class DummyClassForStringSources
/**
* This object holds predefined formatting rules for log sources.
*
* In case an [[akka.actor.ActorSystem]] is provided, the following apply:
* <ul>
* <li>[[akka.actor.Actor]] and [[akka.actor.ActorRef]] will be represented by their absolute physical path</li>
* <li>providing a `String` as source will append "(<system address>)" and use the result</li>
* <li>providing a `Class` will extract its simple name, append "(<system address>)" and use the result</li>
* <li>anything else gives compile error unless implicit [[akka.event.LogSource]] is in scope for it</li>
* </ul>
*
* In case a [[akka.event.LoggingBus]] is provided, the following apply:
* <ul>
* <li>[[akka.actor.Actor]] and [[akka.actor.ActorRef]] will be represented by their absolute physical path</li>
* <li>providing a `String` as source will be used as is</li>
* <li>providing a `Class` will extract its simple name</li>
* <li>anything else gives compile error unless implicit [[akka.event.LogSource]] is in scope for it</li>
* </ul>
*/
object LogSource {
implicit val fromString: LogSource[String] = new LogSource[String] {
def genString(s: String) = s
override def genString(s: String, system: ActorSystem) = s + "(" + system + ")"
override def getClazz(s: String) = classOf[DummyClassForStringSources]
}
implicit val fromActor: LogSource[Actor] = new LogSource[Actor] {
@ -192,18 +261,55 @@ object LogSource {
// this one unfortunately does not work as implicit, because existential types have some weird behavior
val fromClass: LogSource[Class[_]] = new LogSource[Class[_]] {
def genString(c: Class[_]) = simpleName(c)
override def genString(c: Class[_], system: ActorSystem) = simpleName(c) + "(" + system + ")"
override def getClazz(c: Class[_]) = c
}
implicit def fromAnyClass[T]: LogSource[Class[T]] = fromClass.asInstanceOf[LogSource[Class[T]]]
def apply[T: LogSource](o: T) = implicitly[LogSource[T]].genString(o)
/**
* Convenience converter access: given an implicit `LogSource`, generate the
* string representation and originating class.
*/
def apply[T: LogSource](o: T): (String, Class[_]) = {
val ls = implicitly[LogSource[T]]
(ls.genString(o), ls.getClazz(o))
}
def fromAnyRef(o: AnyRef): String =
/**
* Convenience converter access: given an implicit `LogSource` and
* [[akka.actor.ActorSystem]], generate the string representation and
* originating class.
*/
def apply[T: LogSource](o: T, system: ActorSystem): (String, Class[_]) = {
val ls = implicitly[LogSource[T]]
(ls.genString(o, system), ls.getClazz(o))
}
/**
* construct string representation for any object according to
* rules above with fallback to its `Class`s simple name.
*/
def fromAnyRef(o: AnyRef): (String, Class[_]) =
o match {
case c: Class[_] fromClass.genString(c)
case a: Actor fromActor.genString(a)
case a: ActorRef fromActorRef.genString(a)
case s: String s
case x simpleName(x)
case c: Class[_] apply(c)
case a: Actor apply(a)
case a: ActorRef apply(a)
case s: String apply(s)
case x (simpleName(x), x.getClass)
}
/**
* construct string representation for any object according to
* rules above (including the actor systems address) with fallback to its
* `Class`s simple name.
*/
def fromAnyRef(o: AnyRef, system: ActorSystem): (String, Class[_]) =
o match {
case c: Class[_] apply(c)
case a: Actor apply(a)
case a: ActorRef apply(a)
case s: String apply(s)
case x (simpleName(x) + "(" + system + ")", x.getClass)
}
}
@ -219,6 +325,11 @@ object LogSource {
* log.info("hello world!")
* </code></pre>
*
* The source object is used in two fashions: its `Class[_]` will be part of
* all log events produced by this logger, plus a string representation is
* generated which may contain per-instance information, see `apply` or `create`
* below.
*
* Loggers are attached to the level-specific channels <code>Error</code>,
* <code>Warning</code>, <code>Info</code> and <code>Debug</code> as
* appropriate for the configured (or set) log level. If you want to implement
@ -306,42 +417,80 @@ object Logging {
val debugFormat = "[DEBUG] [%s] [%s] [%s] %s".intern
/**
* Obtain LoggingAdapter for the given event stream (system) and source object.
* Note that there is an implicit conversion from [[akka.actor.ActorSystem]]
* to [[akka.event.LoggingBus]].
* Obtain LoggingAdapter for the given actor system and source object. This
* will use the systems event stream and include the systems address in the
* log source string.
*
* The source is used to identify the source of this logging channel and must have
* a corresponding LogSource[T] instance in scope; by default these are
* provided for Class[_], Actor, ActorRef and String types. The source
* object is translated to a String according to the following rules:
* <ul>
* <li>if it is an Actor or ActorRef, its path is used</li>
* <li>in case of a String it is used as is</li>
* <li>in case of a class an approximation of its simpleName
* <li>and in all other cases the simpleName of its class</li>
* </ul>
* <b>Do not use this if you want to supply a log category string (like
* com.example.app.whatever) unaltered,</b> supply `system.eventStream` in this
* case or use
*
* {{{
* Logging(system, this.getClass)
* }}}
*
* The source is used to identify the source of this logging channel and
* must have a corresponding implicit LogSource[T] instance in scope; by
* default these are provided for Class[_], Actor, ActorRef and String types.
* See the companion object of [[akka.event.LogSource]] for details.
*
* You can add your own rules quite easily, see [[akka.event.LogSource]].
*/
def apply[T: LogSource](eventStream: LoggingBus, logSource: T): LoggingAdapter =
new BusLogging(eventStream, implicitly[LogSource[T]].genString(logSource))
def apply[T: LogSource](system: ActorSystem, logSource: T): LoggingAdapter = {
val (str, clazz) = LogSource(logSource, system)
new BusLogging(system.eventStream, str, clazz)
}
/**
* Java API: Obtain LoggingAdapter for the given system and source object. The
* source object is used to identify the source of this logging channel. The source
* object is translated to a String according to the following rules:
* <ul>
* <li>if it is an Actor or ActorRef, its path is used</li>
* <li>in case of a String it is used as is</li>
* <li>in case of a class an approximation of its simpleName
* <li>and in all other cases the simpleName of its class</li>
* </ul>
* Obtain LoggingAdapter for the given logging bus and source object.
*
* The source is used to identify the source of this logging channel and
* must have a corresponding implicit LogSource[T] instance in scope; by
* default these are provided for Class[_], Actor, ActorRef and String types.
* See the companion object of [[akka.event.LogSource]] for details.
*
* You can add your own rules quite easily, see [[akka.event.LogSource]].
*/
def getLogger(system: ActorSystem, logSource: AnyRef): LoggingAdapter = apply(system.eventStream, LogSource.fromAnyRef(logSource))
def apply[T: LogSource](bus: LoggingBus, logSource: T): LoggingAdapter = {
val (str, clazz) = LogSource(logSource)
new BusLogging(bus, str, clazz)
}
/**
* Java API: Obtain LoggingAdapter for the given event bus and source object. The
* source object is used to identify the source of this logging channel.
* Obtain LoggingAdapter for the given actor system and source object. This
* will use the systems event stream and include the systems address in the
* log source string.
*
* <b>Do not use this if you want to supply a log category string (like
* com.example.app.whatever) unaltered,</b> supply `system.eventStream` in this
* case or use
*
* {{{
* Logging.getLogger(system, this.getClass());
* }}}
*
* The source is used to identify the source of this logging channel and
* must have a corresponding implicit LogSource[T] instance in scope; by
* default these are provided for Class[_], Actor, ActorRef and String types.
* See the companion object of [[akka.event.LogSource]] for details.
*/
def getLogger(bus: LoggingBus, logSource: AnyRef): LoggingAdapter = apply(bus, LogSource.fromAnyRef(logSource))
def getLogger(system: ActorSystem, logSource: AnyRef): LoggingAdapter = {
val (str, clazz) = LogSource.fromAnyRef(logSource, system)
new BusLogging(system.eventStream, str, clazz)
}
/**
* Obtain LoggingAdapter for the given logging bus and source object.
*
* The source is used to identify the source of this logging channel and
* must have a corresponding implicit LogSource[T] instance in scope; by
* default these are provided for Class[_], Actor, ActorRef and String types.
* See the companion object of [[akka.event.LogSource]] for details.
*/
def getLogger(bus: LoggingBus, logSource: AnyRef): LoggingAdapter = {
val (str, clazz) = LogSource.fromAnyRef(logSource)
new BusLogging(bus, str, clazz)
}
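// Illustrative sketch of the two factories above. `MyActor` and the
// "com.example.app" category are hypothetical.
import akka.actor.{ Actor, ActorSystem }
import akka.event.Logging

class MyActor extends Actor {
  val log = Logging(context.system, this)   // log source: this actor's path plus the system address
  def receive = { case msg => log.info("received {}", msg) }
}

val system = ActorSystem("sketch")
val appLog = Logging(system.eventStream, "com.example.app")   // category string is used as-is
appLog.debug("starting up")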
/**
* Artificial exception injected into Error events if no Throwable is
@ -363,19 +512,34 @@ object Logging {
* The LogLevel of this LogEvent
*/
def level: LogLevel
/**
* The source of this event
*/
def logSource: String
/**
* The class of the source of this event
*/
def logClass: Class[_]
/**
* The message, may be any object or null.
*/
def message: Any
}
/**
* For ERROR Logging
*/
case class Error(cause: Throwable, logSource: String, message: Any = "") extends LogEvent {
def this(logSource: String, message: Any) = this(Error.NoCause, logSource, message)
case class Error(cause: Throwable, logSource: String, logClass: Class[_], message: Any = "") extends LogEvent {
def this(logSource: String, logClass: Class[_], message: Any) = this(Error.NoCause, logSource, logClass, message)
override def level = ErrorLevel
}
object Error {
def apply(logSource: String, message: Any) = new Error(NoCause, logSource, message)
def apply(logSource: String, logClass: Class[_], message: Any) = new Error(NoCause, logSource, logClass, message)
/** Null Object used for errors without cause Throwable */
object NoCause extends NoStackTrace
@ -384,21 +548,21 @@ object Logging {
/**
* For WARNING Logging
*/
case class Warning(logSource: String, message: Any = "") extends LogEvent {
case class Warning(logSource: String, logClass: Class[_], message: Any = "") extends LogEvent {
override def level = WarningLevel
}
/**
* For INFO Logging
*/
case class Info(logSource: String, message: Any = "") extends LogEvent {
case class Info(logSource: String, logClass: Class[_], message: Any = "") extends LogEvent {
override def level = InfoLevel
}
/**
* For DEBUG Logging
*/
case class Debug(logSource: String, message: Any = "") extends LogEvent {
case class Debug(logSource: String, logClass: Class[_], message: Any = "") extends LogEvent {
override def level = DebugLevel
}
@ -440,7 +604,7 @@ object Logging {
case e: Warning warning(e)
case e: Info info(e)
case e: Debug debug(e)
case e warning(Warning(simpleName(this), "received unexpected event of class " + e.getClass + ": " + e))
case e warning(Warning(simpleName(this), this.getClass, "received unexpected event of class " + e.getClass + ": " + e))
}
}
@ -627,7 +791,7 @@ trait LoggingAdapter {
}
}
class BusLogging(val bus: LoggingBus, val logSource: String) extends LoggingAdapter {
class BusLogging(val bus: LoggingBus, val logSource: String, val logClass: Class[_]) extends LoggingAdapter {
import Logging._
@ -636,14 +800,14 @@ class BusLogging(val bus: LoggingBus, val logSource: String) extends LoggingAdap
def isInfoEnabled = bus.logLevel >= InfoLevel
def isDebugEnabled = bus.logLevel >= DebugLevel
protected def notifyError(message: String) { bus.publish(Error(logSource, message)) }
protected def notifyError(message: String) { bus.publish(Error(logSource, logClass, message)) }
protected def notifyError(cause: Throwable, message: String) { bus.publish(Error(cause, logSource, message)) }
protected def notifyError(cause: Throwable, message: String) { bus.publish(Error(cause, logSource, logClass, message)) }
protected def notifyWarning(message: String) { bus.publish(Warning(logSource, message)) }
protected def notifyWarning(message: String) { bus.publish(Warning(logSource, logClass, message)) }
protected def notifyInfo(message: String) { bus.publish(Info(logSource, message)) }
protected def notifyInfo(message: String) { bus.publish(Info(logSource, logClass, message)) }
protected def notifyDebug(message: String) { bus.publish(Debug(logSource, message)) }
protected def notifyDebug(message: String) { bus.publish(Debug(logSource, logClass, message)) }
}
@ -36,7 +36,8 @@ object LoggingReceive {
class LoggingReceive(source: AnyRef, r: Receive)(implicit system: ActorSystem) extends Receive {
def isDefinedAt(o: Any) = {
val handled = r.isDefinedAt(o)
system.eventStream.publish(Debug(LogSource.fromAnyRef(source), "received " + (if (handled) "handled" else "unhandled") + " message " + o))
val (str, clazz) = LogSource.fromAnyRef(source)
system.eventStream.publish(Debug(str, clazz, "received " + (if (handled) "handled" else "unhandled") + " message " + o))
handled
}
def apply(o: Any): Unit = r(o)
@ -4,10 +4,10 @@
package akka.pattern
object Patterns {
import akka.actor.ActorRef
import akka.actor.{ ActorRef, ActorSystem }
import akka.dispatch.Future
import akka.pattern.{ ask scalaAsk }
import akka.util.Timeout
import akka.util.{ Timeout, Duration }
/**
* <i>Java API for `akka.pattern.ask`:</i>
@ -121,4 +121,18 @@ object Patterns {
* [see [[akka.dispatch.Future]] for a description of `flow`]
*/
def pipeTo[T](future: Future[T], actorRef: ActorRef): Future[T] = akka.pattern.pipeTo(future, actorRef)
/**
* Returns a [[akka.dispatch.Future]] that will be completed with success (value `true`) when
 * existing messages of the target actor have been processed and the actor has been
* terminated.
*
* Useful when you need to wait for termination or compose ordered termination of several actors.
*
* If the target actor isn't terminated within the timeout the [[akka.dispatch.Future]]
* is completed with failure [[akka.actor.ActorTimeoutException]].
*/
def gracefulStop(target: ActorRef, timeout: Duration, system: ActorSystem): Future[java.lang.Boolean] = {
akka.pattern.gracefulStop(target, timeout)(system).asInstanceOf[Future[java.lang.Boolean]]
}
}
@ -38,9 +38,9 @@ package akka
*/
package object pattern {
import akka.actor.{ ActorRef, InternalActorRef, ActorRefWithProvider }
import akka.actor._
import akka.dispatch.{ Future, Promise }
import akka.util.Timeout
import akka.util.{ Timeout, Duration }
/**
* Import this implicit conversion to gain `?` and `ask` methods on
@ -137,4 +137,40 @@ package object pattern {
future
}
/**
* Returns a [[akka.dispatch.Future]] that will be completed with success (value `true`) when
 * existing messages of the target actor have been processed and the actor has been
* terminated.
*
* Useful when you need to wait for termination or compose ordered termination of several actors.
*
* If the target actor isn't terminated within the timeout the [[akka.dispatch.Future]]
* is completed with failure [[akka.actor.ActorTimeoutException]].
*/
def gracefulStop(target: ActorRef, timeout: Duration)(implicit system: ActorSystem): Future[Boolean] = {
if (target.isTerminated) {
Promise.successful(true)
} else {
val result = Promise[Boolean]()
system.actorOf(Props(new Actor {
// Terminated will be received when target has been stopped
context watch target
target ! PoisonPill
// ReceiveTimeout will be received if nothing else is received within the timeout
context setReceiveTimeout timeout
def receive = {
case Terminated(a) if a == target
result success true
context stop self
case ReceiveTimeout
result failure new ActorTimeoutException(
"Failed to stop [%s] within [%s]".format(target.path, context.receiveTimeout))
context stop self
}
}))
result
}
}
}
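// Illustrative sketch of gracefulStop: stop an actor and await confirmation.
// `victim` is a hypothetical actor; assumes the akka.util.duration._ DSL for "5 seconds".
import akka.actor.{ Actor, ActorSystem, Props }
import akka.dispatch.Await
import akka.pattern.gracefulStop
import akka.util.duration._

val system = ActorSystem("sketch")
val victim = system.actorOf(Props(new Actor { def receive = { case _ => } }))

val stopped = gracefulStop(victim, 5 seconds)(system)   // Future[Boolean]
Await.result(stopped, 6 seconds)                        // true, or throws ActorTimeoutException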
@ -25,7 +25,7 @@ case class WithListeners(f: (ActorRef) ⇒ Unit) extends ListenerMessage
* Send <code>WithListeners(fun)</code> to traverse the current listeners.
*/
trait Listeners { self: Actor
private val listeners = new ConcurrentSkipListSet[ActorRef]
protected val listeners = new ConcurrentSkipListSet[ActorRef]
protected def listenerManagement: Actor.Receive = {
case Listen(l) listeners add l
@ -1,492 +0,0 @@
/**
* Copyright (C) 2009-2011 Typesafe Inc. <http://www.typesafe.com>
*/
package akka.routing
import akka.dispatch.{ Promise }
import akka.actor._
/**
* Actor pooling
*
 * An actor pool is a message router for a set of delegate actors. The pool is an actor itself.
 * There are a handful of basic concepts that need to be understood when working with and defining your pool.
 *
 * Selectors - A selector is a trait that determines how and how many pooled actors will receive an incoming message.
 * Capacitors - A capacitor is a trait that influences the size of the pool. There are effectively two types.
 * The first determines the size itself - either fixed or bounded.
 * The second determines how to adjust the size of the pool according to some internal pressure characteristic.
 * Filters - A filter can be used to refine the raw pressure value returned from a capacitor.
*
* It should be pointed out that all actors in the pool are treated as essentially equivalent. This is not to say
* that one couldn't instance different classes within the pool, only that the pool, when selecting and routing,
* will not take any type information into consideration.
*
* @author Garrick Evans
*/
object ActorPool {
case object Stat
case class Stats(size: Int)
}
/**
* Defines the nature of an actor pool.
*/
trait ActorPool {
/**
* Adds a new actor to the pool. The DefaultActorPool implementation will start and link (supervise) this actor.
* This method is invoked whenever the pool determines it must boost capacity.
* @return A new actor for the pool
*/
def instance(defaults: Props): ActorRef
/**
* This method gets called when a delegate is to be evicted, by default it sends a PoisonPill to the delegate
*/
def evict(delegate: ActorRef): Unit = delegate ! PoisonPill
/**
* Returns the overall desired change in pool capacity. This method is used by non-static pools as the means
* for the capacity strategy to influence the pool.
* @param _delegates The current sequence of pooled actors
* @return the number of delegates by which the pool should be adjusted (positive, negative or zero)
*/
def capacity(delegates: Seq[ActorRef]): Int
/**
* Provides the results of the selector, one or more actors, to which an incoming message is forwarded.
* This method returns an iterator since a selector might return more than one actor to handle the message.
* You might want to do this to perform redundant processing of particularly error-prone messages.
* @param _delegates The current sequence of pooled actors
* @return a list of actors to which the message will be delivered
*/
def select(delegates: Seq[ActorRef]): Seq[ActorRef]
}
/**
* A default implementation of a pool that:
* First, invokes the pool's capacitor that tells it, based on the current delegate count
 * and its own heuristic, by how many delegates the pool should be resized. Resizing
 * can be incremental, decremental or flat. If there is a change to capacity, new delegates
* are added or existing ones are removed. Removed actors are sent the PoisonPill message.
* New actors are automatically started and linked. The pool supervises the actors and will
* use the fault handling strategy specified by the mixed-in ActorPoolSupervisionConfig.
 * Pooled actors may have any lifecycle. If you're testing pool sizes during runtime, take a
* look at the unit tests... Any delegate with a <b>Permanent</b> lifecycle will be
* restarted and the pool size will be level with what it was prior to the fault. In just
* about every other case, e.g. the delegates are <b>Temporary</b> or the delegate cannot be
 * restarted within the time interval specified in the fault handling strategy, the pool will
 * be temporarily shy by that actor (it will have been removed but not back-filled). The
 * back-fill, if any is required, will occur on the next message [as usual].
*
* Second, invokes the pool's selector that returns a list of delegates that are to receive
 * the incoming message. Selectors may return more than one actor. If <i>partialFill</i>
 * is true then it might also be the case that fewer than the desired number of actors will be
 * returned. If <i>partialFill</i> is false, the selector may return duplicate actors to
* reach the desired <i>selectionCount</i>.
*
* Lastly, routes by forwarding the incoming message to each delegate in the selected set.
*/
trait DefaultActorPool extends ActorPool { this: Actor
import ActorPool._
protected[akka] var _delegates = Vector[ActorRef]()
val defaultProps: Props = Props.default.withDispatcher(this.context.dispatcher.id)
override def preStart() {
resizeIfAppropriate()
}
override def postStop() {
_delegates foreach evict
_delegates = Vector.empty
}
protected def _route(): Actor.Receive = {
// for testing...
case Stat
sender ! Stats(_delegates length)
case Terminated(victim)
_delegates = _delegates filterNot { victim == }
case msg
resizeIfAppropriate()
select(_delegates) foreach { _ forward msg }
}
protected def resizeIfAppropriate() {
val requestedCapacity = capacity(_delegates)
val newDelegates = requestedCapacity match {
case qty if qty > 0
_delegates ++ Vector.fill(requestedCapacity)(context.watch(instance(defaultProps)))
case qty if qty < 0
_delegates.splitAt(_delegates.length + requestedCapacity) match {
case (keep, abandon)
abandon foreach evict
keep
}
case _ _delegates //No change
}
_delegates = newDelegates
}
  }
}
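// Illustrative sketch of how the pool traits above (removed by this commit) were
// typically composed; EchoWorker/EchoPool and the chosen bounds are hypothetical.
import akka.actor.{ Actor, Props }
import akka.routing._

class EchoWorker extends Actor { def receive = { case m => sender ! m } }

class EchoPool extends Actor with DefaultActorPool
    with BoundedCapacityStrategy
    with MailboxPressureCapacitor   // pressure = number of delegates with a mailbox backlog
    with SmallestMailboxSelector
    with BasicFilter {
  def lowerBound = 2
  def upperBound = 4
  def pressureThreshold = 1
  def rampupRate = 0.1
  def backoffRate = 0.5
  def backoffThreshold = 0.5
  def partialFill = true
  def selectionCount = 1
  def instance(defaults: Props) = context.actorOf(defaults.withCreator(new EchoWorker))
  def receive = _route()
}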
/**
* Selectors
*
* These traits define how, when a message needs to be routed, delegate(s) are chosen from the pool.
* Note that it's acceptable to return more than one actor to handle a given message.
*/
/**
* Returns the set of delegates with the least amount of message backlog.
*/
trait SmallestMailboxSelector {
/**
* @return the number of delegates that will receive each message
*/
def selectionCount: Int
/**
* If there aren't enough delegates to provide the selectionCount, either
* send the message to fewer, or send the message selectionCount times
* including more than once to some of the delegates. This setting does
* not matter if you configure selectionCount to always be less than or
* equal to the number of delegates in the pool.
* @return true to send to fewer delegates or false to send to duplicate delegates
*/
def partialFill: Boolean
def select(delegates: Seq[ActorRef]): Seq[ActorRef] = {
var set: Seq[ActorRef] = Nil
var take = if (partialFill) math.min(selectionCount, delegates.length) else selectionCount
def mailboxSize(a: ActorRef): Int = a match {
case l: LocalActorRef l.underlying.mailbox.numberOfMessages
case _ Int.MaxValue //Non-local actors mailbox size is unknown, so consider them lowest priority
}
while (take > 0) {
set = delegates.sortWith((a, b) mailboxSize(a) < mailboxSize(b)).take(take) ++ set //Question, doesn't this risk selecting the same actor multiple times?
take -= set.size
}
set
}
}
/**
* Returns the set of delegates that occur sequentially 'after' the last delegate from the previous selection
*/
trait RoundRobinSelector {
private var _last: Int = -1;
/**
* @return the number of delegates that will receive each message
*/
def selectionCount: Int
/**
* If there aren't enough delegates to provide the selectionCount, either
* send the message to fewer, or send the message selectionCount times
* including more than once to some of the delegates. This setting does
* not matter if you configure selectionCount to always be less than or
* equal to the number of delegates in the pool.
* @return true to send to fewer delegates or false to send to duplicate delegates
*/
def partialFill: Boolean
def select(delegates: Seq[ActorRef]): Seq[ActorRef] = {
val length = delegates.length
val take = if (partialFill) math.min(selectionCount, length)
else selectionCount
val set =
for (i 0 until take) yield {
_last = (_last + 1) % length
delegates(_last)
}
set
}
}
/**
* Capacitors
*
* These traits define how to alter the size of the pool according to some desired behavior.
* Capacitors are required (minimally) by the pool to establish bounds on the number of delegates
* that may exist in the pool.
*/
/**
* Ensures a fixed number of delegates in the pool
*/
trait FixedSizeCapacitor {
/**
* @return the fixed number of delegates the pool should have
*/
def limit: Int
def capacity(delegates: Seq[ActorRef]): Int = (limit - delegates.size) max 0
}
/**
* Constrains the number of delegates to a bounded range.
* You probably don't want to use this trait directly,
* instead look at [[akka.routing.CapacityStrategy]] and [[akka.routing.BoundedCapacityStrategy]].
* To use this trait you have to implement _eval() which is provided by
* [[akka.routing.BoundedCapacityStrategy]] in terms of pressure() and filter()
* methods.
*/
trait BoundedCapacitor {
/**
* @return the fewest delegates the pool should ever have
*/
def lowerBound: Int
/**
* @return the most delegates the pool should ever have
*/
def upperBound: Int
def capacity(delegates: Seq[ActorRef]): Int = {
val current = delegates length
val delta = _eval(delegates)
val proposed = current + delta
if (proposed < lowerBound) delta + (lowerBound - proposed)
else if (proposed > upperBound) delta - (proposed - upperBound)
else delta
}
/**
* This method is defined when you mix in [[akka.routing.CapacityStrategy]]; it
* returns the "raw" proposed delta which is then clamped by
* lowerBound and upperBound.
* @return proposed delta ignoring bounds
*/
protected def _eval(delegates: Seq[ActorRef]): Int
}
/**
* Implements pressure() to return the number of delegates with overly-full mailboxes,
* where the pressureThreshold method defines what counts as overly-full.
*/
trait MailboxPressureCapacitor {
/**
* The pressure will be the number of delegates with at least
* pressureThreshold messages in their mailbox.
* @return mailbox size that counts as pressure
*/
def pressureThreshold: Int
def pressure(delegates: Seq[ActorRef]): Int =
delegates count {
case a: LocalActorRef a.underlying.mailbox.numberOfMessages > pressureThreshold
case _ false
}
}
/**
* Implements pressure() to return the number of actors currently processing a
* message.
* In other words, this capacitor counts how many
* delegates are tied up actively processing a message
*/
trait ActiveActorsPressureCapacitor {
def pressure(delegates: Seq[ActorRef]): Int =
delegates count {
case a: LocalActorRef
val cell = a.underlying
cell.mailbox.isScheduled && cell.currentMessage != null
case _ false
}
}
/**
* A [[akka.routing.CapacityStrategy]] implements methods pressure() and filter(), where
* pressure() returns the number of "busy" delegates, and filter() computes
* a proposed delta (positive, negative, or zero) in the size of the delegate
* pool.
*/
trait CapacityStrategy {
import ActorPool._
/**
* This method returns the number of delegates considered busy, or 'pressure level',
 * which will be fed into the capacitor and evaluated against the established threshold.
* For instance, in general, if the current pressure level exceeds the capacity of the
* pool, new delegates will be added.
* @param delegates the current pool of delegates
* @return number of busy delegates, between 0 and delegates.length
*/
def pressure(delegates: Seq[ActorRef]): Int
/**
* This method can be used to smooth the response of the capacitor by considering
* the current pressure and current capacity.
*
* @param pressure current number of busy delegates
* @param capacity current number of delegates
* @return proposed change in the capacity
*/
def filter(pressure: Int, capacity: Int): Int
/**
 * Overrides the _eval() method in [[akka.routing.BoundedCapacitor]],
* using filter and pressure to compute a proposed delta.
* @param delegates current delegates
* @return proposed delta in capacity
*/
protected def _eval(delegates: Seq[ActorRef]): Int = filter(pressure(delegates), delegates.size)
}
/**
* Use this trait to setup a pool that uses a fixed delegate count.
*/
trait FixedCapacityStrategy extends FixedSizeCapacitor
/**
* Use this trait to setup a pool that may have a variable number of
* delegates but always within an established upper and lower limit.
*
* If mix this into your pool implementation, you must also provide a
* PressureCapacitor and a Filter.
*/
trait BoundedCapacityStrategy extends CapacityStrategy with BoundedCapacitor
/**
* Filters
* These traits compute a proposed capacity delta from the pressure (pressure
* is the number of busy delegates) and the current capacity.
*/
/**
 * The basic filter trait that composes the ramp-up and back-off subfilters.
* filter() is defined to be the sum of rampup() and backoff().
*/
trait Filter {
/**
* Computes a proposed positive (or zero) capacity delta.
* @param pressure the current number of busy delegates
* @param capacity the current number of total delegates
* @return proposed increase in capacity
*/
def rampup(pressure: Int, capacity: Int): Int
/**
* Computes a proposed negative (or zero) capacity delta.
* @param pressure the current number of busy delegates
* @param capacity the current number of total delegates
* @return proposed decrease in capacity (as a negative number)
*/
def backoff(pressure: Int, capacity: Int): Int
// pass through both filters just to be sure any internal counters
// are updated consistently. ramping up is always + and backing off
// is always - and each should return 0 otherwise...
def filter(pressure: Int, capacity: Int): Int =
rampup(pressure, capacity) + backoff(pressure, capacity)
}
/**
* This trait is a convenient shorthand to use the [[akka.routing.BasicRampup]]
* and [[akka.routing.BasicBackoff]] subfilters together.
*/
trait BasicFilter extends Filter with BasicRampup with BasicBackoff
/**
* Filter performs steady incremental growth using only the basic ramp-up subfilter.
* The pool of delegates never gets smaller, only larger.
*/
trait BasicNoBackoffFilter extends BasicRampup {
def filter(pressure: Int, capacity: Int): Int = rampup(pressure, capacity)
}
/**
* Basic incremental growth as a percentage of the current pool capacity.
* Whenever pressure reaches capacity (i.e. all delegates are busy),
* the capacity is increased by a percentage.
*/
trait BasicRampup {
/**
* Percentage to increase capacity whenever all delegates are busy.
* For example, 0.2 would increase 20%, etc.
* @return percentage increase in capacity when delegates are all busy.
*/
def rampupRate: Double
def rampup(pressure: Int, capacity: Int): Int =
if (pressure < capacity) 0 else math.ceil(rampupRate * capacity) toInt
}
/**
* Basic decrement as a percentage of the current pool capacity.
* Whenever pressure as a percentage of capacity falls below the
* backoffThreshold, capacity is reduced by the backoffRate.
*/
trait BasicBackoff {
/**
* Fraction of capacity the pool has to fall below before backing off.
* For example, if this is 0.7, then we'll remove some delegates when
* less than 70% of delegates are busy.
* @return fraction of busy delegates where we start to backoff
*/
def backoffThreshold: Double
/**
* Fraction of delegates to be removed when the pool reaches the
* backoffThreshold.
* @return percentage of delegates to remove
*/
def backoffRate: Double
def backoff(pressure: Int, capacity: Int): Int =
    if (capacity > 0 && pressure.toDouble / capacity < backoffThreshold) math.ceil(-1.0 * backoffRate * capacity).toInt else 0
}
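// A small standalone illustration (not from this file) of how the combined
// rampup/backoff filter behaves; the rates are hypothetical.
object BasicFilterSketch extends App {
  val f = new BasicFilter {
    def rampupRate = 0.2
    def backoffThreshold = 0.3
    def backoffRate = 0.1
  }
  println(f.filter(pressure = 10, capacity = 10)) //  2: all busy, ramp up by ceil(0.2 * 10)
  println(f.filter(pressure = 5, capacity = 10))  //  0: 50% busy, neither rampup nor backoff applies
  println(f.filter(pressure = 2, capacity = 10))  // -1: only 20% busy, back off by ceil(0.1 * 10)
}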
/**
* This filter tracks the average pressure over the lifetime of the pool (or since last reset) and
* will begin to reduce capacity once this value drops below the provided threshold. The number of
* delegates to cull from the pool is determined by some scaling factor (the backoffRate) multiplied
* by the difference in capacity and pressure.
*
* In essence, [[akka.routing.RunningMeanBackoff]] works the same way as [[akka.routing.BasicBackoff]]
* except that it uses
* a running mean pressure and capacity rather than the current pressure and capacity.
*/
trait RunningMeanBackoff {
/**
* Fraction of mean capacity the pool has to fall below before backing off.
* For example, if this is 0.7, then we'll remove some delegates when
* less than 70% of delegates are busy on average.
* @return fraction of busy delegates where we start to backoff
*/
def backoffThreshold: Double
/**
* The fraction of delegates to be removed when the running mean reaches the
* backoffThreshold.
* @return percentage reduction in capacity
*/
def backoffRate: Double
private var _pressure: Double = 0.0
private var _capacity: Double = 0.0
def backoff(pressure: Int, capacity: Int): Int = {
_pressure += pressure
_capacity += capacity
    if (capacity > 0 && pressure.toDouble / capacity < backoffThreshold
&& _capacity > 0 && _pressure / _capacity < backoffThreshold) //Why does the entire clause need to be true?
math.floor(-1.0 * backoffRate * (capacity - pressure)).toInt
else 0
}
/**
* Resets the running mean pressure and capacity.
   * This is never invoked by the library; you have to do
   * it by hand at the points in time where it makes
   * sense.
*/
def backoffReset {
_pressure = 0.0
_capacity = 0.0
}
}
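// A standalone sketch (not from this file) of composing BasicRampup with
// RunningMeanBackoff into a Filter and resetting its history; the rates are hypothetical.
object RunningMeanBackoffSketch extends App {
  val f = new Filter with BasicRampup with RunningMeanBackoff {
    def rampupRate = 0.1
    def backoffThreshold = 0.5
    def backoffRate = 0.25
  }
  // Pressure well below capacity: both the instantaneous and the running-mean ratio fall
  // under the threshold, so a negative delta proportional to (capacity - pressure) results.
  println(f.filter(pressure = 1, capacity = 10)) // -3 = floor(-0.25 * (10 - 1))
  f.backoffReset                                 // discard the accumulated running mean
}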

View file

@ -4,12 +4,16 @@
package akka.routing
import akka.actor._
import java.util.concurrent.atomic.AtomicInteger
import scala.collection.JavaConversions._
import akka.dispatch.{ Future, Promise }
import java.util.concurrent.atomic.AtomicLong
import java.util.concurrent.atomic.AtomicBoolean
import java.util.concurrent.TimeUnit
import akka.util.{ Duration, Timeout }
import akka.util.duration._
import com.typesafe.config.Config
import akka.config.ConfigurationException
import akka.dispatch.Promise
import akka.pattern.AskSupport
import scala.collection.JavaConversions.iterableAsScalaIterable
/**
* A RoutedActorRef is an ActorRef that has a set of connected ActorRef and it uses a Router to
@ -22,11 +26,41 @@ private[akka] class RoutedActorRef(_system: ActorSystemImpl, _props: Props, _sup
_supervisor,
_path) {
private val routeeProps = _props.copy(routerConfig = NoRouter)
private val resizeProgress = new AtomicBoolean
private val resizeCounter = new AtomicLong
@volatile
private[akka] var _routees: Vector[ActorRef] = _ // this MUST be initialized during createRoute
private var _routees: IndexedSeq[ActorRef] = IndexedSeq.empty[ActorRef] // this MUST be initialized during createRoute
def routees = _routees
val route = _props.routerConfig.createRoute(_props.copy(routerConfig = NoRouter), actorContext, this)
/**
* Adds the routees to existing routees.
* Adds death watch of the routees so that they are removed when terminated.
* Not thread safe, but intended to be called from protected points, such as
* `RouterConfig.createRoute` and `Resizer.resize`
*/
private[akka] def addRoutees(newRoutees: IndexedSeq[ActorRef]) {
_routees = _routees ++ newRoutees
// subscribe to Terminated messages for all route destinations, to be handled by Router actor
newRoutees foreach underlying.watch
}
/**
   * Removes the given routees from the existing routees.
* Removes death watch of the routees. Doesn't stop the routees.
* Not thread safe, but intended to be called from protected points, such as
* `Resizer.resize`
*/
private[akka] def removeRoutees(abandonedRoutees: IndexedSeq[ActorRef]) {
_routees = _routees diff abandonedRoutees
abandonedRoutees foreach underlying.unwatch
}
private val routeeProvider = _props.routerConfig.createRouteeProvider(actorContext)
val route = _props.routerConfig.createRoute(routeeProps, routeeProvider)
// initial resize, before message send
resize()
def applyRoute(sender: ActorRef, message: Any): Iterable[Destination] = message match {
case _: AutoReceivedMessage Nil
@ -39,15 +73,16 @@ private[akka] class RoutedActorRef(_system: ActorSystemImpl, _props: Props, _sup
else Nil
}
if (_props.routerConfig.resizer.isEmpty && _routees.isEmpty)
throw new ActorInitializationException("router " + _props.routerConfig + " did not register routees!")
_routees match {
case null throw new ActorInitializationException("router " + _props.routerConfig + " did not register routees!")
case x
_routees = x // volatile write to publish the route before sending messages
// subscribe to Terminated messages for all route destinations, to be handled by Router actor
_routees foreach underlying.watch
case x _routees = x // volatile write to publish the route before sending messages
}
override def !(message: Any)(implicit sender: ActorRef = null): Unit = {
resize()
val s = if (sender eq null) underlying.system.deadLetters else sender
val msg = message match {
@ -60,6 +95,18 @@ private[akka] class RoutedActorRef(_system: ActorSystemImpl, _props: Props, _sup
case refs refs foreach (p p.recipient.!(msg)(p.sender))
}
}
def resize() {
for (r _props.routerConfig.resizer) {
if (r.isTimeForResize(resizeCounter.getAndIncrement()) && resizeProgress.compareAndSet(false, true)) {
try {
r.resize(routeeProps, routeeProvider)
} finally {
resizeProgress.set(false)
}
}
}
}
}
/**
@ -75,40 +122,120 @@ private[akka] class RoutedActorRef(_system: ActorSystemImpl, _props: Props, _sup
* do the locking yourself!
*
* '''Caution:''' Please note that the [[akka.routing.Router]] which needs to
* be returned by `apply()` should not send a message to itself in its
* be returned by `createActor()` should not send a message to itself in its
* constructor or `preStart()` or publish its self reference from there: if
* someone tries sending a message to that reference before the constructor of
* RoutedActorRef has returned, there will be a `NullPointerException`!
*/
trait RouterConfig {
def createRoute(props: Props, actorContext: ActorContext, ref: RoutedActorRef): Route
def createRoute(routeeProps: Props, routeeProvider: RouteeProvider): Route
def createRouteeProvider(context: ActorContext) = new RouteeProvider(context, resizer)
def createActor(): Router = new Router {}
def adaptFromDeploy(deploy: Option[Deploy]): RouterConfig = {
deploy match {
case Some(Deploy(_, _, _, NoRouter, _)) this
case Some(Deploy(_, _, _, r, _)) r
case _ this
case Some(Deploy(_, _, NoRouter, _)) this
case Some(Deploy(_, _, r, _)) r
case _ this
}
}
protected def toAll(sender: ActorRef, routees: Iterable[ActorRef]): Iterable[Destination] = routees.map(Destination(sender, _))
protected def createRoutees(props: Props, context: ActorContext, nrOfInstances: Int, routees: Iterable[String]): Vector[ActorRef] = (nrOfInstances, routees) match {
case (0, Nil) throw new IllegalArgumentException("Insufficient information - missing configuration.")
case (x, Nil) (1 to x).map(_ context.actorOf(props))(scala.collection.breakOut)
case (_, xs) xs.map(context.actorFor(_))(scala.collection.breakOut)
/**
* Routers with dynamically resizable number of routees return the [[akka.routing.Resizer]]
* to use.
*/
def resizer: Option[Resizer] = None
}
/**
* Factory and registry for routees of the router.
 * Uses `context.actorOf` to create routees from the nrOfInstances property
 * and `context.actorFor` to look up routees from paths.
*/
class RouteeProvider(val context: ActorContext, val resizer: Option[Resizer]) {
/**
* Adds the routees to the router.
* Adds death watch of the routees so that they are removed when terminated.
* Not thread safe, but intended to be called from protected points, such as
* `RouterConfig.createRoute` and `Resizer.resize`.
*/
def registerRoutees(routees: IndexedSeq[ActorRef]): Unit = {
routedRef.addRoutees(routees)
}
protected def createAndRegisterRoutees(props: Props, context: ActorContext, nrOfInstances: Int, routees: Iterable[String]): Unit = {
registerRoutees(context, createRoutees(props, context, nrOfInstances, routees))
/**
* Adds the routees to the router.
* Adds death watch of the routees so that they are removed when terminated.
* Not thread safe, but intended to be called from protected points, such as
* `RouterConfig.createRoute` and `Resizer.resize`.
* Java API.
*/
def registerRoutees(routees: java.util.List[ActorRef]): Unit = {
import scala.collection.JavaConverters._
registerRoutees(routees.asScala.toIndexedSeq)
}
protected def registerRoutees(context: ActorContext, routees: Vector[ActorRef]): Unit = {
context.self.asInstanceOf[RoutedActorRef]._routees = routees
/**
* Removes routees from the router. This method doesn't stop the routees.
* Removes death watch of the routees.
* Not thread safe, but intended to be called from protected points, such as
* `Resizer.resize`.
*/
def unregisterRoutees(routees: IndexedSeq[ActorRef]): Unit = {
routedRef.removeRoutees(routees)
}
def createRoutees(props: Props, nrOfInstances: Int, routees: Iterable[String]): IndexedSeq[ActorRef] =
(nrOfInstances, routees) match {
case (x, Nil) if x <= 0
throw new IllegalArgumentException(
"Must specify nrOfInstances or routees for [%s]" format context.self.path.toString)
case (x, Nil) (1 to x).map(_ context.actorOf(props))(scala.collection.breakOut)
case (_, xs) xs.map(context.actorFor(_))(scala.collection.breakOut)
}
def createAndRegisterRoutees(props: Props, nrOfInstances: Int, routees: Iterable[String]): Unit = {
if (resizer.isEmpty) {
registerRoutees(createRoutees(props, nrOfInstances, routees))
}
}
/**
* All routees of the router
*/
def routees: IndexedSeq[ActorRef] = routedRef.routees
private def routedRef = context.self.asInstanceOf[RoutedActorRef]
}
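// A hedged sketch (not from this file) of a custom RouterConfig built on the
// RouteeProvider above: routees are created or looked up once, broadcasts go to all of
// them, and every other message goes to the first registered routee. The name is
// hypothetical; the Route body mirrors the built-in routers in this file.
case class FirstRouteeRouter(nrOfInstances: Int = 0, routees: Iterable[String] = Nil) extends RouterConfig {
  def createRoute(props: Props, routeeProvider: RouteeProvider): Route = {
    routeeProvider.createAndRegisterRoutees(props, nrOfInstances, routees)
    {
      case (sender, message) ⇒
        message match {
          case Broadcast(msg) ⇒ toAll(sender, routeeProvider.routees)
          case _              ⇒ List(Destination(sender, routeeProvider.routees.head))
        }
    }
  }
}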
/**
* Java API for a custom router factory.
* @see akka.routing.RouterConfig
*/
abstract class CustomRouterConfig extends RouterConfig {
override def createRoute(props: Props, routeeProvider: RouteeProvider): Route = {
// as a bonus, this prevents closing of props and context in the returned Route PartialFunction
val customRoute = createCustomRoute(props, routeeProvider)
{
case (sender, message) customRoute.destinationsFor(sender, message)
}
}
def createCustomRoute(props: Props, routeeProvider: RouteeProvider): CustomRoute
}
trait CustomRoute {
def destinationsFor(sender: ActorRef, message: Any): java.lang.Iterable[Destination]
}
/**
@ -126,7 +253,7 @@ trait Router extends Actor {
final def receive = ({
case Terminated(child)
ref._routees = ref._routees filterNot (_ == child)
ref.removeRoutees(IndexedSeq(child))
if (ref.routees.isEmpty) context.stop(self)
}: Receive) orElse routerReceive
@ -170,40 +297,52 @@ case class Destination(sender: ActorRef, recipient: ActorRef)
* Oxymoron style.
*/
case object NoRouter extends RouterConfig {
def createRoute(props: Props, actorContext: ActorContext, ref: RoutedActorRef): Route = null
def createRoute(props: Props, routeeProvider: RouteeProvider): Route = null
}
/**
* Router configuration which has no default, i.e. external configuration is required.
*/
case object FromConfig extends RouterConfig {
def createRoute(props: Props, actorContext: ActorContext, ref: RoutedActorRef): Route =
throw new ConfigurationException("router " + ref + " needs external configuration from file (e.g. application.conf)")
def createRoute(props: Props, routeeProvider: RouteeProvider): Route =
throw new ConfigurationException("router " + routeeProvider.context.self + " needs external configuration from file (e.g. application.conf)")
}
/**
* Java API: Router configuration which has no default, i.e. external configuration is required.
*/
case class FromConfig() extends RouterConfig {
def createRoute(props: Props, actorContext: ActorContext, ref: RoutedActorRef): Route =
throw new ConfigurationException("router " + ref + " needs external configuration from file (e.g. application.conf)")
def createRoute(props: Props, routeeProvider: RouteeProvider): Route =
throw new ConfigurationException("router " + routeeProvider.context.self + " needs external configuration from file (e.g. application.conf)")
}
object RoundRobinRouter {
def apply(routees: Iterable[ActorRef]) = new RoundRobinRouter(routees = routees map (_.path.toString))
/**
* Java API to create router with the supplied 'routees' actors.
*/
def create(routees: java.lang.Iterable[ActorRef]): RoundRobinRouter = {
import scala.collection.JavaConverters._
apply(routees.asScala)
}
}
/**
* A Router that uses round-robin to select a connection. For concurrent calls, round robin is just a best effort.
* <br>
* Please note that providing both 'nrOfInstances' and 'routees' does not make logical sense as this means
* that the round robin should both create new actors and use the 'routees' actor(s).
* that the router should both create new actors and use the 'routees' actor(s).
* In this case the 'nrOfInstances' will be ignored and the 'routees' will be used.
* <br>
* <b>The</b> configuration parameter trumps the constructor arguments. This means that
* if you provide either 'nrOfInstances' or 'routees' to during instantiation they will
* be ignored if the 'nrOfInstances' is defined in the configuration file for the actor being used.
* if you provide either 'nrOfInstances' or 'routees' during instantiation they will
* be ignored if the router is defined in the configuration file for the actor being used.
*
* @param routees string representation of the actor paths of the routees that will be looked up
* using `actorFor` in [[akka.actor.ActorRefProvider]]
*/
case class RoundRobinRouter(nrOfInstances: Int = 0, routees: Iterable[String] = Nil) extends RouterConfig with RoundRobinLike {
case class RoundRobinRouter(nrOfInstances: Int = 0, routees: Iterable[String] = Nil, override val resizer: Option[Resizer] = None)
extends RouterConfig with RoundRobinLike {
/**
* Constructor that sets nrOfInstances to be created.
@ -216,10 +355,18 @@ case class RoundRobinRouter(nrOfInstances: Int = 0, routees: Iterable[String] =
/**
* Constructor that sets the routees to be used.
* Java API
* @param routeePaths string representation of the actor paths of the routees that will be looked up
* using `actorFor` in [[akka.actor.ActorRefProvider]]
*/
def this(t: java.util.Collection[String]) = {
this(routees = collectionAsScalaIterable(t))
def this(routeePaths: java.lang.Iterable[String]) = {
this(routees = iterableAsScalaIterable(routeePaths))
}
/**
* Constructor that sets the resizer to be used.
* Java API
*/
def this(resizer: Resizer) = this(resizer = Some(resizer))
}
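// A hedged usage sketch (not from this file): Props carries a routerConfig (see
// RoutedActorRef above), so a round-robin pool can be declared directly on the Props of
// a hypothetical Worker actor.
object RoundRobinUsageSketch extends App {
  import akka.actor._

  class Worker extends Actor {
    def receive = { case msg ⇒ println(self.path.name + " got " + msg) }
  }

  val system = ActorSystem("RoundRobinUsage")
  val router = system.actorOf(
    Props[Worker].copy(routerConfig = RoundRobinRouter(nrOfInstances = 5)), "workers")

  (1 to 10) foreach (i ⇒ router ! i) // distributed over the five routees in turn
  router ! Broadcast("flush")        // reaches every routee
  Thread.sleep(500)                  // give the routees a moment before shutting down
  system.shutdown()
}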
trait RoundRobinLike { this: RouterConfig
@ -228,19 +375,20 @@ trait RoundRobinLike { this: RouterConfig ⇒
def routees: Iterable[String]
def createRoute(props: Props, context: ActorContext, ref: RoutedActorRef): Route = {
createAndRegisterRoutees(props, context, nrOfInstances, routees)
def createRoute(props: Props, routeeProvider: RouteeProvider): Route = {
routeeProvider.createAndRegisterRoutees(props, nrOfInstances, routees)
val next = new AtomicInteger(0)
val next = new AtomicLong(0)
def getNext(): ActorRef = {
ref.routees(next.getAndIncrement % ref.routees.size)
val _routees = routeeProvider.routees
_routees((next.getAndIncrement % _routees.size).asInstanceOf[Int])
}
{
case (sender, message)
message match {
case Broadcast(msg) toAll(sender, ref.routees)
case Broadcast(msg) toAll(sender, routeeProvider.routees)
case msg List(Destination(sender, getNext()))
}
}
@ -249,19 +397,31 @@ trait RoundRobinLike { this: RouterConfig ⇒
object RandomRouter {
def apply(routees: Iterable[ActorRef]) = new RandomRouter(routees = routees map (_.path.toString))
/**
* Java API to create router with the supplied 'routees' actors.
*/
def create(routees: java.lang.Iterable[ActorRef]): RandomRouter = {
import scala.collection.JavaConverters._
apply(routees.asScala)
}
}
/**
* A Router that randomly selects one of the target connections to send a message to.
* <br>
* Please note that providing both 'nrOfInstances' and 'routees' does not make logical sense as this means
* that the random router should both create new actors and use the 'routees' actor(s).
* that the router should both create new actors and use the 'routees' actor(s).
* In this case the 'nrOfInstances' will be ignored and the 'routees' will be used.
* <br>
* <b>The</b> configuration parameter trumps the constructor arguments. This means that
* if you provide either 'nrOfInstances' or 'routees' to during instantiation they will
* be ignored if the 'nrOfInstances' is defined in the configuration file for the actor being used.
* if you provide either 'nrOfInstances' or 'routees' during instantiation they will
* be ignored if the router is defined in the configuration file for the actor being used.
*
* @param routees string representation of the actor paths of the routees that will be looked up
* using `actorFor` in [[akka.actor.ActorRefProvider]]
*/
case class RandomRouter(nrOfInstances: Int = 0, routees: Iterable[String] = Nil) extends RouterConfig with RandomLike {
case class RandomRouter(nrOfInstances: Int = 0, routees: Iterable[String] = Nil, override val resizer: Option[Resizer] = None)
extends RouterConfig with RandomLike {
/**
* Constructor that sets nrOfInstances to be created.
@ -274,10 +434,18 @@ case class RandomRouter(nrOfInstances: Int = 0, routees: Iterable[String] = Nil)
/**
* Constructor that sets the routees to be used.
* Java API
* @param routeePaths string representation of the actor paths of the routees that will be looked up
* using `actorFor` in [[akka.actor.ActorRefProvider]]
*/
def this(t: java.util.Collection[String]) = {
this(routees = collectionAsScalaIterable(t))
def this(routeePaths: java.lang.Iterable[String]) = {
this(routees = iterableAsScalaIterable(routeePaths))
}
/**
* Constructor that sets the resizer to be used.
* Java API
*/
def this(resizer: Resizer) = this(resizer = Some(resizer))
}
trait RandomLike { this: RouterConfig
@ -292,38 +460,60 @@ trait RandomLike { this: RouterConfig ⇒
override def initialValue = SecureRandom.getInstance("SHA1PRNG")
}
def createRoute(props: Props, context: ActorContext, ref: RoutedActorRef): Route = {
createAndRegisterRoutees(props, context, nrOfInstances, routees)
def createRoute(props: Props, routeeProvider: RouteeProvider): Route = {
routeeProvider.createAndRegisterRoutees(props, nrOfInstances, routees)
def getNext(): ActorRef = {
ref.routees(random.get.nextInt(ref.routees.size))
val _routees = routeeProvider.routees
_routees(random.get.nextInt(_routees.size))
}
{
case (sender, message)
message match {
case Broadcast(msg) toAll(sender, ref.routees)
case Broadcast(msg) toAll(sender, routeeProvider.routees)
case msg List(Destination(sender, getNext()))
}
}
}
}
object BroadcastRouter {
def apply(routees: Iterable[ActorRef]) = new BroadcastRouter(routees = routees map (_.path.toString))
object SmallestMailboxRouter {
def apply(routees: Iterable[ActorRef]) = new SmallestMailboxRouter(routees = routees map (_.path.toString))
/**
* Java API to create router with the supplied 'routees' actors.
*/
def create(routees: java.lang.Iterable[ActorRef]): SmallestMailboxRouter = {
import scala.collection.JavaConverters._
apply(routees.asScala)
}
}
/**
* A Router that uses broadcasts a message to all its connections.
* A Router that tries to send to the non-suspended routee with fewest messages in mailbox.
* The selection is done in this order:
* <ul>
 * <li>pick any idle routee (not currently processing a message) with an empty mailbox</li>
* <li>pick any routee with empty mailbox</li>
* <li>pick routee with fewest pending messages in mailbox</li>
 * <li>pick any remote routee, remote actors are considered lowest priority,
* since their mailbox size is unknown</li>
* </ul>
*
* <br>
* Please note that providing both 'nrOfInstances' and 'routees' does not make logical sense as this means
* that the random router should both create new actors and use the 'routees' actor(s).
* that the router should both create new actors and use the 'routees' actor(s).
* In this case the 'nrOfInstances' will be ignored and the 'routees' will be used.
* <br>
* <b>The</b> configuration parameter trumps the constructor arguments. This means that
* if you provide either 'nrOfInstances' or 'routees' to during instantiation they will
* be ignored if the 'nrOfInstances' is defined in the configuration file for the actor being used.
* if you provide either 'nrOfInstances' or 'routees' during instantiation they will
* be ignored if the router is defined in the configuration file for the actor being used.
*
* @param routees string representation of the actor paths of the routees that will be looked up
* using `actorFor` in [[akka.actor.ActorRefProvider]]
*/
case class BroadcastRouter(nrOfInstances: Int = 0, routees: Iterable[String] = Nil) extends RouterConfig with BroadcastLike {
case class SmallestMailboxRouter(nrOfInstances: Int = 0, routees: Iterable[String] = Nil, override val resizer: Option[Resizer] = None)
extends RouterConfig with SmallestMailboxLike {
/**
* Constructor that sets nrOfInstances to be created.
@ -336,10 +526,163 @@ case class BroadcastRouter(nrOfInstances: Int = 0, routees: Iterable[String] = N
/**
* Constructor that sets the routees to be used.
* Java API
* @param routeePaths string representation of the actor paths of the routees that will be looked up
* using `actorFor` in [[akka.actor.ActorRefProvider]]
*/
def this(t: java.util.Collection[String]) = {
this(routees = collectionAsScalaIterable(t))
def this(routeePaths: java.lang.Iterable[String]) = {
this(routees = iterableAsScalaIterable(routeePaths))
}
/**
* Constructor that sets the resizer to be used.
* Java API
*/
def this(resizer: Resizer) = this(resizer = Some(resizer))
}
trait SmallestMailboxLike { this: RouterConfig
import java.security.SecureRandom
def nrOfInstances: Int
def routees: Iterable[String]
private val random = new ThreadLocal[SecureRandom] {
override def initialValue = SecureRandom.getInstance("SHA1PRNG")
}
/**
* Returns true if the actor is currently processing a message.
* It will always return false for remote actors.
* Method is exposed to subclasses to be able to implement custom
* routers based on mailbox and actor internal state.
*/
protected def isProcessingMessage(a: ActorRef): Boolean = a match {
case x: LocalActorRef
val cell = x.underlying
cell.mailbox.isScheduled && cell.currentMessage != null
case _ false
}
/**
* Returns true if the actor currently has any pending messages
* in the mailbox, i.e. the mailbox is not empty.
* It will always return false for remote actors.
* Method is exposed to subclasses to be able to implement custom
* routers based on mailbox and actor internal state.
*/
protected def hasMessages(a: ActorRef): Boolean = a match {
case x: LocalActorRef x.underlying.mailbox.hasMessages
case _ false
}
/**
* Returns true if the actor is currently suspended.
* It will always return false for remote actors.
* Method is exposed to subclasses to be able to implement custom
* routers based on mailbox and actor internal state.
*/
protected def isSuspended(a: ActorRef): Boolean = a match {
case x: LocalActorRef
val cell = x.underlying
cell.mailbox.isSuspended
case _ false
}
/**
* Returns the number of pending messages in the mailbox of the actor.
* It will always return 0 for remote actors.
* Method is exposed to subclasses to be able to implement custom
* routers based on mailbox and actor internal state.
*/
protected def numberOfMessages(a: ActorRef): Int = a match {
case x: LocalActorRef x.underlying.mailbox.numberOfMessages
case _ 0
}
def createRoute(props: Props, routeeProvider: RouteeProvider): Route = {
routeeProvider.createAndRegisterRoutees(props, nrOfInstances, routees)
def getNext(): ActorRef = {
      // non-local actors' mailbox size is unknown, so consider them lowest priority
val activeLocal = routeeProvider.routees collect { case l: LocalActorRef if !isSuspended(l) l }
// 1. anyone not processing message and with empty mailbox
activeLocal.find(a !isProcessingMessage(a) && !hasMessages(a)) getOrElse {
// 2. anyone with empty mailbox
activeLocal.find(a !hasMessages(a)) getOrElse {
// 3. sort on mailbox size
activeLocal.sortBy(a numberOfMessages(a)).headOption getOrElse {
// 4. no locals, just pick one, random
val _routees = routeeProvider.routees
_routees(random.get.nextInt(_routees.size))
}
}
}
}
{
case (sender, message)
message match {
case Broadcast(msg) toAll(sender, routeeProvider.routees)
case msg List(Destination(sender, getNext()))
}
}
}
}
object BroadcastRouter {
def apply(routees: Iterable[ActorRef]) = new BroadcastRouter(routees = routees map (_.path.toString))
/**
* Java API to create router with the supplied 'routees' actors.
*/
def create(routees: java.lang.Iterable[ActorRef]): BroadcastRouter = {
import scala.collection.JavaConverters._
apply(routees.asScala)
}
}
/**
 * A Router that broadcasts a message to all its connections.
* <br>
* Please note that providing both 'nrOfInstances' and 'routees' does not make logical sense as this means
* that the router should both create new actors and use the 'routees' actor(s).
* In this case the 'nrOfInstances' will be ignored and the 'routees' will be used.
* <br>
* <b>The</b> configuration parameter trumps the constructor arguments. This means that
* if you provide either 'nrOfInstances' or 'routees' during instantiation they will
* be ignored if the router is defined in the configuration file for the actor being used.
*
* @param routees string representation of the actor paths of the routees that will be looked up
* using `actorFor` in [[akka.actor.ActorRefProvider]]
*/
case class BroadcastRouter(nrOfInstances: Int = 0, routees: Iterable[String] = Nil, override val resizer: Option[Resizer] = None)
extends RouterConfig with BroadcastLike {
/**
* Constructor that sets nrOfInstances to be created.
* Java API
*/
def this(nr: Int) = {
this(nrOfInstances = nr)
}
/**
* Constructor that sets the routees to be used.
* Java API
* @param routeePaths string representation of the actor paths of the routees that will be looked up
* using `actorFor` in [[akka.actor.ActorRefProvider]]
*/
def this(routeePaths: java.lang.Iterable[String]) = {
this(routees = iterableAsScalaIterable(routeePaths))
}
/**
* Constructor that sets the resizer to be used.
* Java API
*/
def this(resizer: Resizer) = this(resizer = Some(resizer))
}
trait BroadcastLike { this: RouterConfig
@ -348,30 +691,42 @@ trait BroadcastLike { this: RouterConfig ⇒
def routees: Iterable[String]
def createRoute(props: Props, context: ActorContext, ref: RoutedActorRef): Route = {
createAndRegisterRoutees(props, context, nrOfInstances, routees)
def createRoute(props: Props, routeeProvider: RouteeProvider): Route = {
routeeProvider.createAndRegisterRoutees(props, nrOfInstances, routees)
{
case (sender, message) toAll(sender, ref.routees)
case (sender, message) toAll(sender, routeeProvider.routees)
}
}
}
object ScatterGatherFirstCompletedRouter {
def apply(routees: Iterable[ActorRef], within: Duration) = new ScatterGatherFirstCompletedRouter(routees = routees map (_.path.toString), within = within)
/**
* Java API to create router with the supplied 'routees' actors.
*/
def create(routees: java.lang.Iterable[ActorRef], within: Duration): ScatterGatherFirstCompletedRouter = {
import scala.collection.JavaConverters._
apply(routees.asScala, within)
}
}
/**
* Simple router that broadcasts the message to all routees, and replies with the first response.
* <br>
* Please note that providing both 'nrOfInstances' and 'routees' does not make logical sense as this means
* that the random router should both create new actors and use the 'routees' actor(s).
* that the router should both create new actors and use the 'routees' actor(s).
* In this case the 'nrOfInstances' will be ignored and the 'routees' will be used.
* <br>
* <b>The</b> configuration parameter trumps the constructor arguments. This means that
* if you provide either 'nrOfInstances' or 'routees' to during instantiation they will
* be ignored if the 'nrOfInstances' is defined in the configuration file for the actor being used.
* if you provide either 'nrOfInstances' or 'routees' during instantiation they will
* be ignored if the router is defined in the configuration file for the actor being used.
*
* @param routees string representation of the actor paths of the routees that will be looked up
* using `actorFor` in [[akka.actor.ActorRefProvider]]
*/
case class ScatterGatherFirstCompletedRouter(nrOfInstances: Int = 0, routees: Iterable[String] = Nil, within: Duration)
case class ScatterGatherFirstCompletedRouter(nrOfInstances: Int = 0, routees: Iterable[String] = Nil, within: Duration,
override val resizer: Option[Resizer] = None)
extends RouterConfig with ScatterGatherFirstCompletedLike {
/**
@ -385,10 +740,18 @@ case class ScatterGatherFirstCompletedRouter(nrOfInstances: Int = 0, routees: It
/**
* Constructor that sets the routees to be used.
* Java API
* @param routeePaths string representation of the actor paths of the routees that will be looked up
* using `actorFor` in [[akka.actor.ActorRefProvider]]
*/
def this(t: java.util.Collection[String], w: Duration) = {
this(routees = collectionAsScalaIterable(t), within = w)
def this(routeePaths: java.lang.Iterable[String], w: Duration) = {
this(routees = iterableAsScalaIterable(routeePaths), within = w)
}
/**
* Constructor that sets the resizer to be used.
* Java API
*/
def this(resizer: Resizer, w: Duration) = this(resizer = Some(resizer), within = w)
}
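// A hedged usage sketch (not from this file), assuming the ask pattern import
// (akka.pattern.ask), akka.dispatch.Await and akka.util.duration from this code base:
// the question is broadcast to all routees and the future completes with the first
// reply. The Echo actor and names are hypothetical.
object ScatterGatherUsageSketch extends App {
  import akka.actor._
  import akka.pattern.ask
  import akka.dispatch.Await
  import akka.util.Timeout
  import akka.util.duration._

  class Echo extends Actor {
    def receive = { case _ ⇒ sender ! self.path.name }
  }

  val system = ActorSystem("ScatterGatherUsage")
  val router = system.actorOf(Props[Echo].copy(routerConfig =
    ScatterGatherFirstCompletedRouter(nrOfInstances = 3, within = 2.seconds)))

  implicit val timeout = Timeout(5.seconds)
  val firstReply = Await.result(router ? "ping", timeout.duration)
  println(firstReply) // whichever routee answered first
  system.shutdown()
}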
trait ScatterGatherFirstCompletedLike { this: RouterConfig
@ -399,15 +762,240 @@ trait ScatterGatherFirstCompletedLike { this: RouterConfig ⇒
def within: Duration
def createRoute(props: Props, context: ActorContext, ref: RoutedActorRef): Route = {
createAndRegisterRoutees(props, context, nrOfInstances, routees)
def createRoute(props: Props, routeeProvider: RouteeProvider): Route = {
routeeProvider.createAndRegisterRoutees(props, nrOfInstances, routees)
{
case (sender, message)
val provider: ActorRefProvider = context.asInstanceOf[ActorCell].systemImpl.provider
val provider: ActorRefProvider = routeeProvider.context.asInstanceOf[ActorCell].systemImpl.provider
val asker = AskSupport.createAsker(provider, within)
asker.result.pipeTo(sender)
toAll(asker, ref.routees)
toAll(asker, routeeProvider.routees)
}
}
}
/**
* Routers with dynamically resizable number of routees is implemented by providing a Resizer
* implementation in [[akka.routing.RouterConfig]].
*/
trait Resizer {
/**
   * Decide whether it is time for resizing. Typically implemented with a modulo of the
   * message counter, but it could be based on elapsed time or something else. The
   * messageCounter starts with 0 for the initial resize and continues with 1 for the
   * first message. Make sure to perform the initial resize before the first message
   * (messageCounter == 0), because there is no guarantee that a resize will be done
   * when concurrent messages are in play.
*/
def isTimeForResize(messageCounter: Long): Boolean
/**
   * Decide if the capacity of the router needs to be changed. Will be invoked when `isTimeForResize`
   * returns true and no other resize is in progress.
   * Create and register more routees with `routeeProvider.registerRoutees(newRoutees)`
   * or remove routees with `routeeProvider.unregisterRoutees(abandonedRoutees)` and
   * then send them [[akka.actor.PoisonPill]].
*/
def resize(props: Props, routeeProvider: RouteeProvider)
}
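// A hedged sketch (not from this file) of a minimal custom Resizer: it checks on every
// 100th message and simply tops the pool up to a fixed size. The name and numbers are
// hypothetical.
case class TopUpResizer(size: Int = 5) extends Resizer {
  // must also return true for messageCounter == 0 so the initial resize happens
  def isTimeForResize(messageCounter: Long): Boolean = messageCounter % 100 == 0

  def resize(props: Props, routeeProvider: RouteeProvider) {
    val missing = size - routeeProvider.routees.length
    if (missing > 0)
      routeeProvider.registerRoutees(routeeProvider.createRoutees(props, missing, Nil))
  }
}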
case object DefaultResizer {
def apply(resizerConfig: Config): DefaultResizer =
DefaultResizer(
lowerBound = resizerConfig.getInt("lower-bound"),
upperBound = resizerConfig.getInt("upper-bound"),
pressureThreshold = resizerConfig.getInt("pressure-threshold"),
rampupRate = resizerConfig.getDouble("rampup-rate"),
backoffThreshold = resizerConfig.getDouble("backoff-threshold"),
backoffRate = resizerConfig.getDouble("backoff-rate"),
stopDelay = Duration(resizerConfig.getMilliseconds("stop-delay"), TimeUnit.MILLISECONDS),
messagesPerResize = resizerConfig.getInt("messages-per-resize"))
}
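// A hedged sketch (not from this file) showing the configuration keys that
// DefaultResizer(resizerConfig) reads; the values below are hypothetical and are parsed
// inline only for illustration.
object DefaultResizerFromConfigSketch extends App {
  import com.typesafe.config.ConfigFactory

  val resizerConfig = ConfigFactory.parseString("""
    lower-bound = 2
    upper-bound = 15
    pressure-threshold = 1
    rampup-rate = 0.2
    backoff-threshold = 0.3
    backoff-rate = 0.1
    stop-delay = 1s
    messages-per-resize = 10
  """)

  println(DefaultResizer(resizerConfig))
}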
case class DefaultResizer(
/**
   * The minimum number of routees the router should ever have.
*/
lowerBound: Int = 1,
/**
   * The maximum number of routees the router should ever have.
* Must be greater than or equal to `lowerBound`.
*/
upperBound: Int = 10,
/**
* Threshold to evaluate if routee is considered to be busy (under pressure).
* Implementation depends on this value (default is 1).
* <ul>
* <li> 0: number of routees currently processing a message.</li>
   * <li> 1: number of routees currently processing a message and with
   *           some messages in their mailbox.</li>
   * <li> > 1: number of routees with at least the configured `pressureThreshold`
   *           messages in their mailbox. Note that estimating the mailbox size of the
   *           default UnboundedMailbox is an O(N) operation.</li>
* </ul>
*/
pressureThreshold: Int = 1,
/**
* Percentage to increase capacity whenever all routees are busy.
* For example, 0.2 would increase 20% (rounded up), i.e. if current
* capacity is 6 it will request an increase of 2 more routees.
*/
rampupRate: Double = 0.2,
/**
* Minimum fraction of busy routees before backing off.
* For example, if this is 0.3, then we'll remove some routees only when
* less than 30% of routees are busy, i.e. if current capacity is 10 and
* 3 are busy then the capacity is unchanged, but if 2 or less are busy
* the capacity is decreased.
*
* Use 0.0 or negative to avoid removal of routees.
*/
backoffThreshold: Double = 0.3,
/**
* Fraction of routees to be removed when the resizer reaches the
* backoffThreshold.
* For example, 0.1 would decrease 10% (rounded up), i.e. if current
   * capacity is 9 it will request a decrease of 1 routee.
*/
backoffRate: Double = 0.1,
/**
   * When the resizer reduces the capacity, the abandoned routee actors are stopped
* with PoisonPill after this delay. The reason for the delay is to give concurrent
* messages a chance to be placed in mailbox before sending PoisonPill.
* Use 0 seconds to skip delay.
*/
stopDelay: Duration = 1.second,
/**
   * Number of messages between resize operations.
* Use 1 to resize before each message.
*/
messagesPerResize: Int = 10) extends Resizer {
/**
* Java API constructor for default values except bounds.
*/
def this(lower: Int, upper: Int) = this(lowerBound = lower, upperBound = upper)
if (lowerBound < 0) throw new IllegalArgumentException("lowerBound must be >= 0, was: [%s]".format(lowerBound))
if (upperBound < 0) throw new IllegalArgumentException("upperBound must be >= 0, was: [%s]".format(upperBound))
if (upperBound < lowerBound) throw new IllegalArgumentException("upperBound must be >= lowerBound, was: [%s] < [%s]".format(upperBound, lowerBound))
if (rampupRate < 0.0) throw new IllegalArgumentException("rampupRate must be >= 0.0, was [%s]".format(rampupRate))
if (backoffThreshold > 1.0) throw new IllegalArgumentException("backoffThreshold must be <= 1.0, was [%s]".format(backoffThreshold))
if (backoffRate < 0.0) throw new IllegalArgumentException("backoffRate must be >= 0.0, was [%s]".format(backoffRate))
if (messagesPerResize <= 0) throw new IllegalArgumentException("messagesPerResize must be > 0, was [%s]".format(messagesPerResize))
def isTimeForResize(messageCounter: Long): Boolean = (messageCounter % messagesPerResize == 0)
def resize(props: Props, routeeProvider: RouteeProvider) {
val currentRoutees = routeeProvider.routees
val requestedCapacity = capacity(currentRoutees)
if (requestedCapacity > 0) {
val newRoutees = routeeProvider.createRoutees(props, requestedCapacity, Nil)
routeeProvider.registerRoutees(newRoutees)
} else if (requestedCapacity < 0) {
val (keep, abandon) = currentRoutees.splitAt(currentRoutees.length + requestedCapacity)
routeeProvider.unregisterRoutees(abandon)
delayedStop(routeeProvider.context.system.scheduler, abandon)
}
}
/**
* Give concurrent messages a chance to be placed in mailbox before
* sending PoisonPill.
*/
protected def delayedStop(scheduler: Scheduler, abandon: IndexedSeq[ActorRef]) {
if (abandon.nonEmpty) {
if (stopDelay <= Duration.Zero) {
abandon foreach (_ ! PoisonPill)
} else {
scheduler.scheduleOnce(stopDelay) {
abandon foreach (_ ! PoisonPill)
}
}
}
}
/**
   * Returns the overall desired change in router capacity. A positive value will
   * add routees to the router, a negative value will remove routees from the
   * router.
   * @param routees the current routees of the router
   * @return the number of routees by which the capacity should be adjusted (positive, negative or zero)
*/
def capacity(routees: IndexedSeq[ActorRef]): Int = {
val currentSize = routees.size
val delta = filter(pressure(routees), currentSize)
val proposed = currentSize + delta
if (proposed < lowerBound) delta + (lowerBound - proposed)
else if (proposed > upperBound) delta - (proposed - upperBound)
else delta
}
/**
* Number of routees considered busy, or above 'pressure level'.
*
* Implementation depends on the value of `pressureThreshold`
* (default is 1).
* <ul>
* <li> 0: number of routees currently processing a message.</li>
   * <li> 1: number of routees currently processing a message and with
   *           some messages in their mailbox.</li>
   * <li> > 1: number of routees with at least the configured `pressureThreshold`
   *           messages in their mailbox. Note that estimating the mailbox size of the
   *           default UnboundedMailbox is an O(N) operation.</li>
* </ul>
*
   * @param routees the current routees of the router
* @return number of busy routees, between 0 and routees.size
*/
def pressure(routees: IndexedSeq[ActorRef]): Int = {
routees count {
case a: LocalActorRef
val cell = a.underlying
pressureThreshold match {
          case 1 cell.mailbox.isScheduled && cell.currentMessage != null && cell.mailbox.hasMessages
case i if i < 1 cell.mailbox.isScheduled && cell.currentMessage != null
case threshold cell.mailbox.numberOfMessages >= threshold
}
case x
false
}
}
/**
* This method can be used to smooth the capacity delta by considering
* the current pressure and current capacity.
*
* @param pressure current number of busy routees
* @param capacity current number of routees
* @return proposed change in the capacity
*/
def filter(pressure: Int, capacity: Int): Int = {
rampup(pressure, capacity) + backoff(pressure, capacity)
}
/**
* Computes a proposed positive (or zero) capacity delta using
* the configured `rampupRate`.
* @param pressure the current number of busy routees
* @param capacity the current number of total routees
* @return proposed increase in capacity
*/
def rampup(pressure: Int, capacity: Int): Int =
if (pressure < capacity) 0 else math.ceil(rampupRate * capacity) toInt
/**
* Computes a proposed negative (or zero) capacity delta using
* the configured `backoffThreshold` and `backoffRate`
* @param pressure the current number of busy routees
* @param capacity the current number of total routees
* @return proposed decrease in capacity (as a negative number)
*/
def backoff(pressure: Int, capacity: Int): Int =
if (backoffThreshold > 0.0 && backoffRate > 0.0 && capacity > 0 && pressure.toDouble / capacity < backoffThreshold)
math.floor(-1.0 * backoffRate * capacity) toInt
else 0
}
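// A hedged usage sketch (not from this file): attaching a DefaultResizer to a
// RoundRobinRouter so the pool is kept between 2 and 15 routees depending on pressure.
// The Worker actor is hypothetical.
object ResizableRouterSketch extends App {
  import akka.actor._

  class Worker extends Actor {
    def receive = { case msg ⇒ sender ! msg }
  }

  val system = ActorSystem("ResizableRouter")
  val resizer = DefaultResizer(lowerBound = 2, upperBound = 15)
  val router = system.actorOf(
    Props[Worker].copy(routerConfig = RoundRobinRouter(resizer = Some(resizer))))

  (1 to 100) foreach (router ! _) // the resize is re-evaluated every messagesPerResize messages
  system.shutdown()
}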

View file

@ -116,25 +116,6 @@ class Serialization(val system: ActorSystemImpl) extends Extension {
def serializerOf(serializerFQN: String): Either[Exception, Serializer] =
ReflectiveAccess.createInstance(serializerFQN, ReflectiveAccess.noParams, ReflectiveAccess.noArgs)
/**
* FIXME implement support for this
*/
private def serializerForBestMatchClass(cl: Class[_]): Either[Exception, Serializer] = {
if (bindings.isEmpty)
Left(NoSerializerFoundException("No mapping serializer found for " + cl))
else {
bindings find {
case (clazzName, _)
ReflectiveAccess.getClassFor(clazzName) match {
case Right(clazz) clazz.isAssignableFrom(cl)
case _ false
}
} map {
case (_, ser) serializerOf(ser)
} getOrElse Left(NoSerializerFoundException("No mapping serializer found for " + cl))
}
}
/**
* A Map of serializer from alias to implementation (class implementing akka.serialization.Serializer)
* By default always contains the following mapping: "default" -> akka.serialization.JavaSerializer

View file

@ -108,7 +108,7 @@ class BoundedBlockingQueue[E <: AnyRef](
throw ie
}
false
// FIXME catching all and continue isn't good for OOME, ticket #1418
// TODO catching all and continue isn't good for OOME, ticket #1418
case e
notFull.signal()
result = e
@ -235,7 +235,7 @@ class BoundedBlockingQueue[E <: AnyRef](
if (backing.removeAll(c)) {
val sz = backing.size()
if (sz < maxCapacity) notFull.signal()
if (sz > 0) notEmpty.signal() //FIXME needed??
if (sz > 0) notEmpty.signal()
true
} else false
} finally {
@ -248,7 +248,7 @@ class BoundedBlockingQueue[E <: AnyRef](
try {
if (backing.retainAll(c)) {
val sz = backing.size()
if (sz < maxCapacity) notFull.signal() //FIXME needed??
if (sz < maxCapacity) notFull.signal()
if (sz > 0) notEmpty.signal()
true
} else false

View file

@ -325,7 +325,7 @@ class FiniteDuration(val length: Long, val unit: TimeUnit) extends Duration {
case Duration(x, NANOSECONDS) x + " nanoseconds"
}
def printHMS = "%02d:%02d:%06.3f".format(toHours, toMinutes % 60, toMillis / 1000. % 60)
def printHMS = "%02d:%02d:%06.3f".format(toHours, toMinutes % 60, toMillis / 1000d % 60)
def compare(other: Duration) =
if (other.finite_?) {

View file

@ -21,7 +21,7 @@ object JMX {
case e: InstanceAlreadyExistsException
Some(mbeanServer.getObjectInstance(name))
case e: Exception
system.eventStream.publish(Error(e, "JMX", "Error when registering mbean [%s]".format(mbean)))
system.eventStream.publish(Error(e, "JMX", this.getClass, "Error when registering mbean [%s]".format(mbean)))
None
}
@ -29,6 +29,6 @@ object JMX {
mbeanServer.unregisterMBean(mbean)
} catch {
case e: InstanceNotFoundException {}
case e: Exception system.eventStream.publish(Error(e, "JMX", "Error while unregistering mbean [%s]".format(mbean)))
case e: Exception system.eventStream.publish(Error(e, "JMX", this.getClass, "Error while unregistering mbean [%s]".format(mbean)))
}
}

View file

@ -1,74 +0,0 @@
/**
* Copyright (C) 2009-2011 Typesafe Inc. <http://www.typesafe.com>
*/
package akka.util
import akka.actor.Actor
import java.util.concurrent.ConcurrentSkipListSet
import akka.actor.{ ActorInitializationException, ActorRef }
/**
* A manager for listener actors. Intended for mixin by observables.
*/
trait ListenerManagement { this: Actor
private val listeners = new ConcurrentSkipListSet[ActorRef]
/**
* Specifies whether listeners should be started when added and stopped when removed or not
*/
protected def manageLifeCycleOfListeners: Boolean = true
/**
* Adds the <code>listener</code> this this registry's listener list.
* The <code>listener</code> is started by this method if manageLifeCycleOfListeners yields true.
*/
def addListener(listener: ActorRef) {
listeners add listener
}
/**
* Removes the <code>listener</code> this this registry's listener list.
* The <code>listener</code> is stopped by this method if manageLifeCycleOfListeners yields true.
*/
def removeListener(listener: ActorRef) {
listeners remove listener
if (manageLifeCycleOfListeners) context.stop(listener)
}
/*
* Returns whether there are any listeners currently
*/
def hasListeners: Boolean = !listeners.isEmpty
/**
* Checks if a specific listener is registered. Pruned eventually when isTerminated==true in notify.
*/
def hasListener(listener: ActorRef): Boolean = listeners.contains(listener)
protected[akka] def notifyListeners(message: Any) {
if (hasListeners) {
val msg = message
val iterator = listeners.iterator
while (iterator.hasNext) {
val listener = iterator.next
if (listener.isTerminated) iterator.remove()
else listener ! msg
}
}
}
/**
* Execute <code>f</code> with each listener as argument.
*/
protected[akka] def foreachListener(f: (ActorRef) Unit) {
val iterator = listeners.iterator
while (iterator.hasNext) {
val listener = iterator.next
if (listener.isTerminated) iterator.remove()
else f(listener)
}
}
}

View file

@ -148,6 +148,11 @@ class Switch(startAsOn: Boolean = false) {
if (switch.get) on else off
}
/**
   * Executes the given code while holding this switch's lock, i.e. protected from concurrent modification of the switch status.
*/
def locked[T](code: T) = synchronized { code }
/**
* Returns whether the switch is IMMEDIATELY on (no locking)
*/

View file

@ -100,7 +100,7 @@ class ConsumerScalaTest extends WordSpec with BeforeAndAfterAll with MustMatcher
"receiving an in-out message exchange" must {
"lead to a TimeoutException" in {
service.awaitEndpointActivation(1) {
actorOf(Props(creator = () new TestBlocker("direct:publish-test-5"), timeout = Timeout(1000)))
actorOf(Props(creator = () new TestBlocker("direct:publish-test-5")))
} must be(true)
try {

View file

@ -4,4 +4,3 @@ akka.event-handler-level = "WARNING"
akka.actor.deployment.service-test.router = "round-robin"
akka.actor.deployment.service-test.cluster.preferred-nodes = ["node:node2","node:node3"]
akka.actor.deployment.service-test.nr-of-instances = 2
akka.remote.client.buffering.retry-message-send-on-failure = false

View file

@ -3,4 +3,3 @@ akka.event-handler-level = "WARNING"
akka.actor.deployment.service-test.router = "round-robin"
akka.actor.deployment.service-test.cluster.preferred-nodes = ["node:node2","node:node3"]
akka.actor.deployment.service-test.nr-of-instances = 2
akka.remote.client.buffering.retry-message-send-on-failure = false

View file

@ -3,4 +3,3 @@ akka.event-handler-level = "WARNING"
akka.actor.deployment.service-test.router = "round-robin"
akka.actor.deployment.service-test.cluster.preferred-nodes = ["node:node2","node:node3"]
akka.actor.deployment.service-test.nr-of-instances = 2
akka.remote.client.buffering.retry-message-send-on-failure = false

View file

@ -17,8 +17,7 @@ object RoundRobin1ReplicaMultiJvmSpec {
class HelloWorld extends Actor with Serializable {
def receive = {
case "Hello"
reply("World from node [" + Config.nodename + "]")
case "Hello" reply("World from node [" + Config.nodename + "]")
}
}

View file

@ -1,22 +0,0 @@
Benchmarks
==========
Scalability, Throughput and Latency benchmark
---------------------------------------------
Simple Trading system.
- `Here is the result with some graphs <https://github.com/patriknw/akka-sample-trading/wiki/Results>`_
- `Here is the code <http://github.com/patriknw/akka-sample-trading>`_
Compares:
- Scala library Actors
- Fire-forget
- Request-reply
- Akka
- Request-reply
- Fire-forget with default dispatcher
- Fire-forget with Hawt dispatcher

View file

@ -4,7 +4,6 @@ Additional Information
.. toctree::
:maxdepth: 2
benchmarks
recipes
companies-using-akka
third-party-integrations

View file

@ -22,17 +22,17 @@ You can add it as a plugin by adding the following to your plugins/build.sbt::
resolvers += Classpaths.typesafeResolver
addSbtPlugin("com.typesafe.sbtmultijvm" % "sbt-multi-jvm" % "0.1.7")
addSbtPlugin("com.typesafe.sbtmultijvm" % "sbt-multi-jvm" % "0.1.9")
You can then add multi-JVM testing to a project by including the ``MultiJvm``
settings and config. For example, here is how the akka-cluster project adds
settings and config. For example, here is how the akka-remote project adds
multi-JVM testing::
import MultiJvmPlugin.{ MultiJvm, extraOptions }
lazy val cluster = Project(
id = "akka-cluster",
base = file("akka-cluster"),
id = "akka-remote",
base = file("akka-remote"),
settings = defaultSettings ++ MultiJvmPlugin.settings ++ Seq(
extraOptions in MultiJvm <<= (sourceDirectory in MultiJvm) { src =>
(name: String) => (src ** (name + ".conf")).get.headOption.map("-Dconfig.file=" + _.absolutePath).toSeq
@ -52,26 +52,26 @@ Running tests
The multi-jvm tasks are similar to the normal tasks: ``test``, ``test-only``,
and ``run``, but are under the ``multi-jvm`` configuration.
So in Akka, to run all the multi-JVM tests in the akka-cluster project use (at
So in Akka, to run all the multi-JVM tests in the akka-remote project use (at
the sbt prompt):
.. code-block:: none
akka-cluster/multi-jvm:test
akka-remote/multi-jvm:test
Or one can change to the ``akka-cluster`` project first, and then run the
Or one can change to the ``akka-remote`` project first, and then run the
tests:
.. code-block:: none
project akka-cluster
project akka-remote
multi-jvm:test
To run individual tests use ``test-only``:
.. code-block:: none
multi-jvm:test-only akka.cluster.deployment.Deployment
multi-jvm:test-only akka.remote.RandomRoutedRemoteActor
More than one test name can be listed to run multiple specific
tests. Tab-completion in sbt makes it easy to complete the test names.
@ -81,7 +81,7 @@ options after the test names and ``--``. For example:
.. code-block:: none
multi-jvm:test-only akka.cluster.deployment.Deployment -- -Dsome.option=something
multi-jvm:test-only akka.remote.RandomRoutedRemoteActor -- -Dsome.option=something
Creating application tests
@ -159,21 +159,20 @@ You can define specific JVM options for each of the spawned JVMs. You do that by
a file named after the node in the test with suffix ``.opts`` and put them in the same
directory as the test.
For example, to feed the JVM options ``-Dakka.cluster.nodename=node1`` and
``-Dakka.remote.port=9991`` to the ``SampleMultiJvmNode1`` let's create three ``*.opts`` files
and add the options to them.
For example, to feed the JVM options ``-Dakka.remote.port=9991`` to the ``SampleMultiJvmNode1``
let's create three ``*.opts`` files and add the options to them.
``SampleMultiJvmNode1.opts``::
-Dakka.cluster.nodename=node1 -Dakka.remote.port=9991
-Dakka.remote.port=9991
``SampleMultiJvmNode2.opts``::
-Dakka.cluster.nodename=node2 -Dakka.remote.port=9992
-Dakka.remote.port=9992
``SampleMultiJvmNode3.opts``::
-Dakka.cluster.nodename=node3 -Dakka.remote.port=9993
-Dakka.remote.port=9993
Overriding configuration options
@ -188,15 +187,15 @@ For example, to override the configuration option ``akka.cluster.name`` let's cr
``SampleMultiJvmNode1.conf``::
akka.cluster.name = "test-cluster"
akka.remote.port = 9991
``SampleMultiJvmNode2.conf``::
akka.cluster.name = "test-cluster"
akka.remote.port = 9992
``SampleMultiJvmNode3.conf``::
akka.cluster.name = "test-cluster"
akka.remote.port = 9993
ScalaTest
@ -234,18 +233,23 @@ To run just these tests you would call ``multi-jvm:test-only sample.Spec`` at
the sbt prompt.
ZookeeperBarrier
================
Barriers
========
When running multi-JVM tests it's common to need to coordinate timing across
nodes. To do this there is a ZooKeeper-based double-barrier (there is both an
entry barrier and an exit barrier). ClusterNodes also have support for creating
barriers easily. To wait at the entry use the ``enter`` method. To wait at the
exit use the ``leave`` method. It's also possible t pass a block of code which
nodes. To do this, multi-JVM test framework has the notion of a double-barrier
(there is both an entry barrier and an exit barrier).
To wait at the entry use the ``enter`` method. To wait at the
exit use the ``leave`` method. It's also possible to pass a block of code which
will be run between the barriers.
When creating a barrier you pass it a name and the number of nodes that are
expected to arrive at the barrier. You can also pass a timeout. The default
There are 2 implementations of the barrier: one is used for coordinating JVMs
running on a single machine and is based on local files; the other is used in a distributed
scenario (see below) and is based on Apache ZooKeeper. The two cases
are differentiated by whether the ``test.hosts`` property is defined. The choice of barrier
implementation is made in ``AkkaRemoteSpec``, which is the base class for all multi-JVM tests.
When creating a barrier you pass it a name. You can also pass a timeout. The default
timeout is 60 seconds.
Here is an example of coordinating the starting of two nodes and then running
@ -259,75 +263,51 @@ something in coordination::
import akka.cluster._
object SampleMultiJvmSpec {
object SampleMultiJvmSpec extends AbstractRemoteActorMultiJvmSpec {
val NrOfNodes = 2
def commonConfig = ConfigFactory.parseString("""
// Declare your configuration here.
""")
}
class SampleMultiJvmNode1 extends WordSpec with MustMatchers with BeforeAndAfterAll {
class SampleMultiJvmNode1 extends AkkaRemoteSpec(SampleMultiJvmSpec.nodeConfigs(0))
with WordSpec with MustMatchers {
import SampleMultiJvmSpec._
override def beforeAll() = {
Cluster.startLocalCluster()
}
override def afterAll() = {
Cluster.shutdownLocalCluster()
}
"A cluster" must {
"have jvm options" in {
System.getProperty("akka.cluster.nodename", "") must be("node1")
System.getProperty("akka.remote.port", "") must be("9991")
akka.config.Config.config.getString("test.name", "") must be("node1")
}
"be able to start all nodes" in {
LocalCluster.barrier("start", NrOfNodes) {
Cluster.node.start()
}
Cluster.node.isRunning must be(true)
Cluster.node.shutdown()
barrier("start")
println("All nodes are started!")
barrier("end")
}
}
}
class SampleMultiJvmNode2 extends WordSpec with MustMatchers {
class SampleMultiJvmNode2 extends AkkaRemoteSpec(SampleMultiJvmSpec.nodeConfigs(1))
with WordSpec with MustMatchers {
import SampleMultiJvmSpec._
"A cluster" must {
"have jvm options" in {
System.getProperty("akka.cluster.nodename", "") must be("node2")
System.getProperty("akka.remote.port", "") must be("9992")
akka.config.Config.config.getString("test.name", "") must be("node2")
}
"be able to start all nodes" in {
LocalCluster.barrier("start", NrOfNodes) {
Cluster.node.start()
}
Cluster.node.isRunning must be(true)
Cluster.node.shutdown()
barrier("start")
println("All nodes are started!")
barrier("end")
}
}
}
An example output from this would be:
.. code-block:: none
> multi-jvm:test-only sample.Sample
...
[info] Starting JVM-Node1 for example.SampleMultiJvmNode1
[info] Starting JVM-Node2 for example.SampleMultiJvmNode2
[JVM-Node1] Loading config [akka.conf] from the application classpath.
[JVM-Node2] Loading config [akka.conf] from the application classpath.
...
[JVM-Node2] Hello from node 2
[JVM-Node1] Hello from node 1
[success]
NetworkFailureTest
==================
@ -348,3 +328,39 @@ you are on another platform you might need to install it yourself. Here is a
port:
http://info.iet.unipi.it/~luigi/dummynet
Running tests on many machines
==============================
The same tests that are run on a single machine using sbt-multi-jvm can be run on multiple
machines using the schoir (pronounced like ``esquire``) plugin. The plugin is included just like sbt-multi-jvm::
resolvers += Classpaths.typesafeResolver
addSbtPlugin("com.typesafe.schoir" % "schoir" % "0.1.1")
Interaction with the plugin happens through the ``schoir:master`` input task. This input task optionally accepts the
path to the file with the following properties::
git.url=git@github.com:jboner/akka.git
external.addresses.for.ssh=host1:port1,...,hostN:portN
internal.host.names=host1,...,hostN
As an alternative to specifying the property file, one can set the respective settings in the build file::
gitUrl := "git@github.com:jboner/akka.git",
machinesExt := List(InetAddress("host1", port1)),
machinesInt := List("host1")
The reason the first property is called ``git.url`` is that the plugin sets up a temporary remote branch in git
to test against the local working copy. After the tests are finished the temporary changes are cleaned up and the branch
is deleted.
Each test machine starts a node in zookeeper server ensemble that can be used for synchronization. Since
the server is started on a fixed port, it's not currently possible to run more than one test session on the
same machine at the same time.
The machines that are used for testing (slaves) should have ssh access to the outside world and be able to talk
to each other using the internal addresses given. On the master machine an ssh client is required. Obviously git
and sbt must be installed on both master and slave machines.

File diff suppressed because it is too large Load diff

View file

@ -1,105 +0,0 @@
.. _http-module:
HTTP
====
.. sidebar:: Contents
.. contents:: :local:
When deploying in a servlet container:
--------------------------------------------
If you deploy Akka in a JEE container, don't forget to create an Akka initialization and cleanup hook:
.. code-block:: scala
package com.my //<--- your own package
import akka.util.AkkaLoader
import akka.cluster.BootableRemoteActorService
import akka.actor.BootableActorLoaderService
import javax.servlet.{ServletContextListener, ServletContextEvent}
/**
* This class can be added to web.xml mappings as a listener to start and postStop Akka.
*<web-app>
* ...
* <listener>
* <listener-class>com.my.Initializer</listener-class>
* </listener>
* ...
*</web-app>
*/
class Initializer extends ServletContextListener {
lazy val loader = new AkkaLoader
def contextDestroyed(e: ServletContextEvent): Unit = loader.shutdown
def contextInitialized(e: ServletContextEvent): Unit =
loader.boot(true, new BootableActorLoaderService with BootableRemoteActorService) //<--- Important
// loader.boot(true, new BootableActorLoaderService {}) // If you don't need akka-remote
}
For Java users, it's currently only possible to use BootableActorLoaderService, but you'll need to use: akka.actor.DefaultBootableActorLoaderService
Then you just declare it in your web.xml:
.. code-block:: xml
<web-app>
...
<listener>
<listener-class>your.package.Initializer</listener-class>
</listener>
...
</web-app>
Adapting your own Akka Initializer for the Servlet Container
------------------------------------------------------------
If you want to use akka-camel or any other modules that have their own "Bootable"'s you'll need to write your own Initializer, which is _ultra_ simple, see below for an example on how to include Akka-camel.
.. code-block:: scala
package com.my //<--- your own package
import akka.util.AkkaLoader
import akka.cluster.BootableRemoteActorService
import akka.actor.BootableActorLoaderService
import akka.camel.CamelService
import javax.servlet.{ServletContextListener, ServletContextEvent}
/**
* This class can be added to web.xml mappings as a listener to start and postStop Akka.
*<web-app>
* ...
* <listener>
* <listener-class>com.my.Initializer</listener-class>
* </listener>
* ...
*</web-app>
*/
class Initializer extends ServletContextListener {
lazy val loader = new AkkaLoader
def contextDestroyed(e: ServletContextEvent): Unit = loader.shutdown
def contextInitialized(e: ServletContextEvent): Unit =
loader.boot(true, new BootableActorLoaderService with BootableRemoteActorService with CamelService) //<--- Important
}
Using Akka with the Pinky REST/MVC framework
--------------------------------------------
Pinky has a slick Akka integration. Read more `here <http://wiki.github.com/pk11/pinky/release-13>`_
jetty-run in SBT
----------------
If you want to use jetty-run in SBT you need to exclude the version of Jetty that is bundled in akka-http:
.. code-block:: scala
override def ivyXML =
<dependencies>
<dependency org="com.typesafe.akka" name="akka-http" rev="AKKA_VERSION_GOES_HERE">
<exclude module="jetty"/>
</dependency>
</dependencies>

View file

@ -1,40 +0,0 @@
.. _microkernel:
#############
Microkernel
#############
Run the microkernel
===================
To start the kernel use the scripts in the ``bin`` directory.
All services are configured in the :ref:`configuration` file in the ``config`` directory.
Services you want to be started up automatically should be listed in the list of ``boot`` classes in
the :ref:`configuration`.
Put your application in the ``deploy`` directory.
Akka Home
---------
Note that the microkernel needs to know where the Akka home is (the base
directory of the microkernel). The above scripts do this for you. Otherwise, you
can set Akka home by:
* Specifying the ``AKKA_HOME`` environment variable
* Specifying the ``-Dakka.home`` java option
.. _hello-microkernel:
Hello Microkernel
=================
There is a very simple Akka Mist sample project included in the microkernel
``deploy`` directory. Start the microkernel with the start script and then go to
http://localhost:9998 to say Hello to the microkernel.

View file

@ -97,8 +97,8 @@ Each actor path has an address component, describing the protocol and location
by which the corresponding actor is reachable, followed by the names of the
actors in the hierarchy from the root up. Examples are::
"akka://my-system/app/service-a/worker1" // purely local
"akka://my-system@serv.example.com:5678/app/service-b" // local or remote
"akka://my-system/user/service-a/worker1" // purely local
"akka://my-system@serv.example.com:5678/user/service-b" // local or remote
"cluster://my-cluster/service-c" // clustered (Future Extension)
Here, ``akka`` is the default remote protocol for the 2.0 release, and others

View file

@ -1,3 +1,5 @@
.. _jmm:
Akka and the Java Memory Model
================================

View file

@ -1,4 +1,3 @@
.. _deployment-scenarios:
###################################
@ -28,7 +27,7 @@ Actors as services
^^^^^^^^^^^^^^^^^^
The simplest way you can use Akka is to use the actors as services in your Web
application. All that's needed to do that is to put the Akka charts as well as
application. All that's needed to do that is to put the Akka jars as well as
its dependency jars into ``WEB-INF/lib``. You also need to put the :ref:`configuration`
file in the ``$AKKA_HOME/config`` directory. Now you can create your
Actors as regular services referenced from your Web application. You should also

View file

@ -4,6 +4,7 @@
package akka.docs.actor;
//#imports
import akka.dispatch.*;
import akka.actor.*;
import akka.japi.*;
@ -103,15 +104,14 @@ public class TypedActorDocTestBase {
try {
//#typed-actor-create1
Squarer mySquarer =
TypedActor.get(system).typedActorOf(Squarer.class, SquarerImpl.class, new Props());
TypedActor.get(system).typedActorOf(new TypedProps<SquarerImpl>(Squarer.class, SquarerImpl.class));
//#typed-actor-create1
//#typed-actor-create2
Squarer otherSquarer =
TypedActor.get(system).typedActorOf(Squarer.class,
TypedActor.get(system).typedActorOf(new TypedProps<SquarerImpl>(Squarer.class,
new Creator<SquarerImpl>() {
public SquarerImpl create() { return new SquarerImpl("foo"); }
},
new Props(),
}),
"name");
//#typed-actor-create2

View file

@ -29,6 +29,14 @@ import akka.japi.Procedure;
import akka.actor.Terminated;
//#import-watch
//#import-gracefulStop
import static akka.pattern.Patterns.gracefulStop;
import akka.dispatch.Future;
import akka.dispatch.Await;
import akka.util.Duration;
import akka.actor.ActorTimeoutException;
//#import-gracefulStop
import akka.actor.Props;
import akka.actor.UntypedActor;
import akka.actor.UntypedActorFactory;
@ -59,7 +67,6 @@ public class UntypedActorDocTestBase {
return new MyUntypedActor();
}
});
Props props5 = props4.withTimeout(new Timeout(1000));
//#creating-props-config
}
@ -102,8 +109,7 @@ public class UntypedActorDocTestBase {
public void propsActorOf() {
ActorSystem system = ActorSystem.create("MySystem");
//#creating-props
ActorRef myActor = system.actorOf(new Props(MyUntypedActor.class).withDispatcher("my-dispatcher"),
"myactor");
ActorRef myActor = system.actorOf(new Props(MyUntypedActor.class).withDispatcher("my-dispatcher"), "myactor");
//#creating-props
myActor.tell("test");
system.shutdown();
@ -176,6 +182,23 @@ public class UntypedActorDocTestBase {
system.shutdown();
}
@Test
public void usePatternsGracefulStop() {
ActorSystem system = ActorSystem.create("MySystem");
ActorRef actorRef = system.actorOf(new Props(MyUntypedActor.class));
//#gracefulStop
try {
Future<Boolean> stopped = gracefulStop(actorRef, Duration.create(5, TimeUnit.SECONDS), system);
Await.result(stopped, Duration.create(6, TimeUnit.SECONDS));
// the actor has been stopped
} catch (ActorTimeoutException e) {
// the actor wasn't stopped within 5 seconds
}
//#gracefulStop
system.shutdown();
}
public static class MyActor extends UntypedActor {
public MyActor(String s) {
@ -266,6 +289,7 @@ public class UntypedActorDocTestBase {
}
}
}
//#hot-swap-actor
//#watch

View file

@ -0,0 +1,5 @@
package akka.docs.jrouting;
import org.scalatest.junit.JUnitSuite
class CustomRouterDocTest extends CustomRouterDocTestBase with JUnitSuite

View file

@ -0,0 +1,145 @@
/**
* Copyright (C) 2009-2012 Typesafe Inc. <http://www.typesafe.com>
*/
package akka.docs.jrouting;
import java.util.List;
import java.util.Arrays;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import akka.actor.*;
import akka.routing.*;
import akka.util.Duration;
import akka.util.Timeout;
import akka.dispatch.Await;
import akka.dispatch.Future;
import akka.testkit.AkkaSpec;
import com.typesafe.config.ConfigFactory;
import static akka.pattern.Patterns.ask;
import static akka.docs.jrouting.CustomRouterDocTestBase.DemocratActor;
import static akka.docs.jrouting.CustomRouterDocTestBase.RepublicanActor;
import static akka.docs.jrouting.CustomRouterDocTestBase.Message.*;
public class CustomRouterDocTestBase {
ActorSystem system;
@Before
public void setUp() {
system = ActorSystem.create("MySystem", AkkaSpec.testConf());
}
@After
public void tearDown() {
system.shutdown();
}
//#crTest
@Test
public void countVotesAsIntendedNotAsInFlorida() {
ActorRef routedActor = system.actorOf(new Props().withRouter(new VoteCountRouter()));
routedActor.tell(DemocratVote);
routedActor.tell(DemocratVote);
routedActor.tell(RepublicanVote);
routedActor.tell(DemocratVote);
routedActor.tell(RepublicanVote);
Timeout timeout = new Timeout(Duration.parse("1 seconds"));
Future<Object> democratsResult = ask(routedActor, DemocratCountResult, timeout);
Future<Object> republicansResult = ask(routedActor, RepublicanCountResult, timeout);
assertEquals(3, Await.result(democratsResult, timeout.duration()));
assertEquals(2, Await.result(republicansResult, timeout.duration()));
}
//#crTest
//#CustomRouter
//#crMessages
enum Message {
DemocratVote, DemocratCountResult, RepublicanVote, RepublicanCountResult
}
//#crMessages
//#crActors
public static class DemocratActor extends UntypedActor {
int counter = 0;
public void onReceive(Object msg) {
switch ((Message) msg) {
case DemocratVote:
counter++;
break;
case DemocratCountResult:
getSender().tell(counter, getSelf());
break;
default:
unhandled(msg);
}
}
}
public static class RepublicanActor extends UntypedActor {
int counter = 0;
public void onReceive(Object msg) {
switch ((Message) msg) {
case RepublicanVote:
counter++;
break;
case RepublicanCountResult:
getSender().tell(counter, getSelf());
break;
default:
unhandled(msg);
}
}
}
//#crActors
//#crRouter
public static class VoteCountRouter extends CustomRouterConfig {
//#crRoute
@Override
public CustomRoute createCustomRoute(Props props, RouteeProvider routeeProvider) {
final ActorRef democratActor = routeeProvider.context().actorOf(new Props(DemocratActor.class), "d");
final ActorRef republicanActor = routeeProvider.context().actorOf(new Props(RepublicanActor.class), "r");
List<ActorRef> routees = Arrays.asList(new ActorRef[] { democratActor, republicanActor });
//#crRegisterRoutees
routeeProvider.registerRoutees(routees);
//#crRegisterRoutees
//#crRoutingLogic
return new CustomRoute() {
@Override
public Iterable<Destination> destinationsFor(ActorRef sender, Object msg) {
switch ((Message) msg) {
case DemocratVote:
case DemocratCountResult:
return Arrays.asList(new Destination[] { new Destination(sender, democratActor) });
case RepublicanVote:
case RepublicanCountResult:
return Arrays.asList(new Destination[] { new Destination(sender, republicanActor) });
default:
throw new IllegalArgumentException("Unknown message: " + msg);
}
}
};
//#crRoutingLogic
}
//#crRoute
}
//#crRouter
//#CustomRouter
}

View file

@ -0,0 +1,48 @@
/**
* Copyright (C) 2009-2012 Typesafe Inc. <http://www.typesafe.com>
*/
package akka.docs.jrouting;
import java.io.Serializable;
import akka.actor.UntypedActor;
//#fibonacciActor
public class FibonacciActor extends UntypedActor {
public void onReceive(Object msg) {
if (msg instanceof FibonacciNumber) {
FibonacciNumber fibonacciNumber = (FibonacciNumber) msg;
getSender().tell(fibonacci(fibonacciNumber.getNbr()));
} else {
unhandled(msg);
}
}
private int fibonacci(int n) {
return fib(n, 1, 0);
}
private int fib(int n, int b, int a) {
if (n == 0)
return a;
// recursion
return fib(n - 1, a + b, b);
}
public static class FibonacciNumber implements Serializable {
private static final long serialVersionUID = 1L;
private final int nbr;
public FibonacciNumber(int nbr) {
this.nbr = nbr;
}
public int getNbr() {
return nbr;
}
}
}
//#fibonacciActor

View file

@ -0,0 +1,69 @@
/**
* Copyright (C) 2009-2012 Typesafe Inc. <http://www.typesafe.com>
*/
package akka.docs.jrouting;
import akka.routing.ScatterGatherFirstCompletedRouter;
import akka.routing.BroadcastRouter;
import akka.routing.RandomRouter;
import akka.routing.RoundRobinRouter;
import akka.routing.SmallestMailboxRouter;
import akka.actor.UntypedActor;
import akka.actor.ActorRef;
import akka.actor.Props;
import akka.util.Duration;
import akka.util.Timeout;
import akka.dispatch.Future;
import akka.dispatch.Await;
//#parentActor
public class ParentActor extends UntypedActor {
public void onReceive(Object msg) {
if (msg.equals("rrr")) {
//#roundRobinRouter
ActorRef roundRobinRouter = getContext().actorOf(
new Props(PrintlnActor.class).withRouter(new RoundRobinRouter(5)), "router");
for (int i = 1; i <= 10; i++) {
roundRobinRouter.tell(i, getSelf());
}
//#roundRobinRouter
} else if (msg.equals("rr")) {
//#randomRouter
ActorRef randomRouter = getContext().actorOf(new Props(PrintlnActor.class).withRouter(new RandomRouter(5)),
"router");
for (int i = 1; i <= 10; i++) {
randomRouter.tell(i, getSelf());
}
//#randomRouter
} else if (msg.equals("smr")) {
//#smallestMailboxRouter
ActorRef smallestMailboxRouter = getContext().actorOf(
new Props(PrintlnActor.class).withRouter(new SmallestMailboxRouter(5)), "router");
for (int i = 1; i <= 10; i++) {
smallestMailboxRouter.tell(i, getSelf());
}
//#smallestMailboxRouter
} else if (msg.equals("br")) {
//#broadcastRouter
ActorRef broadcastRouter = getContext().actorOf(new Props(PrintlnActor.class).withRouter(new BroadcastRouter(5)),
"router");
broadcastRouter.tell("this is a broadcast message", getSelf());
//#broadcastRouter
} else if (msg.equals("sgfcr")) {
//#scatterGatherFirstCompletedRouter
ActorRef scatterGatherFirstCompletedRouter = getContext().actorOf(
new Props(FibonacciActor.class).withRouter(new ScatterGatherFirstCompletedRouter(5, Duration
.parse("2 seconds"))), "router");
Timeout timeout = getContext().system().settings().ActorTimeout();
Future<Object> futureResult = akka.pattern.Patterns.ask(
scatterGatherFirstCompletedRouter, new FibonacciActor.FibonacciNumber(10), timeout);
int result = (Integer) Await.result(futureResult, timeout.duration());
//#scatterGatherFirstCompletedRouter
System.out.println(String.format("The result of calculating Fibonacci for 10 is %d", result));
} else {
unhandled(msg);
}
}
}
//#parentActor

View file

@ -0,0 +1,15 @@
/**
* Copyright (C) 2009-2012 Typesafe Inc. <http://www.typesafe.com>
*/
package akka.docs.jrouting;
import akka.actor.UntypedActor;
//#printlnActor
public class PrintlnActor extends UntypedActor {
public void onReceive(Object msg) {
System.out.println(String.format("Received message '%s' in actor %s", msg, getSelf().path().name()));
}
}
//#printlnActor

View file

@ -0,0 +1,58 @@
/**
* Copyright (C) 2009-2012 Typesafe Inc. <http://www.typesafe.com>
*/
package akka.docs.jrouting;
import akka.routing.FromConfig;
import akka.actor.ActorRef;
import akka.actor.Props;
import akka.actor.UntypedActor;
import akka.actor.ActorSystem;
import com.typesafe.config.ConfigFactory;
import com.typesafe.config.Config;
public class RouterViaConfigExample {
public static class ExampleActor extends UntypedActor {
public void onReceive(Object msg) {
if (msg instanceof Message) {
Message message = (Message) msg;
System.out.println(String.format("Received %s in router %s", message.getNbr(), getSelf().path().name()));
} else {
unhandled(msg);
}
}
public static class Message {
private final int nbr;
public Message(int nbr) {
this.nbr = nbr;
}
public int getNbr() {
return nbr;
}
}
}
public static void main(String... args) {
Config config = ConfigFactory.parseString("akka.actor.deployment {\n" + " /router {\n"
+ " router = round-robin\n" + " nr-of-instances = 5\n" + " }\n" + "}\n");
ActorSystem system = ActorSystem.create("Example", config);
//#configurableRouting
ActorRef router = system.actorOf(new Props(ExampleActor.class).withRouter(new FromConfig()), "router");
//#configurableRouting
for (int i = 1; i <= 10; i++) {
router.tell(new ExampleActor.Message(i));
}
//#configurableRoutingWithResizer
ActorRef router2 = system.actorOf(new Props(ExampleActor.class).withRouter(new FromConfig()), "router2");
//#configurableRoutingWithResizer
for (int i = 1; i <= 10; i++) {
router2.tell(new ExampleActor.Message(i));
}
}
}

View file

@ -0,0 +1,71 @@
/**
* Copyright (C) 2009-2012 Typesafe Inc. <http://www.typesafe.com>
*/
package akka.docs.jrouting;
import akka.routing.RoundRobinRouter;
import akka.routing.DefaultResizer;
import akka.actor.ActorRef;
import akka.actor.Props;
import akka.actor.UntypedActor;
import akka.actor.ActorSystem;
import java.util.Arrays;
public class RouterViaProgramExample {
public static class ExampleActor extends UntypedActor {
public void onReceive(Object msg) {
if (msg instanceof Message) {
Message message = (Message) msg;
System.out.println(String.format("Received %s in router %s", message.getNbr(), getSelf().path().name()));
} else {
unhandled(msg);
}
}
public static class Message {
private final int nbr;
public Message(int nbr) {
this.nbr = nbr;
}
public int getNbr() {
return nbr;
}
}
}
public static void main(String... args) {
ActorSystem system = ActorSystem.create("RPE");
//#programmaticRoutingNrOfInstances
int nrOfInstances = 5;
ActorRef router1 = system.actorOf(new Props(ExampleActor.class).withRouter(new RoundRobinRouter(nrOfInstances)));
//#programmaticRoutingNrOfInstances
for (int i = 1; i <= 6; i++) {
router1.tell(new ExampleActor.Message(i));
}
//#programmaticRoutingRoutees
ActorRef actor1 = system.actorOf(new Props(ExampleActor.class));
ActorRef actor2 = system.actorOf(new Props(ExampleActor.class));
ActorRef actor3 = system.actorOf(new Props(ExampleActor.class));
Iterable<ActorRef> routees = Arrays.asList(new ActorRef[] { actor1, actor2, actor3 });
ActorRef router2 = system.actorOf(new Props(ExampleActor.class).withRouter(RoundRobinRouter.create(routees)));
//#programmaticRoutingRoutees
for (int i = 1; i <= 6; i++) {
router2.tell(new ExampleActor.Message(i));
}
//#programmaticRoutingWithResizer
int lowerBound = 2;
int upperBound = 15;
DefaultResizer resizer = new DefaultResizer(lowerBound, upperBound);
// pass the resizer to the router so that the number of routees is adjusted dynamically
ActorRef router3 = system.actorOf(new Props(ExampleActor.class).withRouter(new RoundRobinRouter(resizer)));
//#programmaticRoutingWithResizer
for (int i = 1; i <= 6; i++) {
router3.tell(new ExampleActor.Message(i));
}
}
}

View file

@ -166,7 +166,7 @@ if not specified otherwise.
default-dispatcher {
# If negative (or zero) then an unbounded mailbox is used (default)
# If positive then a bounded mailbox is used and the capacity is set to the number specified
task-queue-size = 1000
mailbox-capacity = 1000
}
}
}

View file

@ -17,8 +17,13 @@ as illustrated in this example:
.. includecode:: code/akka/docs/event/LoggingDocTestBase.java
:include: imports,my-actor
The second parameter to the ``Logging.getLogger`` is the source of this logging channel.
The source object is translated to a String according to the following rules:
The first parameter to ``Logging.getLogger`` could also be any
:class:`LoggingBus`, specifically ``system.eventStream()``; in the demonstrated
case, the actor system's address is included in the ``akkaSource``
representation of the log source (see `Logging Thread and Akka Source in MDC`_)
while in the second case this is not automatically done. The second parameter
to ``Logging.getLogger`` is the source of this logging channel. The source
object is translated to a String according to the following rules:
* if it is an Actor or ActorRef, its path is used
* in case of a String it is used as is
@ -28,6 +33,13 @@ The source object is translated to a String according to the following rules:
The log message may contain argument placeholders ``{}``, which will be substituted if the log level
is enabled.
The Java :class:`Class` of the log source is also included in the generated
:class:`LogEvent`. In case of a simple string this is replaced with a “marker”
class :class:`akka.event.DummyClassForStringSources` in order to allow special
treatment of this case, e.g. in the SLF4J event listener which will then use
the string instead of the class name for looking up the logger instance to
use.
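To make the difference concrete, here is a minimal sketch, assuming a plain (non-actor) helper
class with made-up names, of obtaining one ``LoggingAdapter`` from the system and one from its
``eventStream()``:

.. code-block:: java

  import akka.actor.ActorSystem;
  import akka.event.Logging;
  import akka.event.LoggingAdapter;

  public class MyHelper {
    private final LoggingAdapter systemLog;
    private final LoggingAdapter busLog;

    public MyHelper(ActorSystem system) {
      // source backed by the actor system: akkaSource includes the system's address
      systemLog = Logging.getLogger(system, this);
      // source backed by the event stream: the address is not automatically included
      busLog = Logging.getLogger(system.eventStream(), "my.string.source");
    }

    public void talk() {
      systemLog.info("Hello from {}", getClass().getName());
      busLog.debug("logged via the event stream with a string source");
    }
  }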
Event Handler
=============
@ -83,8 +95,8 @@ creating the ``LoggingAdapter`` correspond to the name of the SLF4J logger.
loglevel = "DEBUG"
}
Logging thread in MDC
---------------------
Logging Thread and Akka Source in MDC
-------------------------------------
Since the logging is done asynchronously the thread in which the logging was performed is captured in
Mapped Diagnostic Context (MDC) with attribute name ``sourceThread``.
@ -96,3 +108,22 @@ With Logback the thread name is available with ``%X{sourceThread}`` specifier wi
</layout>
</appender>
.. note::
It will probably be a good idea to use the ``sourceThread`` MDC value also in
non-Akka parts of the application in order to have this property consistently
available in the logs.
Another helpful facility is that Akka captures the actor's address when
instantiating a logger within it, meaning that the full instance identification
is available for associating log messages e.g. with members of a router. This
information is available in the MDC with attribute name ``akkaSource``::
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
<layout>
<pattern>%date{ISO8601} %-5level %logger{36} %X{akkaSource} - %msg%n</pattern>
</layout>
</appender>
For more details on what this attribute contains—also for non-actors—please see
`How to Log`_.

View file

@ -9,39 +9,42 @@ For an introduction of remoting capabilities of Akka please see :ref:`remoting`.
Preparing your ActorSystem for Remoting
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The Akka remoting is a separate jar file. Make sure that you have a dependency from your project to this jar::
The Akka remoting is a separate jar file. Make sure that you have the following dependency in your project::
akka-remote.jar
<dependency>
<groupId>com.typesafe.akka</groupId>
<artifactId>akka-remote</artifactId>
<version>2.0-SNAPSHOT</version>
</dependency>
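If you build with sbt, the corresponding dependency would presumably be declared in the same
way as the other Akka modules (shown here as an assumption, following the pattern used for the
migration kit below)::

  "com.typesafe.akka" % "akka-remote" % "2.0-SNAPSHOT"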
First of all you have to change the actor provider from ``LocalActorRefProvider`` to ``RemoteActorRefProvider``::
To enable remote capabilities in your Akka project you should, at a minimum, add the following changes
to your ``application.conf`` file::
akka {
actor {
provider = "akka.remote.RemoteActorRefProvider"
provider = "akka.remote.RemoteActorRefProvider"
}
}
After that you must also add the following settings::
akka {
remote {
transport = "akka.remote.netty.NettyRemoteSupport"
server {
# The hostname or ip to bind the remoting to,
# InetAddress.getLocalHost.getHostAddress is used if empty
hostname = ""
# The default remote server port clients should connect to.
# Default is 2552 (AKKA)
hostname = "127.0.0.1"
port = 2552
}
}
}
}
These are the bare minimal settings that must exist in order to get started with remoting.
There are, of course, more properties that can be tweaked. We refer to the following
As you can see in the example above there are three things you need to add to get started:
* Change provider from ``akka.actor.LocalActorRefProvider`` to ``akka.remote.RemoteActorRefProvider``
* Add host name - the machine you want to run the actor system on
* Add port number - the port the actor system should listen on
The example above only illustrates the bare minimum of properties you have to add to enable remoting.
There are many more properties related to remoting in Akka. We refer to the following
reference file for more information:
* `reference.conf of akka-remote <https://github.com/jboner/akka/blob/master/akka-remote/src/main/resources/reference.conf#L39>`_
.. literalinclude:: ../../akka-remote/src/main/resources/reference.conf
:language: none
Looking up Remote Actors
^^^^^^^^^^^^^^^^^^^^^^^^
@ -66,7 +69,7 @@ The "app" in this case refers to the name of the ``ActorSystem``::
actor {
deployment {
/serviceA/retrieval {
remote = “akka://app@10.0.0.1:2552”
remote = "akka://app@10.0.0.1:2552"
}
}
}
@ -103,10 +106,10 @@ This is also done via configuration::
actor {
deployment {
/serviceA/aggregation {
router = “round-robin”
router = "round-robin"
nr-of-instances = 10
routees {
nodes = [“akka://app@10.0.0.2:2552”, “akka://app@10.0.0.3:2552”]
target {
nodes = ["akka://app@10.0.0.2:2552", "akka://app@10.0.0.3:2552"]
}
}
}

View file

@ -4,9 +4,270 @@
Routing (Java)
==============
This part of the documentation is not done.
.. sidebar:: Contents
We continuously strive to add and improve the documentation so you may want to have a
look at the `snapshot repository <http://akka.io/docs/akka/snapshot/>`_.
.. contents:: :local:
Akka-core includes some building blocks to build more complex message flow handlers, they are listed and explained below:
Router
------
A Router is an actor that routes incoming messages to outbound actors.
The router routes the messages sent to it to its underlying actors called 'routees'.
Akka comes with some defined routers out of the box, but as you will see in this chapter it
is really easy to create your own. The routers shipped with Akka are:
* ``akka.routing.RoundRobinRouter``
* ``akka.routing.RandomRouter``
* ``akka.routing.SmallestMailboxRouter``
* ``akka.routing.BroadcastRouter``
* ``akka.routing.ScatterGatherFirstCompletedRouter``
Routers Explained
^^^^^^^^^^^^^^^^^
This is an example of how to create a router that is defined in configuration:
.. includecode:: ../scala/code/akka/docs/routing/RouterViaConfigExample.scala#config
.. includecode:: code/akka/docs/jrouting/RouterViaConfigExample.java#configurableRouting
This is an example of how to programmatically create a router and set the number of routees it should create:
.. includecode:: code/akka/docs/jrouting/RouterViaProgramExample.java#programmaticRoutingNrOfInstances
You can also give the router already created routees as in:
.. includecode:: code/akka/docs/jrouting/RouterViaProgramExample.java#programmaticRoutingRoutees
When you create a router programmatically you define the number of routees *or* you pass already created routees to it.
If you send both parameters to the router *only* the latter will be used, i.e. ``nrOfInstances`` is disregarded.
*It is also worth pointing out that if you define the ``router`` in the configuration file then this value will be used
instead of any programmatically sent parameters.*
Once you have the router actor you can simply send messages to it as you would to any other actor:
.. code-block:: java
router.tell(new MyMsg());
The router will apply its behavior to the message it receives and forward it to the routees.
Router usage
^^^^^^^^^^^^
In this section we will describe how to use the different router types.
First we need to create some actors that will be used in the examples:
.. includecode:: code/akka/docs/jrouting/PrintlnActor.java#printlnActor
and
.. includecode:: code/akka/docs/jrouting/FibonacciActor.java#fibonacciActor
RoundRobinRouter
****************
Routes in a `round-robin <http://en.wikipedia.org/wiki/Round-robin>`_ fashion to its routees.
Code example:
.. includecode:: code/akka/docs/jrouting/ParentActor.java#roundRobinRouter
When run you should see a similar output to this:
.. code-block:: scala
Received message '1' in actor $b
Received message '2' in actor $c
Received message '3' in actor $d
Received message '6' in actor $b
Received message '4' in actor $e
Received message '8' in actor $d
Received message '5' in actor $f
Received message '9' in actor $e
Received message '10' in actor $f
Received message '7' in actor $c
If you look closely at the output you can see that each of the routees received two messages, which
is exactly what you would expect a round-robin router to do.
(The name of an actor is automatically created in the format ``$letter`` unless you specify it -
hence the names printed above.)
RandomRouter
************
As the name implies this router type selects one of its routees randomly and forwards
the message it receives to this routee.
This procedure will happen each time it receives a message.
Code example:
.. includecode:: code/akka/docs/jrouting/ParentActor.java#randomRouter
When run you should see a similar output to this:
.. code-block:: scala
Received message '1' in actor $e
Received message '2' in actor $c
Received message '4' in actor $b
Received message '5' in actor $d
Received message '3' in actor $e
Received message '6' in actor $c
Received message '7' in actor $d
Received message '8' in actor $e
Received message '9' in actor $d
Received message '10' in actor $d
The result from running the random router should be different, or at least random, every time you run it.
Try to run it a couple of times to verify its behavior if you don't trust us.
SmallestMailboxRouter
*********************
A router that tries to send to the non-suspended routee with the fewest messages in its mailbox.
The selection is done in this order:
* pick any idle routee (not processing message) with empty mailbox
* pick any routee with empty mailbox
* pick routee with fewest pending messages in mailbox
* pick any remote routee, remote actors are considered lowest priority,
since their mailbox size is unknown
Code example:
.. includecode:: code/akka/docs/jrouting/ParentActor.java#smallestMailboxRouter
BroadcastRouter
***************
A broadcast router forwards the message it receives to *all* its routees.
Code example:
.. includecode:: code/akka/docs/jrouting/ParentActor.java#broadcastRouter
When run you should see a similar output to this:
.. code-block:: scala
Received message 'this is a broadcast message' in actor $f
Received message 'this is a broadcast message' in actor $d
Received message 'this is a broadcast message' in actor $e
Received message 'this is a broadcast message' in actor $c
Received message 'this is a broadcast message' in actor $b
As you can see above, each of the routees, five in total, received the broadcast message.
ScatterGatherFirstCompletedRouter
*********************************
The ScatterGatherFirstCompletedRouter will send the message on to all its routees as a future.
It then waits for the first result it gets back. This result will be sent back to the original sender.
Code example:
.. includecode:: code/akka/docs/jrouting/ParentActor.java#scatterGatherFirstCompletedRouter
When run you should see this:
.. code-block:: scala
The result of calculating Fibonacci for 10 is 55
From the output above you can't really see that all the routees performed the calculation, but they did!
The result you see is from the first routee that returned its calculation to the router.
Broadcast Messages
^^^^^^^^^^^^^^^^^^
There is a special type of message that will be sent to all routees regardless of the router.
This message is called ``Broadcast`` and is used in the following manner:
.. code-block:: java
router.tell(new Broadcast("Watch out for Davy Jones' locker"));
Only the actual message is forwarded to the routees, i.e. "Watch out for Davy Jones' locker" in the example above.
It is up to the routee implementation whether to handle the broadcast message or not.
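Since only the payload is delivered, a routee needs no special code for ``Broadcast``; a minimal
sketch of such a routee (the class name is made up for illustration) could look like this:

.. code-block:: java

  import akka.actor.UntypedActor;

  public class PayloadActor extends UntypedActor {
    public void onReceive(Object msg) {
      if (msg instanceof String) {
        // the routee sees the plain String payload, not the Broadcast wrapper
        System.out.println(String.format("Received '%s' in %s", msg, getSelf().path().name()));
      } else {
        unhandled(msg);
      }
    }
  }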
Dynamically Resizable Routers
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
All routers can be used with a fixed number of routees or with a resize strategy to adjust the number
of routees dynamically.
This is an example of how to create a resizable router that is defined in configuration:
.. includecode:: ../scala/code/akka/docs/routing/RouterViaConfigExample.scala#config-resize
.. includecode:: code/akka/docs/jrouting/RouterViaConfigExample.java#configurableRoutingWithResizer
Several more configuration options are available and described in the ``akka.actor.deployment.default.resizer``
section of the reference :ref:`configuration`.
This is an example of how to programmatically create a resizable router:
.. includecode:: code/akka/docs/jrouting/RouterViaProgramExample.java#programmaticRoutingWithResizer
*It is also worth pointing out that if you define the ``router`` in the configuration file then this value
will be used instead of any programmatically sent parameters.*
Custom Router
^^^^^^^^^^^^^
You can also create your own router should you not find any of the ones provided by Akka sufficient for your needs.
In order to roll your own router you have to fulfill certain criteria which are explained in this section.
The router created in this example is a simple vote counter. It will route the votes to specific vote counter actors.
In this case we only have two parties the Republicans and the Democrats. We would like a router that forwards all
democrat related messages to the Democrat actor and all republican related messages to the Republican actor.
We begin with defining the class:
.. includecode:: code/akka/docs/jrouting/CustomRouterDocTestBase.java#crRouter
:exclude: crRoute
The next step is to implement the ``createCustomRoute`` method in the class just defined:
.. includecode:: code/akka/docs/jrouting/CustomRouterDocTestBase.java#crRoute
As you can see above, we start off by creating the routees and putting them in a collection.
Make sure that you don't forget to implement the line below, as it is *really* important.
It registers the routees internally, and failing to call this method will
cause an ``ActorInitializationException`` to be thrown when the router is used.
Therefore always make sure to do the following in your custom router:
.. includecode:: code/akka/docs/jrouting/CustomRouterDocTestBase.java#crRegisterRoutees
The routing logic is where your magic sauce is applied. In our example it inspects the message types
and forwards to the correct routee based on this:
.. includecode:: code/akka/docs/jrouting/CustomRouterDocTestBase.java#crRoutingLogic
As you can see above, what's returned is the ``CustomRoute`` function, which defines the mapping
from incoming sender/message to a ``List`` of ``Destination(sender, routee)``.
The sender is what "parent" the routee should see - changing this could be useful if you, for example, want
an actor other than the original sender to mediate the result of the routee (if there is a result).
For more information about how to alter the original sender we refer to the source code of
`ScatterGatherFirstCompletedRouter <https://github.com/jboner/akka/blob/master/akka-actor/src/main/scala/akka/routing/Routing.scala#L375>`_
All in all the custom router looks like this:
.. includecode:: code/akka/docs/jrouting/CustomRouterDocTestBase.java#CustomRouter
If you are interested in how to use the VoteCountRouter it looks like this:
.. includecode:: code/akka/docs/jrouting/CustomRouterDocTestBase.java#crTest
Configured Custom Router
************************
It is possible to define configuration properties for custom routers. In the ``router`` property of the deployment
configuration you define the fully qualified class name of the router class. The router class must extend
``akka.routing.CustomRouterConfig`` and have a constructor with a ``com.typesafe.config.Config`` parameter.
The deployment section of the configuration is passed to the constructor.
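As a rough sketch, assuming made-up names (``com.example.MyConfiguredRouter`` and the ``/myrouted``
deployment path) and a deliberately trivial single-routee route, such a router and its deployment
section could look like this:

.. code-block:: java

  package com.example;

  import java.util.Arrays;
  import akka.actor.ActorRef;
  import akka.actor.Props;
  import akka.routing.CustomRoute;
  import akka.routing.CustomRouterConfig;
  import akka.routing.Destination;
  import akka.routing.RouteeProvider;
  import com.typesafe.config.Config;

  public class MyConfiguredRouter extends CustomRouterConfig {
    private final Config deployment;

    // the deployment section of the configuration is passed to this constructor
    public MyConfiguredRouter(Config deployment) {
      this.deployment = deployment;
    }

    @Override
    public CustomRoute createCustomRoute(Props props, RouteeProvider routeeProvider) {
      // create a single routee from the routed actor's Props and register it
      final ActorRef routee = routeeProvider.context().actorOf(props, "routee");
      routeeProvider.registerRoutees(Arrays.asList(new ActorRef[] { routee }));
      return new CustomRoute() {
        @Override
        public Iterable<Destination> destinationsFor(ActorRef sender, Object msg) {
          // trivially send everything to the single routee
          return Arrays.asList(new Destination[] { new Destination(sender, routee) });
        }
      };
    }
  }

The matching deployment section would then name the router class::

  akka.actor.deployment {
    /myrouted {
      router = "com.example.MyConfiguredRouter"
    }
  }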
Custom Resizer
**************
A router with a dynamically resizable number of routees is implemented by providing an ``akka.routing.Resizer``
in the ``resizer`` method of the ``RouterConfig``. See ``akka.routing.DefaultResizer`` for inspiration
of how to write your own resize strategy.
You can also get some more routing ideas by looking at the corresponding :ref:`routing-scala` documentation.

View file

@ -21,7 +21,7 @@ Usage
Configuration
-------------
For Akka to know which ``Serializer`` to use for what, you need edit your Akka Configuration,
For Akka to know which ``Serializer`` to use for what, you need to edit your :ref:`configuration`,
in the "akka.actor.serializers"-section you bind names to implementations of the ``akka.serialization.Serializer``
you wish to use, like this:
@ -90,5 +90,5 @@ which is done by extending ``akka.serialization.JSerializer``, like this:
:include: imports,my-own-serializer
:exclude: ...
Then you only need to fill in the blanks, bind it to a name in your Akka Configuration and then
Then you only need to fill in the blanks, bind it to a name in your :ref:`configuration` and then
list which classes should be serialized using it.

View file

@ -485,6 +485,16 @@ Use it like this:
.. includecode:: code/akka/docs/actor/UntypedActorDocTestBase.java
:include: import-actors,poison-pill
Graceful Stop
-------------
:meth:`gracefulStop` is useful if you need to wait for termination or compose ordered
termination of several actors:
.. includecode:: code/akka/docs/actor/UntypedActorDocTestBase.java
:include: import-gracefulStop,gracefulStop
.. _UntypedActor.HotSwap:
HotSwap

View file

@ -7,49 +7,8 @@ HTTP
.. contents:: :local:
When deploying in a servlet container:
--------------------------------------------
If you deploy Akka in a JEE container, don't forget to create an Akka initialization and cleanup hook:
.. code-block:: scala
package com.my //<--- your own package
import akka.util.AkkaLoader
import akka.cluster.BootableRemoteActorService
import akka.actor.BootableActorLoaderService
import javax.servlet.{ServletContextListener, ServletContextEvent}
/**
* This class can be added to web.xml mappings as a listener to start and postStop Akka.
*<web-app>
* ...
* <listener>
* <listener-class>com.my.Initializer</listener-class>
* </listener>
* ...
*</web-app>
*/
class Initializer extends ServletContextListener {
lazy val loader = new AkkaLoader
def contextDestroyed(e: ServletContextEvent): Unit = loader.shutdown
def contextInitialized(e: ServletContextEvent): Unit =
loader.boot(true, new BootableActorLoaderService with BootableRemoteActorService) //<--- Important
// loader.boot(true, new BootableActorLoaderService {}) // If you don't need akka-remote
}
For Java users, it's currently only possible to use BootableActorLoaderService, but you'll need to use: akka.actor.DefaultBootableActorLoaderService
Play!
-----
Then you just declare it in your web.xml:
.. code-block:: xml
<web-app>
...
<listener>
<listener-class>your.package.Initializer</listener-class>
</listener>
...
</web-app>
Akka recommends using `Play! Mini <https://github.com/typesafehub/play2-mini>`_.

View file

@ -4,6 +4,10 @@
Migration Guide 1.3.x to 2.0.x
################################
.. sidebar:: Contents
.. contents:: :local:
Actors
======
@ -13,9 +17,177 @@ significant amount of time.
Detailed migration guide will be written.
Migration Kit
=============
Nobody likes a big refactoring that takes several days to complete before
anything is able to run again. Therefore we provide a migration kit that
makes it possible to do the migration changes in smaller steps.
The migration kit only covers the most common usage of Akka. It is not intended
as a final solution. The whole migration kit is deprecated and will be removed in
Akka 2.1.
The migration kit is provided in separate jar files. Add the following dependency::
"com.typesafe.akka" % "akka-actor-migration" % "2.0-SNAPSHOT"
The first step of the migration is to do some trivial replacements.
Search and replace the following (be careful with the non-qualified names):
==================================== ====================================
Search Replace with
==================================== ====================================
``akka.actor.Actor`` ``akka.actor.OldActor``
``extends Actor`` ``extends OldActor``
``akka.actor.Scheduler`` ``akka.actor.OldScheduler``
``Scheduler`` ``OldScheduler``
``akka.event.EventHandler`` ``akka.event.OldEventHandler``
``EventHandler`` ``OldEventHandler``
``akka.config.Config`` ``akka.config.OldConfig``
``Config`` ``OldConfig``
==================================== ====================================
For Scala users the migration kit also contains some implicit conversions to be
able to use some of the old methods. These conversions are useful in tests or other
code used outside of actors.
::
import akka.migration._
Thereafter you need to fix compilation errors that are not handled by the migration
kit, such as:
* Definition of supervisors
* Definition of dispatchers
* ActorRegistry
When everything compiles you continue by replacing/removing the ``OldXxx`` classes
one-by-one from the migration kit with appropriate migration.
When using the migration kit there will be one global actor system, which loads
the configuration ``akka.conf`` from the same locations as in Akka 1.x.
This means that while you are using the migration kit you should not create your
own ``ActorSystem``, but instead use the ``akka.actor.GlobalActorSystem``.
In order to voluntarily exit the JVM you must ``shutdown`` the ``GlobalActorSystem``.
The last task of the migration is to create your own ``ActorSystem``.
Unordered Collection of Migration Items
=======================================
Creating and starting actors
----------------------------
Actors are created by passing a ``Props`` instance into the ``actorOf`` factory method of
an ``ActorRefProvider``, i.e. the ``ActorSystem`` or ``ActorContext``.
Use the system to create top level actors. Use the context to
create actors from other actors. The difference is how the supervisor hierarchy is arranged.
When using the context the current actor will be the supervisor of the created child actor.
When using the system it will be a top level actor, supervised by the system
(the internal guardian actor).
``ActorRef.start()`` has been removed. Actors are now started automatically when created.
Remove all invocations of ``ActorRef.start()``.
v1.3::
val myActor = Actor.actorOf[MyActor]
myActor.start()
v2.0::
// top level actor
val firstActor = system.actorOf(Props[FirstActor], name = "first")
// child actor
class FirstActor extends Actor {
val myActor = context.actorOf(Props[MyActor], name = "myactor")
Documentation:
* :ref:`actors-scala`
* :ref:`untyped-actors-java`
Stopping actors
---------------
``ActorRef.stop()`` has been moved. Use ``ActorSystem`` or ``ActorContext`` to stop actors.
v1.3::
actorRef.stop()
self.stop()
actorRef ! PoisonPill
v2.0::
context.stop(someChild)
context.stop(self)
system.stop(actorRef)
actorRef ! PoisonPill
*Stop all actors*
v1.3::
ActorRegistry.shutdownAll()
v2.0::
system.shutdown()
Documentation:
* :ref:`actors-scala`
* :ref:`untyped-actors-java`
Identifying Actors
------------------
In v1.3 actors have ``uuid`` and ``id`` fields. In v2.0 each actor has a unique logical ``path``.
The ``ActorRegistry`` has been replaced by actor paths and lookup with
``actorFor`` in ``ActorRefProvider`` (``ActorSystem`` or ``ActorContext``).
v1.3::
val actor = Actor.registry.actorFor(uuid)
val actors = Actor.registry.actorsFor(id)
v2.0::
val actor = context.actorFor("/user/serviceA/aggregator")
Documentation:
* :ref:`addressing`
* :ref:`actors-scala`
* :ref:`untyped-actors-java`
Reply to messages
-----------------
``self.channel`` has been replaced with unified reply mechanism using ``sender`` (Scala)
or ``getSender()`` (Java). This works for both tell (!) and ask (?).
v1.3::
self.channel ! result
self.channel tryTell result
self.reply(result)
self.tryReply(result)
v2.0::
sender ! result
Documentation:
* :ref:`actors-scala`
* :ref:`untyped-actors-java`
``ActorRef.ask()``
------------------
@ -27,3 +199,186 @@ determines when the actor will stop itself and hence closes the window for a
reply to be received; it is independent of the timeout applied when awaiting
completion of the :class:`Future`, however, the actor will complete the
:class:`Future` with an :class:`AskTimeoutException` when it stops itself.
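For orientation, here is a minimal v2.0 usage sketch in Java (``actorRef`` and the message
are placeholders) using ``akka.pattern.Patterns.ask``, which returns a :class:`Future` that is
completed with the reply or with an :class:`AskTimeoutException`::

    import static akka.pattern.Patterns.ask;
    import akka.dispatch.Await;
    import akka.dispatch.Future;
    import akka.util.Duration;
    import akka.util.Timeout;

    Timeout timeout = new Timeout(Duration.parse("5 seconds"));
    // completed with the reply, or with an AskTimeoutException on timeout
    Future<Object> future = ask(actorRef, "hello", timeout);
    Object result = Await.result(future, timeout.duration());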
Documentation:
* :ref:`actors-scala`
* :ref:`untyped-actors-java`
ActorPool
---------
The ActorPool has been replaced by dynamically resizable routers.
Documentation:
* :ref:`routing-scala`
* :ref:`routing-java`
``UntypedActor.getContext()`` (Java API only)
---------------------------------------------
``getContext()`` in the Java API for UntypedActor is renamed to
``getSelf()``.
v1.3::
actorRef.tell("Hello", getContext());
v2.0::
actorRef.tell("Hello", getSelf());
Documentation:
* :ref:`untyped-actors-java`
Logging
-------
The EventHandler API has been replaced by LoggingAdapter, which publishes log messages
to the event bus. You can still plug in your own actor as an event listener with the
``akka.event-handlers`` configuration property.
v1.3::
EventHandler.error(exception, this, message)
EventHandler.warning(this, message)
EventHandler.info(this, message)
EventHandler.debug(this, message)
EventHandler.debug(this, "Processing took %s ms".format(duration))
v2.0::
import akka.event.Logging
val log = Logging(context.system, this)
log.error(exception, message)
log.warning(message)
log.info(message)
log.debug(message)
log.debug("Processing took {} ms", duration)
Documentation:
* :ref:`logging-scala`
* :ref:`logging-java`
* :ref:`event-bus-scala`
* :ref:`event-bus-java`
Supervision
-----------
Akka v2.0 implements parental supervision. Actors can only be created by other actors — where the top-level
actor is provided by the library — and each created actor is supervised by its parent.
In contrast to the special supervision relationship between parent and child, each actor may monitor any
other actor for termination.
v1.3::
self.link(actorRef)
self.unlink(actorRef)
v2.0::
class WatchActor extends Actor {
val actorRef = ...
// Terminated message will be delivered when the actorRef actor
// is stopped
context.watch(actorRef)
val supervisedChild = context.actorOf(Props[ChildActor])
def receive = {
case Terminated(`actorRef`) ⇒ ...
}
}
Note that ``link`` in v1.3 established a supervision relation, which ``watch`` does not.
``watch`` is only a way to get a notification, the ``Terminated`` message, when the monitored
actor has been stopped.
*Reference to the supervisor*
v1.3::
self.supervisor
v2.0::
context.parent
*Fault handling strategy*
v1.3::
val supervisor = Supervisor(
SupervisorConfig(
AllForOneStrategy(List(classOf[Exception]), 3, 1000),
Supervise(
actorOf[MyActor1],
Permanent) ::
Supervise(
actorOf[MyActor2],
Permanent) ::
Nil))
v2.0::
val strategy = OneForOneStrategy({
case _: ArithmeticException ⇒ Resume
case _: NullPointerException ⇒ Restart
case _: IllegalArgumentException ⇒ Stop
case _: Exception ⇒ Escalate
}: Decider, maxNrOfRetries = Some(10), withinTimeRange = Some(60000))
val supervisor = system.actorOf(Props[Supervisor].withFaultHandler(strategy), "supervisor")
Documentation:
* :ref:`supervision`
* :ref:`fault-tolerance-java`
* :ref:`fault-tolerance-scala`
* :ref:`actors-scala`
* :ref:`untyped-actors-java`
Spawn
-----
``spawn`` has been removed and can be implemented like this, if needed. Be careful to not
access any shared mutable state closed over by the body.
::
def spawn(body: ⇒ Unit) {
system.actorOf(Props(ctx ⇒ { case "go" ⇒ try body finally ctx.stop(ctx.self) })) ! "go"
}
Documentation:
* :ref:`jmm`
HotSwap
-------
In v2.0 the ``become`` and ``unbecome`` methods are located in ``ActorContext``, i.e. ``context.become`` and ``context.unbecome``.
The special ``HotSwap`` and ``RevertHotswap`` messages in v1.3 have been removed. Similar behavior can be
implemented with your own message and by using ``context.become`` and ``context.unbecome``
in the actor receiving the message.
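For the Java API the same is achieved with ``getContext().become`` and ``getContext().unbecome``;
a minimal sketch (the messages are made up, and the ``Procedure``-based signature of the Java API
is assumed)::

    import akka.actor.UntypedActor;
    import akka.japi.Procedure;

    public class HotSwapActor extends UntypedActor {
      Procedure<Object> angry = new Procedure<Object>() {
        public void apply(Object msg) {
          if (msg.equals("calmdown")) {
            getContext().unbecome();   // revert to the original behavior
          } else {
            getSender().tell("I am angry");
          }
        }
      };

      public void onReceive(Object msg) {
        if (msg.equals("upset")) {
          getContext().become(angry);  // swap in the new behavior
        } else {
          unhandled(msg);
        }
      }
    }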
* :ref:`actors-scala`
* :ref:`untyped-actors-java`
More to be written
------------------
* Futures
* Dispatchers
* STM
* TypedActors
* Routing
* Remoting
* Scheduler
* Configuration
* ...?

View file

@ -538,6 +538,15 @@ stop the actor when the message is processed. ``PoisonPill`` is enqueued as
ordinary messages and will be handled after messages that were already queued
in the mailbox.
Graceful Stop
-------------
:meth:`gracefulStop` is useful if you need to wait for termination or compose ordered
termination of several actors:
.. includecode:: code/akka/docs/actor/ActorDocSpec.scala#gracefulStop
.. _Actor.HotSwap:
Become/Unbecome

View file

@ -7,14 +7,11 @@ package akka.docs.actor
import akka.actor.Actor
import akka.actor.Props
import akka.event.Logging
import akka.dispatch.Future
//#imports1
//#imports2
import akka.dispatch.Future
import akka.actor.ActorSystem
//#imports2
import org.scalatest.{ BeforeAndAfterAll, WordSpec }
import org.scalatest.matchers.MustMatchers
import akka.testkit._
@ -114,7 +111,6 @@ object SwapperApp extends App {
//#swapper
//#receive-orElse
import akka.actor.Actor.Receive
abstract class GenericActor extends Actor {
// to be defined in subclassing actor
@ -169,10 +165,10 @@ class ActorDocSpec extends AkkaSpec(Map("akka.loglevel" -> "INFO")) {
system.eventStream.subscribe(testActor, classOf[Logging.Info])
myActor ! "test"
expectMsgPF(1 second) { case Logging.Info(_, "received test") true }
expectMsgPF(1 second) { case Logging.Info(_, _, "received test") true }
myActor ! "unknown"
expectMsgPF(1 second) { case Logging.Info(_, "received unknown message") true }
expectMsgPF(1 second) { case Logging.Info(_, _, "received unknown message") true }
system.eventStream.unsubscribe(testActor)
system.eventStream.publish(TestEvent.UnMute(filter))
@ -201,11 +197,9 @@ class ActorDocSpec extends AkkaSpec(Map("akka.loglevel" -> "INFO")) {
val props3 = Props(new MyActor)
val props4 = Props(
creator = { () new MyActor },
dispatcher = "my-dispatcher",
timeout = Timeout(100))
dispatcher = "my-dispatcher")
val props5 = props1.withCreator(new MyActor)
val props6 = props5.withDispatcher("my-dispatcher")
val props7 = props6.withTimeout(Timeout(100))
//#creating-props-config
}
@ -321,4 +315,22 @@ class ActorDocSpec extends AkkaSpec(Map("akka.loglevel" -> "INFO")) {
a ! "kill"
expectMsg("finished")
}
"using pattern gracefulStop" in {
val actorRef = system.actorOf(Props[MyActor])
//#gracefulStop
import akka.pattern.gracefulStop
import akka.dispatch.Await
import akka.actor.ActorTimeoutException
try {
val stopped: Future[Boolean] = gracefulStop(actorRef, 5 seconds)(system)
Await.result(stopped, 6 seconds)
// the actor has been stopped
} catch {
case e: ActorTimeoutException // the actor wasn't stopped within 5 seconds
}
//#gracefulStop
}
}

View file

@ -6,7 +6,7 @@ package akka.docs.actor
//#imports
import akka.dispatch.{ Promise, Future, Await }
import akka.util.duration._
import akka.actor.{ ActorContext, TypedActor, Props }
import akka.actor.{ ActorContext, TypedActor, TypedProps }
//#imports
@ -100,14 +100,11 @@ class TypedActorDocSpec extends AkkaSpec(Map("akka.loglevel" -> "INFO")) {
"create a typed actor" in {
//#typed-actor-create1
val mySquarer: Squarer =
TypedActor(system).typedActorOf[Squarer, SquarerImpl]()
TypedActor(system).typedActorOf(TypedProps[SquarerImpl]())
//#typed-actor-create1
//#typed-actor-create2
val otherSquarer: Squarer =
TypedActor(system).typedActorOf(classOf[Squarer],
new SquarerImpl("foo"),
Props(),
"name")
TypedActor(system).typedActorOf(TypedProps(classOf[Squarer], new SquarerImpl("foo")), "name")
//#typed-actor-create2
//#typed-actor-calls
@ -145,7 +142,7 @@ class TypedActorDocSpec extends AkkaSpec(Map("akka.loglevel" -> "INFO")) {
"supercharge" in {
//#typed-actor-supercharge-usage
val awesomeFooBar = TypedActor(system).typedActorOf[Foo with Bar, FooBar]()
val awesomeFooBar: Foo with Bar = TypedActor(system).typedActorOf(TypedProps[FooBar]())
awesomeFooBar.doFoo(10)
val f = awesomeFooBar.doBar("yes")

View file

@ -41,10 +41,8 @@ object DispatcherDocSpec {
type = Dispatcher
core-pool-size-factor = 8.0
max-pool-size-factor = 16.0
# Specifies the bounded capacity of the task queue
task-queue-size = 100
# Specifies which type of task queue will be used, can be "array" or "linked" (default)
task-queue-type = "array"
# Specifies the bounded capacity of the mailbox queue
mailbox-capacity = 100
throughput = 3
}
//#my-bounded-config

View file

@ -38,15 +38,33 @@ object LoggingDocSpec {
class MyEventListener extends Actor {
def receive = {
case InitializeLogger(_) sender ! LoggerInitialized
case Error(cause, logSource, message) // ...
case Warning(logSource, message) // ...
case Info(logSource, message) // ...
case Debug(logSource, message) // ...
case InitializeLogger(_) sender ! LoggerInitialized
case Error(cause, logSource, logClass, message) // ...
case Warning(logSource, logClass, message) // ...
case Info(logSource, logClass, message) // ...
case Debug(logSource, logClass, message) // ...
}
}
//#my-event-listener
//#my-source
import akka.event.LogSource
import akka.actor.ActorSystem
object MyType {
implicit val logSource: LogSource[AnyRef] = new LogSource[AnyRef] {
def genString(o: AnyRef): String = o.getClass.getName
override def getClazz(o: AnyRef): Class[_] = o.getClass
}
}
class MyType(system: ActorSystem) {
import MyType._
import akka.event.Logging
val log = Logging(system, this)
}
//#my-source
}
class LoggingDocSpec extends AkkaSpec {

View file

@ -1,24 +0,0 @@
/**
* Copyright (C) 2009-2011 Typesafe Inc. <http://www.typesafe.com>
*/
package akka.docs.routing
import akka.routing.{ BasicNoBackoffFilter, SmallestMailboxSelector, DefaultActorPool }
import akka.actor.{ ActorRef, Props, Actor }
//#testPool
class TestPool extends Actor with DefaultActorPool with SmallestMailboxSelector with BasicNoBackoffFilter {
def capacity(delegates: Seq[ActorRef]) = 5
protected def receive = _route
def rampupRate = 0.1
def selectionCount = 1
def partialFill = true
def instance(defaults: Props) = context.actorOf(defaults.withCreator(new Actor {
def receive = {
case _ // do something
}
}))
}
//#testPool

View file

@ -1,26 +0,0 @@
/**
* Copyright (C) 2009-2011 Typesafe Inc. <http://www.typesafe.com>
*/
package akka.docs.routing
import akka.actor.ActorRef
//#boundedCapacitor
trait BoundedCapacitor {
def lowerBound: Int
def upperBound: Int
def capacity(delegates: Seq[ActorRef]): Int = {
val current = delegates length
var delta = _eval(delegates)
val proposed = current + delta
if (proposed < lowerBound) delta += (lowerBound - proposed)
else if (proposed > upperBound) delta -= (proposed - upperBound)
delta
}
protected def _eval(delegates: Seq[ActorRef]): Int
}
//#boundedCapacitor

View file

@ -1,19 +0,0 @@
/**
* Copyright (C) 2009-2011 Typesafe Inc. <http://www.typesafe.com>
*/
package akka.docs.routing
import akka.routing.ActorPool
import akka.actor.ActorRef
//#capacityStrategy
trait CapacityStrategy {
import ActorPool._
def pressure(delegates: Seq[ActorRef]): Int
def filter(pressure: Int, capacity: Int): Int
protected def _eval(delegates: Seq[ActorRef]): Int =
filter(pressure(delegates), delegates.size)
}
//#capacityStrategy

View file

@ -9,6 +9,7 @@ import akka.actor.{ Props, Actor }
import akka.util.duration._
import akka.dispatch.Await
import akka.pattern.ask
import akka.routing.SmallestMailboxRouter
case class FibonacciNumber(nbr: Int)
@ -47,7 +48,7 @@ class ParentActor extends Actor {
case "rrr"
//#roundRobinRouter
val roundRobinRouter =
context.actorOf(Props[PrintlnActor].withRouter(RoundRobinRouter()), "router")
context.actorOf(Props[PrintlnActor].withRouter(RoundRobinRouter(5)), "router")
1 to 10 foreach {
i roundRobinRouter ! i
}
@ -55,22 +56,30 @@ class ParentActor extends Actor {
case "rr"
//#randomRouter
val randomRouter =
context.actorOf(Props[PrintlnActor].withRouter(RandomRouter()), "router")
context.actorOf(Props[PrintlnActor].withRouter(RandomRouter(5)), "router")
1 to 10 foreach {
i randomRouter ! i
}
//#randomRouter
case "smr"
//#smallestMailboxRouter
val smallestMailboxRouter =
context.actorOf(Props[PrintlnActor].withRouter(SmallestMailboxRouter(5)), "router")
1 to 10 foreach {
i smallestMailboxRouter ! i
}
//#smallestMailboxRouter
case "br"
//#broadcastRouter
val broadcastRouter =
context.actorOf(Props[PrintlnActor].withRouter(BroadcastRouter()), "router")
context.actorOf(Props[PrintlnActor].withRouter(BroadcastRouter(5)), "router")
broadcastRouter ! "this is a broadcast message"
//#broadcastRouter
case "sgfcr"
//#scatterGatherFirstCompletedRouter
val scatterGatherFirstCompletedRouter = context.actorOf(
Props[FibonacciActor].withRouter(ScatterGatherFirstCompletedRouter(within = 2 seconds)),
"router")
Props[FibonacciActor].withRouter(ScatterGatherFirstCompletedRouter(
nrOfInstances = 5, within = 2 seconds)), "router")
implicit val timeout = context.system.settings.ActorTimeout
val futureResult = scatterGatherFirstCompletedRouter ? FibonacciNumber(10)
val result = Await.result(futureResult, timeout.duration)

Some files were not shown because too many files have changed in this diff Show more