/**
 * Copyright (C) 2009-2012 Typesafe Inc. <http://www.typesafe.com>
 */

package akka.dispatch

import java.util.concurrent._
import akka.event.Logging.{ Error, LogEventException }
import akka.actor._
import akka.event.EventStream
import com.typesafe.config.Config
import akka.serialization.SerializationExtension
import akka.util.{ Unsafe, Index }
import scala.annotation.tailrec
import scala.concurrent.forkjoin.{ ForkJoinTask, ForkJoinPool }
import scala.concurrent.duration.Duration
import scala.concurrent.{ ExecutionContext, Await, Awaitable }
import scala.util.control.NonFatal
import scala.concurrent.duration.FiniteDuration

final case class Envelope private (val message: Any, val sender: ActorRef)

object Envelope {
  def apply(message: Any, sender: ActorRef, system: ActorSystem): Envelope = {
    val msg = message.asInstanceOf[AnyRef]
    if (msg eq null) throw new InvalidMessageException("Message is null")
    if (system.settings.SerializeAllMessages && !msg.isInstanceOf[NoSerializationVerificationNeeded]) {
      val ser = SerializationExtension(system)
      ser.deserialize(ser.serialize(msg).get, msg.getClass).get
    }
    new Envelope(message, sender)
  }
}
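
// Illustrative sketch (not part of the original file): Envelope.apply is the single point where
// messages are validated before they reach a mailbox. With the setting backing SerializeAllMessages
// enabled ("akka.actor.serialize-messages = on"), every non-exempt message is round-tripped through
// the configured serializer here, so a non-serializable message fails at the sender rather than
// later, e.g. on a remote node. The system name below is hypothetical:
//
//   val system = ActorSystem("serialization-check")
//   Envelope("hello", system.deadLetters, system)     // fine: String has a serializer
//   Envelope(new AnyRef, system.deadLetters, system)  // fails if no serializer is bound for it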

/**
 * This message is sent directly after the Supervise system message in order
 * to form a barrier wrt. the first real message sent by the child, so that e.g.
 * Failed() cannot overtake Supervise(). Processing this does nothing.
 *
 * Detailed explanation:
 *
 * The race happens because Supervise and Failed may be queued between the
 * parent's check for system messages and dequeue(). Thus, if the parent
 * processes the NullMessage first (by way of that tiny race window), it is
 * guaranteed to then find the Supervise system message in its mailbox prior
 * to turning its attention to the next real message.
 */
case object NullMessage extends AutoReceivedMessage

/**
 * INTERNAL API
 */
private[akka] object SystemMessage {
  @tailrec
  final def size(list: SystemMessage, acc: Int = 0): Int = {
    if (list eq null) acc else size(list.next, acc + 1)
  }

  @tailrec
  final def reverse(list: SystemMessage, acc: SystemMessage = null): SystemMessage = {
    if (list eq null) acc else {
      val next = list.next
      list.next = acc
      reverse(next, list)
    }
  }
}

/**
 * System messages are handled specially: they form their own queue within
 * each actor’s mailbox. This queue is encoded in the messages themselves to
 * avoid extra allocations and overhead. The next pointer is a normal var, and
 * it does not need to be volatile because in the enqueuing method its update
 * is immediately succeeded by a volatile write and all reads happen after the
 * volatile read in the dequeuing thread. Afterwards, the obtained list of
 * system messages is handled in a single thread only and never passed around,
 * hence no further synchronization is needed.
 *
 * INTERNAL API
 *
 * ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅
 */
private[akka] sealed trait SystemMessage extends PossiblyHarmful {
  @transient
  var next: SystemMessage = _
}

/**
 * INTERNAL API
 */
private[akka] case class Create(uid: Int) extends SystemMessage // sent to self from Dispatcher.register

/**
 * INTERNAL API
 */
private[akka] case class Recreate(cause: Throwable) extends SystemMessage // sent to self from ActorCell.restart

/**
 * INTERNAL API
 */
private[akka] case class Suspend() extends SystemMessage // sent to self from ActorCell.suspend

/**
 * INTERNAL API
 */
private[akka] case class Resume(causedByFailure: Throwable) extends SystemMessage // sent to self from ActorCell.resume

/**
 * INTERNAL API
 */
private[akka] case class Terminate() extends SystemMessage // sent to self from ActorCell.stop

/**
 * INTERNAL API
 */
private[akka] case class Supervise(child: ActorRef, async: Boolean, uid: Int) extends SystemMessage // sent to supervisor ActorRef from ActorCell.start

/**
 * INTERNAL API
 */
private[akka] case class ChildTerminated(child: ActorRef) extends SystemMessage // sent to supervisor from ActorCell.doTerminate

/**
 * INTERNAL API
 */
private[akka] case class Watch(watchee: ActorRef, watcher: ActorRef) extends SystemMessage // sent to establish a DeathWatch

/**
 * INTERNAL API
 */
private[akka] case class Unwatch(watchee: ActorRef, watcher: ActorRef) extends SystemMessage // sent to tear down a DeathWatch

/**
 * INTERNAL API
 */
private[akka] case object NoMessage extends SystemMessage // switched into the mailbox to signal termination
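
// Illustrative sketch (not part of the original file): system messages form an intrusive,
// singly linked list through their `next` field, with new messages pushed at the head. The
// helpers in the SystemMessage companion operate on that encoding:
//
//   val first: SystemMessage = Suspend()
//   val second: SystemMessage = Terminate()
//   second.next = first                              // push: newest element at the head
//   SystemMessage.size(second)                       // == 2
//   val oldestFirst = SystemMessage.reverse(second)  // head is `first` again
//
// This encoding is also why the same system message instance must never be sent to two
// actors: a single `next` pointer can only link it into one queue at a time.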

final case class TaskInvocation(eventStream: EventStream, runnable: Runnable, cleanup: () ⇒ Unit) extends Batchable {
  final override def isBatchable: Boolean = runnable match {
    case b: Batchable                           ⇒ b.isBatchable
    case _: scala.concurrent.OnCompleteRunnable ⇒ true
    case _                                      ⇒ false
  }

  def run(): Unit =
    try runnable.run() catch {
      case NonFatal(e) ⇒ eventStream.publish(Error(e, "TaskInvocation", this.getClass, e.getMessage))
    } finally cleanup()
}
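
// Illustrative sketch (not part of the original file): isBatchable tells the BatchingExecutor
// mixed into MessageDispatcher whether this task may be grouped into a batch. A Future callback
// (an OnCompleteRunnable) is batchable, an arbitrary Runnable is not:
//
//   val callback = new Runnable with scala.concurrent.OnCompleteRunnable { def run() = () }
//   val plain    = new Runnable { def run() = () }
//   TaskInvocation(system.eventStream, callback, () ⇒ ()).isBatchable  // true
//   TaskInvocation(system.eventStream, plain, () ⇒ ()).isBatchable     // false
//
// (`system` is an assumed ActorSystem in scope supplying the EventStream.)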

/**
 * INTERNAL API
 */
private[akka] trait LoadMetrics { self: Executor ⇒
  def atFullThrottle(): Boolean
}

/**
 * INTERNAL API
 */
private[akka] object MessageDispatcher {
  val UNSCHEDULED = 0 //WARNING DO NOT CHANGE THE VALUE OF THIS: It relies on the faster init of 0 in AbstractMessageDispatcher
  val SCHEDULED = 1
  val RESCHEDULED = 2

  // dispatcher debugging helper using println (see below)
  // since this is a compile-time constant, scalac will elide code behind if (MessageDispatcher.debug) (RK checked with 2.9.1)
  final val debug = false // Deliberately without type ascription to make it a compile-time constant
  lazy val actors = new Index[MessageDispatcher, ActorRef](16, _ compareTo _)
  def printActors: Unit = if (debug) {
    for {
      d ← actors.keys
      a ← { println(d + " inhabitants: " + d.inhabitants); actors.valueIterator(d) }
    } {
      val status = if (a.isTerminated) " (terminated)" else " (alive)"
      val messages = a match {
        case r: ActorRefWithCell ⇒ " " + r.underlying.numberOfMessages + " messages"
        case _                   ⇒ " " + a.getClass
      }
      val parent = a match {
        case i: InternalActorRef ⇒ ", parent: " + i.getParent
        case _                   ⇒ ""
      }
      println(" -> " + a + status + messages + parent)
    }
  }

  implicit def defaultDispatcher(implicit system: ActorSystem): MessageDispatcher = system.dispatcher
}
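
// Illustrative sketch (not part of the original file): defaultDispatcher lets an in-scope
// implicit ActorSystem supply the ExecutionContext (a MessageDispatcher) that e.g. Future
// creation requires:
//
//   implicit val system = ActorSystem("example")   // hypothetical system name
//   import MessageDispatcher.defaultDispatcher
//   scala.concurrent.Future { doWork() }           // runs on system.dispatcher; doWork() is a placeholder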

abstract class MessageDispatcher(val prerequisites: DispatcherPrerequisites) extends AbstractMessageDispatcher with BatchingExecutor with ExecutionContext {

  import MessageDispatcher._
  import AbstractMessageDispatcher.{ inhabitantsOffset, shutdownScheduleOffset }
  import prerequisites._

  @volatile private[this] var _inhabitantsDoNotCallMeDirectly: Long = _ // DO NOT TOUCH!
  @volatile private[this] var _shutdownScheduleDoNotCallMeDirectly: Int = _ // DO NOT TOUCH!

  @tailrec private final def addInhabitants(add: Long): Long = {
    val c = inhabitants
    val r = c + add
    if (Unsafe.instance.compareAndSwapLong(this, inhabitantsOffset, c, r)) r else addInhabitants(add)
  }

  final def inhabitants: Long = Unsafe.instance.getLongVolatile(this, inhabitantsOffset)

  private final def shutdownSchedule: Int = Unsafe.instance.getIntVolatile(this, shutdownScheduleOffset)
  private final def updateShutdownSchedule(expect: Int, update: Int): Boolean = Unsafe.instance.compareAndSwapInt(this, shutdownScheduleOffset, expect, update)
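
  // Illustrative sketch (not part of the original file): addInhabitants above is a classic
  // compare-and-swap retry loop, written with Unsafe and a precomputed field offset so the
  // counter stays a plain volatile field. The same logic expressed against a
  // java.util.concurrent.atomic.AtomicLong (a hypothetical stand-in) would read:
  //
  //   val inhabitantsCounter = new java.util.concurrent.atomic.AtomicLong(0L)
  //
  //   @scala.annotation.tailrec
  //   def addInhabitantsViaAtomic(add: Long): Long = {
  //     val c = inhabitantsCounter.get()
  //     val r = c + add
  //     if (inhabitantsCounter.compareAndSet(c, r)) r else addInhabitantsViaAtomic(add)
  //   }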

  /**
   * Creates and returns a mailbox for the given actor.
   */
  protected[akka] def createMailbox(actor: Cell): Mailbox //FIXME should this really be private[akka]?

  /**
   * Identifier of this dispatcher, corresponds to the full key
   * of the dispatcher configuration.
   */
  def id: String

  /**
   * Attaches the specified actor instance to this dispatcher, which includes
   * scheduling it to run for the first time (Create() is expected to have
   * been enqueued by the ActorCell upon mailbox creation).
   */
  final def attach(actor: ActorCell): Unit = {
    register(actor)
    registerForExecution(actor.mailbox, false, true)
  }

  /**
   * Detaches the specified actor instance from this dispatcher
   */
  final def detach(actor: ActorCell): Unit = try unregister(actor) finally ifSensibleToDoSoThenScheduleShutdown()

  final override protected def unbatchedExecute(r: Runnable): Unit = {
    val invocation = TaskInvocation(eventStream, r, taskCleanup)
    addInhabitants(+1)
    try {
      executeTask(invocation)
    } catch {
      case t: Throwable ⇒
        addInhabitants(-1)
        throw t
    }
  }

  override def reportFailure(t: Throwable): Unit = t match {
    case e: LogEventException ⇒ prerequisites.eventStream.publish(e.event)
    case _                    ⇒ prerequisites.eventStream.publish(Error(t, getClass.getName, getClass, t.getMessage))
  }

  @tailrec
  private final def ifSensibleToDoSoThenScheduleShutdown(): Unit = inhabitants match {
    case 0 ⇒
      shutdownSchedule match {
        case UNSCHEDULED ⇒
          if (updateShutdownSchedule(UNSCHEDULED, SCHEDULED)) scheduleShutdownAction()
          else ifSensibleToDoSoThenScheduleShutdown()
        case SCHEDULED ⇒
          if (updateShutdownSchedule(SCHEDULED, RESCHEDULED)) ()
          else ifSensibleToDoSoThenScheduleShutdown()
        case RESCHEDULED ⇒
      }
    case _ ⇒
  }

  private def scheduleShutdownAction(): Unit = {
    // IllegalStateException is thrown if scheduler has been shutdown
    try scheduler.scheduleOnce(shutdownTimeout, shutdownAction)(new ExecutionContext {
      override def execute(runnable: Runnable): Unit = runnable.run()
      override def reportFailure(t: Throwable): Unit = MessageDispatcher.this.reportFailure(t)
    }) catch {
      case _: IllegalStateException ⇒ shutdown()
    }
  }

  private final val taskCleanup: () ⇒ Unit = () ⇒ if (addInhabitants(-1) == 0) ifSensibleToDoSoThenScheduleShutdown()

  /**
   * If you override it, you must call it. But only ever once. See "attach" for the only invocation.
   *
   * INTERNAL API
   */
  protected[akka] def register(actor: ActorCell) {
    if (debug) actors.put(this, actor.self)
    addInhabitants(+1)
  }

  /**
   * If you override it, you must call it. But only ever once. See "detach" for the only invocation.
   *
   * INTERNAL API
   */
  protected[akka] def unregister(actor: ActorCell) {
    if (debug) actors.remove(this, actor.self)
    addInhabitants(-1)
    val mailBox = actor.swapMailbox(deadLetterMailbox)
    mailBox.becomeClosed()
    mailBox.cleanUp()
  }

  private val shutdownAction = new Runnable {
    @tailrec
    final def run() {
      shutdownSchedule match {
        case SCHEDULED ⇒
          try {
            if (inhabitants == 0) shutdown() //Warning, racy
          } finally {
            while (!updateShutdownSchedule(shutdownSchedule, UNSCHEDULED)) {}
          }
        case RESCHEDULED ⇒
          if (updateShutdownSchedule(RESCHEDULED, SCHEDULED)) scheduleShutdownAction()
          else run()
        case UNSCHEDULED ⇒
      }
    }
  }
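
  // Descriptive note (added, not in the original file): shutdownSchedule and shutdownAction
  // together form a small state machine. When the dispatcher becomes empty, UNSCHEDULED →
  // SCHEDULED arms the shutdown timer; becoming empty again while a timer is already pending
  // moves SCHEDULED → RESCHEDULED, recording that the pending run should re-arm rather than
  // shut down. When the timer fires: in SCHEDULED it shuts down (if still empty) and resets
  // to UNSCHEDULED, while in RESCHEDULED it flips back to SCHEDULED and arms a fresh timer,
  // effectively postponing shutdown by another shutdownTimeout.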

  /**
   * When the dispatcher no longer has any actors registered, how long will it wait until it shuts itself down,
   * defaulting to your akka config's "akka.actor.default-dispatcher.shutdown-timeout" or the default specified in
   * reference.conf
   *
   * INTERNAL API
   */
  protected[akka] def shutdownTimeout: FiniteDuration

  /**
   * After the call to this method, the dispatcher mustn't begin any new message processing for the specified reference
   */
  def suspend(actor: ActorCell): Unit = {
    val mbox = actor.mailbox
    if ((mbox.actor eq actor) && (mbox.dispatcher eq this))
      mbox.suspend()
  }

  /**
   * After the call to this method, the dispatcher must begin any new message processing for the specified reference
   */
  def resume(actor: ActorCell): Unit = {
    val mbox = actor.mailbox
    if ((mbox.actor eq actor) && (mbox.dispatcher eq this) && mbox.resume())
      registerForExecution(mbox, false, false)
  }

  /**
   * Will be called when the dispatcher is to queue an invocation for execution
   *
   * INTERNAL API
   */
  protected[akka] def systemDispatch(receiver: ActorCell, invocation: SystemMessage)

  /**
   * Will be called when the dispatcher is to queue an invocation for execution
   *
   * INTERNAL API
   */
  protected[akka] def dispatch(receiver: ActorCell, invocation: Envelope)

  /**
   * Suggest to register the provided mailbox for execution
   *
   * INTERNAL API
   */
  protected[akka] def registerForExecution(mbox: Mailbox, hasMessageHint: Boolean, hasSystemMessageHint: Boolean): Boolean

  // TODO check whether this should not actually be a property of the mailbox
  /**
   * INTERNAL API
   */
  protected[akka] def throughput: Int

  /**
   * INTERNAL API
   */
  protected[akka] def throughputDeadlineTime: Duration

  /**
   * INTERNAL API
   */
  @inline protected[akka] final val isThroughputDeadlineTimeDefined = throughputDeadlineTime.toMillis > 0

  /**
   * INTERNAL API
   */
  protected[akka] def executeTask(invocation: TaskInvocation)

  /**
   * Called one time every time an actor is detached from this dispatcher and this dispatcher has no actors left attached
   * Must be idempotent
   *
   * INTERNAL API
   */
  protected[akka] def shutdown(): Unit
}

/**
 * An ExecutorServiceConfigurator is a class that, given some prerequisites and a configuration, can create instances of ExecutorService
 */
abstract class ExecutorServiceConfigurator(config: Config, prerequisites: DispatcherPrerequisites) extends ExecutorServiceFactoryProvider

/**
 * Base class to be used for hooking in new dispatchers into Dispatchers.
 */
abstract class MessageDispatcherConfigurator(val config: Config, val prerequisites: DispatcherPrerequisites) {

  /**
   * Returns an instance of MessageDispatcher given the configuration.
   * Depending on the needs the implementation may return a new instance for
   * each invocation or return the same instance every time.
   */
  def dispatcher(): MessageDispatcher

  /**
   * Returns a factory for the [[akka.dispatch.Mailbox]] given the configuration.
   * The default implementation instantiates the [[akka.dispatch.MailboxType]] specified
   * as FQCN in the mailbox-type config property. If mailbox-type is unspecified (empty)
   * then [[akka.dispatch.UnboundedMailbox]] is used when capacity is < 1,
   * otherwise [[akka.dispatch.BoundedMailbox]].
   */
  def mailboxType(): MailboxType = {
    config.getString("mailbox-type") match {
      case "" ⇒
        if (config.getInt("mailbox-capacity") < 1) UnboundedMailbox()
        else new BoundedMailbox(prerequisites.settings, config)
      case "unbounded" ⇒ UnboundedMailbox()
      case "bounded"   ⇒ new BoundedMailbox(prerequisites.settings, config)
      case fqcn ⇒
        val args = List(classOf[ActorSystem.Settings] -> prerequisites.settings, classOf[Config] -> config)
        prerequisites.dynamicAccess.createInstanceFor[MailboxType](fqcn, args).recover({
          case exception ⇒
            throw new IllegalArgumentException(
              ("Cannot instantiate MailboxType [%s], defined in [%s], " +
                "make sure it has a constructor with [akka.actor.ActorSystem.Settings, com.typesafe.config.Config] parameters")
                .format(fqcn, config.getString("id")), exception)
        }).get
    }
  }
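
  // Illustrative sketch (not part of the original file): the keys read above come from the
  // dispatcher's own config section, so the branches can be exercised with a config fragment
  // like the following (section contents hypothetical):
  //
  //   import com.typesafe.config.ConfigFactory
  //   val cfg = ConfigFactory.parseString(
  //     """
  //     id = "my-dispatcher"      # made-up id, only used in error messages here
  //     mailbox-type = ""         # empty: decide by mailbox-capacity
  //     mailbox-capacity = 0      # < 1 gives UnboundedMailbox, otherwise BoundedMailbox
  //     """)
  //   // mailbox-type = "bounded"               ⇒ BoundedMailbox(prerequisites.settings, config)
  //   // mailbox-type = "com.example.MyMailbox" ⇒ reflective instantiation, requires a
  //   //                                           (ActorSystem.Settings, Config) constructor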

  def configureExecutor(): ExecutorServiceConfigurator = {
    config.getString("executor") match {
      case null | "" | "fork-join-executor" ⇒ new ForkJoinExecutorConfigurator(config.getConfig("fork-join-executor"), prerequisites)
      case "thread-pool-executor"           ⇒ new ThreadPoolExecutorConfigurator(config.getConfig("thread-pool-executor"), prerequisites)
      case fqcn ⇒
        val args = List(
          classOf[Config] -> config,
          classOf[DispatcherPrerequisites] -> prerequisites)
        prerequisites.dynamicAccess.createInstanceFor[ExecutorServiceConfigurator](fqcn, args).recover({
          case exception ⇒ throw new IllegalArgumentException(
            ("""Cannot instantiate ExecutorServiceConfigurator ("executor = [%s]"), defined in [%s],
                make sure it has an accessible constructor with a [%s,%s] signature""")
              .format(fqcn, config.getString("id"), classOf[Config], classOf[DispatcherPrerequisites]), exception)
        }).get
    }
  }
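
  // Illustrative sketch (not part of the original file): the "executor" setting above selects
  // the ExecutorServiceConfigurator for this dispatcher:
  //
  //   executor = ""                            // or "fork-join-executor": ForkJoinExecutorConfigurator
  //   executor = "thread-pool-executor"        // ThreadPoolExecutorConfigurator
  //   executor = "com.example.MyConfigurator"  // hypothetical FQCN, needs a
  //                                            // (Config, DispatcherPrerequisites) constructor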
}

class ThreadPoolExecutorConfigurator(config: Config, prerequisites: DispatcherPrerequisites) extends ExecutorServiceConfigurator(config, prerequisites) {

  val threadPoolConfig: ThreadPoolConfig = createThreadPoolConfigBuilder(config, prerequisites).config

  protected def createThreadPoolConfigBuilder(config: Config, prerequisites: DispatcherPrerequisites): ThreadPoolConfigBuilder = {
    ThreadPoolConfigBuilder(ThreadPoolConfig())
      .setKeepAliveTime(Duration(config getMilliseconds "keep-alive-time", TimeUnit.MILLISECONDS))
      .setAllowCoreThreadTimeout(config getBoolean "allow-core-timeout")
      .setCorePoolSizeFromFactor(config getInt "core-pool-size-min", config getDouble "core-pool-size-factor", config getInt "core-pool-size-max")
      .setMaxPoolSizeFromFactor(config getInt "max-pool-size-min", config getDouble "max-pool-size-factor", config getInt "max-pool-size-max")
      .configure(
        Some(config getInt "task-queue-size") flatMap {
          case size if size > 0 ⇒
            Some(config getString "task-queue-type") map {
              case "array"       ⇒ ThreadPoolConfig.arrayBlockingQueue(size, false) //TODO config fairness?
              case "" | "linked" ⇒ ThreadPoolConfig.linkedBlockingQueue(size)
              case x             ⇒ throw new IllegalArgumentException("[%s] is not a valid task-queue-type [array|linked]!" format x)
            } map { qf ⇒ (q: ThreadPoolConfigBuilder) ⇒ q.setQueueFactory(qf) }
          case _ ⇒ None
        })
  }

  def createExecutorServiceFactory(id: String, threadFactory: ThreadFactory): ExecutorServiceFactory =
    threadPoolConfig.createExecutorServiceFactory(id, threadFactory)
}
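
// Illustrative sketch (not part of the original file): the builder above is driven by the
// "thread-pool-executor" sub-section of a dispatcher's configuration; a fragment touching
// every key read here (values hypothetical):
//
//   thread-pool-executor {
//     keep-alive-time = 60s
//     allow-core-timeout = on
//     core-pool-size-min = 8
//     core-pool-size-factor = 3.0
//     core-pool-size-max = 64
//     max-pool-size-min = 8
//     max-pool-size-factor = 3.0
//     max-pool-size-max = 64
//     task-queue-size = -1         # <= 0 keeps the default unbounded queue
//     task-queue-type = "linked"   # or "array"
//   }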

object ForkJoinExecutorConfigurator {

  /**
   * INTERNAL AKKA USAGE ONLY
   */
  final class AkkaForkJoinPool(parallelism: Int,
                               threadFactory: ForkJoinPool.ForkJoinWorkerThreadFactory,
                               unhandledExceptionHandler: Thread.UncaughtExceptionHandler)
    extends ForkJoinPool(parallelism, threadFactory, unhandledExceptionHandler, true) with LoadMetrics {
    override def execute(r: Runnable): Unit = r match {
      case m: Mailbox ⇒ super.execute(new MailboxExecutionTask(m))
      case other      ⇒ super.execute(other)
    }

    def atFullThrottle(): Boolean = this.getActiveThreadCount() >= this.getParallelism()
  }

  /**
   * INTERNAL AKKA USAGE ONLY
   */
  final class MailboxExecutionTask(mailbox: Mailbox) extends ForkJoinTask[Unit] {
    final override def setRawResult(u: Unit): Unit = ()
    final override def getRawResult(): Unit = ()
    final override def exec(): Boolean = try { mailbox.run; true } catch {
      case anything: Throwable ⇒
        val t = Thread.currentThread
        t.getUncaughtExceptionHandler match {
          case null ⇒
          case some ⇒ some.uncaughtException(t, anything)
        }
        throw anything
    }
  }
}

class ForkJoinExecutorConfigurator(config: Config, prerequisites: DispatcherPrerequisites) extends ExecutorServiceConfigurator(config, prerequisites) {
  import ForkJoinExecutorConfigurator._

  def validate(t: ThreadFactory): ForkJoinPool.ForkJoinWorkerThreadFactory = t match {
    case correct: ForkJoinPool.ForkJoinWorkerThreadFactory ⇒ correct
    case x ⇒ throw new IllegalStateException("The prerequisite for the ForkJoinExecutorConfigurator is a ForkJoinPool.ForkJoinWorkerThreadFactory!")
  }

  class ForkJoinExecutorServiceFactory(val threadFactory: ForkJoinPool.ForkJoinWorkerThreadFactory,
                                       val parallelism: Int) extends ExecutorServiceFactory {
    def createExecutorService: ExecutorService = new AkkaForkJoinPool(parallelism, threadFactory, MonitorableThreadFactory.doNothing)
  }

  final def createExecutorServiceFactory(id: String, threadFactory: ThreadFactory): ExecutorServiceFactory = {
    val tf = threadFactory match {
      case m: MonitorableThreadFactory ⇒
        // add the dispatcher id to the thread names
        m.withName(m.name + "-" + id)
      case other ⇒ other
    }
    new ForkJoinExecutorServiceFactory(
      validate(tf),
      ThreadPoolConfig.scaledPoolSize(
        config.getInt("parallelism-min"),
        config.getDouble("parallelism-factor"),
        config.getInt("parallelism-max")))
  }
}
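
// Illustrative sketch (not part of the original file): the parallelism of the AkkaForkJoinPool
// created above comes from the "fork-join-executor" sub-section of a dispatcher's configuration
// (values hypothetical):
//
//   fork-join-executor {
//     parallelism-min = 8
//     parallelism-factor = 3.0
//     parallelism-max = 64
//   }
//
// ThreadPoolConfig.scaledPoolSize scales the number of available processors by
// parallelism-factor and clamps the result to [parallelism-min, parallelism-max].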