Merge branch 'master' into nio-actor

Derek Williams 2011-05-23 08:20:43 -06:00
commit ef40dbd317
63 changed files with 715 additions and 734 deletions

View file

@ -11,7 +11,7 @@ import org.scalatest.{ BeforeAndAfterAll, WordSpec, BeforeAndAfterEach }
import akka.actor.TypedActor._
import akka.japi.{ Option ⇒ JOption }
import akka.util.Duration
import akka.dispatch.{ Dispatchers, Future, AlreadyCompletedFuture }
import akka.dispatch.{ Dispatchers, Future, KeptPromise }
import akka.routing.CyclicIterator
object TypedActorSpec {
@ -43,7 +43,7 @@ object TypedActorSpec {
def pigdog = "Pigdog"
def futurePigdog(): Future[String] = new AlreadyCompletedFuture(Right(pigdog))
def futurePigdog(): Future[String] = new KeptPromise(Right(pigdog))
def futurePigdog(delay: Long): Future[String] = {
Thread.sleep(delay)
futurePigdog
@ -51,7 +51,7 @@ object TypedActorSpec {
def futurePigdog(delay: Long, numbered: Int): Future[String] = {
Thread.sleep(delay)
new AlreadyCompletedFuture(Right(pigdog + numbered))
new KeptPromise(Right(pigdog + numbered))
}
def futureComposePigdogFrom(foo: Foo): Future[String] =
@ -264,7 +264,7 @@ class TypedActorSpec extends WordSpec with MustMatchers with BeforeAndAfterEach
"be able to use work-stealing dispatcher" in {
val config = Configuration(
Duration(6600, "ms"),
Dispatchers.newExecutorBasedEventDrivenWorkStealingDispatcher("pooled-dispatcher")
Dispatchers.newBalancingDispatcher("pooled-dispatcher")
.withNewThreadPoolWithLinkedBlockingQueueWithUnboundedCapacity
.setCorePoolSize(60)
.setMaxPoolSize(60)
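The spec above builds its canned results with KeptPromise, the new name for AlreadyCompletedFuture. A minimal sketch of the idea, not taken from the diff and with illustrative helper names: a KeptPromise wraps an Either that is already available, so the returned Future is complete on construction.

import akka.dispatch.{ Future, KeptPromise }

def alreadySucceeded(s: String): Future[String] = new KeptPromise(Right(s)) // complete from the start
def alreadyFailed(e: Throwable): Future[String] = new KeptPromise(Left(e))  // carries the failure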

View file

@ -16,7 +16,7 @@ class SupervisorMiscSpec extends WordSpec with MustMatchers {
val countDownLatch = new CountDownLatch(4)
val actor1 = Actor.actorOf(new Actor {
self.dispatcher = Dispatchers.newThreadBasedDispatcher(self)
self.dispatcher = Dispatchers.newPinnedDispatcher(self)
override def postRestart(cause: Throwable) { countDownLatch.countDown() }
protected def receive = {
@ -26,7 +26,7 @@ class SupervisorMiscSpec extends WordSpec with MustMatchers {
}).start()
val actor2 = Actor.actorOf(new Actor {
self.dispatcher = Dispatchers.newThreadBasedDispatcher(self)
self.dispatcher = Dispatchers.newPinnedDispatcher(self)
override def postRestart(cause: Throwable) { countDownLatch.countDown() }
protected def receive = {
@ -36,7 +36,7 @@ class SupervisorMiscSpec extends WordSpec with MustMatchers {
}).start()
val actor3 = Actor.actorOf(new Actor {
self.dispatcher = Dispatchers.newExecutorBasedEventDrivenDispatcher("test").build
self.dispatcher = Dispatchers.newDispatcher("test").build
override def postRestart(cause: Throwable) { countDownLatch.countDown() }
protected def receive = {
@ -46,7 +46,7 @@ class SupervisorMiscSpec extends WordSpec with MustMatchers {
}).start()
val actor4 = Actor.actorOf(new Actor {
self.dispatcher = Dispatchers.newThreadBasedDispatcher(self)
self.dispatcher = Dispatchers.newPinnedDispatcher(self)
override def postRestart(cause: Throwable) { countDownLatch.countDown() }
protected def receive = {

View file

@ -20,7 +20,7 @@ class ConfigSpec extends WordSpec with MustMatchers {
getString("akka.time-unit") must equal(Some("seconds"))
getString("akka.version") must equal(Some("2.0-SNAPSHOT"))
getString("akka.actor.default-dispatcher.type") must equal(Some("GlobalExecutorBasedEventDriven"))
getString("akka.actor.default-dispatcher.type") must equal(Some("GlobalDispatcher"))
getInt("akka.actor.default-dispatcher.keep-alive-time") must equal(Some(60))
getDouble("akka.actor.default-dispatcher.core-pool-size-factor") must equal(Some(1.0))
getDouble("akka.actor.default-dispatcher.max-pool-size-factor") must equal(Some(4.0))

View file

@ -344,12 +344,12 @@ abstract class ActorModelSpec extends JUnitSuite {
}
}
class ExecutorBasedEventDrivenDispatcherModelTest extends ActorModelSpec {
class DispatcherModelTest extends ActorModelSpec {
def newInterceptedDispatcher =
new ExecutorBasedEventDrivenDispatcher("foo") with MessageDispatcherInterceptor
new Dispatcher("foo") with MessageDispatcherInterceptor
}
class ExecutorBasedEventDrivenWorkStealingDispatcherModelTest extends ActorModelSpec {
class BalancingDispatcherModelTest extends ActorModelSpec {
def newInterceptedDispatcher =
new ExecutorBasedEventDrivenWorkStealingDispatcher("foo") with MessageDispatcherInterceptor
new BalancingDispatcher("foo") with MessageDispatcherInterceptor
}

View file

@ -21,15 +21,15 @@ object DispatchersSpec {
val executorbounds = "executor-bounds"
val allowcoretimeout = "allow-core-timeout"
val rejectionpolicy = "rejection-policy" // abort, caller-runs, discard-oldest, discard
val throughput = "throughput" // Throughput for ExecutorBasedEventDrivenDispatcher
val throughput = "throughput" // Throughput for Dispatcher
def instance(dispatcher: MessageDispatcher): (MessageDispatcher) ⇒ Boolean = _ == dispatcher
def ofType[T <: MessageDispatcher: Manifest]: (MessageDispatcher) ⇒ Boolean = _.getClass == manifest[T].erasure
def typesAndValidators: Map[String, (MessageDispatcher) ⇒ Boolean] = Map(
"ExecutorBasedEventDrivenWorkStealing" -> ofType[ExecutorBasedEventDrivenWorkStealingDispatcher],
"ExecutorBasedEventDriven" -> ofType[ExecutorBasedEventDrivenDispatcher],
"GlobalExecutorBasedEventDriven" -> instance(globalExecutorBasedEventDrivenDispatcher))
"BalancingDispatcher" -> ofType[BalancingDispatcher],
"Dispatcher" -> ofType[Dispatcher],
"GlobalDispatcher" -> instance(globalDispatcher))
def validTypes = typesAndValidators.keys.toList

View file

@ -3,14 +3,14 @@ package akka.actor.dispatch
import java.util.concurrent.{ CountDownLatch, TimeUnit }
import org.scalatest.junit.JUnitSuite
import org.junit.Test
import akka.dispatch.{ Dispatchers, ExecutorBasedEventDrivenDispatcher }
import akka.dispatch.{ Dispatchers, Dispatcher }
import akka.actor.Actor
import Actor._
import java.util.concurrent.atomic.{ AtomicBoolean, AtomicInteger }
object ExecutorBasedEventDrivenDispatcherActorSpec {
object DispatcherActorSpec {
class TestActor extends Actor {
self.dispatcher = Dispatchers.newExecutorBasedEventDrivenDispatcher(self.uuid.toString).build
self.dispatcher = Dispatchers.newDispatcher(self.uuid.toString).build
def receive = {
case "Hello"
self.reply("World")
@ -23,14 +23,14 @@ object ExecutorBasedEventDrivenDispatcherActorSpec {
val oneWay = new CountDownLatch(1)
}
class OneWayTestActor extends Actor {
self.dispatcher = Dispatchers.newExecutorBasedEventDrivenDispatcher(self.uuid.toString).build
self.dispatcher = Dispatchers.newDispatcher(self.uuid.toString).build
def receive = {
case "OneWay" OneWayTestActor.oneWay.countDown()
}
}
}
class ExecutorBasedEventDrivenDispatcherActorSpec extends JUnitSuite {
import ExecutorBasedEventDrivenDispatcherActorSpec._
class DispatcherActorSpec extends JUnitSuite {
import DispatcherActorSpec._
private val unit = TimeUnit.MILLISECONDS
@ -74,7 +74,7 @@ class ExecutorBasedEventDrivenDispatcherActorSpec extends JUnitSuite {
@Test
def shouldRespectThroughput {
val throughputDispatcher = Dispatchers.
newExecutorBasedEventDrivenDispatcher("THROUGHPUT", 101, 0, Dispatchers.MAILBOX_TYPE).
newDispatcher("THROUGHPUT", 101, 0, Dispatchers.MAILBOX_TYPE).
setCorePoolSize(1).
build
@ -110,7 +110,7 @@ class ExecutorBasedEventDrivenDispatcherActorSpec extends JUnitSuite {
def shouldRespectThroughputDeadline {
val deadlineMs = 100
val throughputDispatcher = Dispatchers.
newExecutorBasedEventDrivenDispatcher("THROUGHPUT", 2, deadlineMs, Dispatchers.MAILBOX_TYPE).
newDispatcher("THROUGHPUT", 2, deadlineMs, Dispatchers.MAILBOX_TYPE).
setCorePoolSize(1).
build
val works = new AtomicBoolean(true)

View file

@ -12,7 +12,7 @@ import Actor._
*
* @author Jan Van Besien
*/
class ExecutorBasedEventDrivenDispatcherActorsSpec extends JUnitSuite with MustMatchers {
class DispatcherActorsSpec extends JUnitSuite with MustMatchers {
class SlowActor(finishedCounter: CountDownLatch) extends Actor {
def receive = {

View file

@ -10,9 +10,9 @@ import akka.actor.{ IllegalActorStateException, Actor }
import Actor._
import akka.dispatch.{ MessageQueue, Dispatchers }
object ExecutorBasedEventDrivenWorkStealingDispatcherSpec {
object BalancingDispatcherSpec {
def newWorkStealer() = Dispatchers.newExecutorBasedEventDrivenWorkStealingDispatcher("pooled-dispatcher", 1).build
def newWorkStealer() = Dispatchers.newBalancingDispatcher("pooled-dispatcher", 1).build
val delayableActorDispatcher, sharedActorDispatcher, parentActorDispatcher = newWorkStealer()
@ -52,8 +52,8 @@ object ExecutorBasedEventDrivenWorkStealingDispatcherSpec {
/**
* @author Jan Van Besien
*/
class ExecutorBasedEventDrivenWorkStealingDispatcherSpec extends JUnitSuite with MustMatchers {
import ExecutorBasedEventDrivenWorkStealingDispatcherSpec._
class BalancingDispatcherSpec extends JUnitSuite with MustMatchers {
import BalancingDispatcherSpec._
@Test
def fastActorShouldStealWorkFromSlowActor {

View file

@ -68,7 +68,7 @@ abstract class MailboxSpec extends WordSpec with MustMatchers with BeforeAndAfte
//CANDIDATE FOR TESTKIT
def spawn[T <: AnyRef](fun: ⇒ T)(implicit within: Duration): Future[T] = {
val result = new DefaultCompletableFuture[T](within.length, within.unit)
val result = new DefaultPromise[T](within.length, within.unit)
val t = new Thread(new Runnable {
def run = try {
result.completeWithResult(fun)

View file

@ -8,7 +8,7 @@ import java.util.concurrent.CountDownLatch
class PriorityDispatcherSpec extends WordSpec with MustMatchers {
"A PriorityExecutorBasedEventDrivenDispatcher" must {
"A PriorityDispatcher" must {
"Order it's messages according to the specified comparator using an unbounded mailbox" in {
testOrdering(UnboundedMailbox())
}
@ -19,7 +19,7 @@ class PriorityDispatcherSpec extends WordSpec with MustMatchers {
}
def testOrdering(mboxType: MailboxType) {
val dispatcher = new PriorityExecutorBasedEventDrivenDispatcher("Test",
val dispatcher = new PriorityDispatcher("Test",
PriorityGenerator({
case i: Int ⇒ i //Reverse order
case 'Result ⇒ Int.MaxValue

View file

@ -10,7 +10,7 @@ import Actor._
object ThreadBasedActorSpec {
class TestActor extends Actor {
self.dispatcher = Dispatchers.newThreadBasedDispatcher(self)
self.dispatcher = Dispatchers.newPinnedDispatcher(self)
def receive = {
case "Hello"
@ -30,7 +30,7 @@ class ThreadBasedActorSpec extends JUnitSuite {
def shouldSendOneWay {
var oneWay = new CountDownLatch(1)
val actor = actorOf(new Actor {
self.dispatcher = Dispatchers.newThreadBasedDispatcher(self)
self.dispatcher = Dispatchers.newPinnedDispatcher(self)
def receive = {
case "OneWay" oneWay.countDown()
}

View file

@ -3,15 +3,14 @@ package akka.actor
import org.scalatest.junit.JUnitSuite
import org.junit.Test
import Actor._
import java.util.concurrent.{ CyclicBarrier, TimeUnit, CountDownLatch }
import org.scalatest.Assertions._
import java.util.concurrent.{ ConcurrentLinkedQueue, CyclicBarrier, TimeUnit, CountDownLatch }
import akka.dispatch.Future
object ActorRegistrySpec {
var record = ""
class TestActor extends Actor {
def receive = {
case "ping"
record = "pong" + record
self.reply("got ping")
}
}
@ -19,10 +18,8 @@ object ActorRegistrySpec {
class TestActor2 extends Actor {
def receive = {
case "ping"
record = "pong" + record
self.reply("got ping")
case "ping2"
record = "pong" + record
self.reply("got ping")
}
}
@ -41,6 +38,7 @@ class ActorRegistrySpec extends JUnitSuite {
assert(actor2.get.address === actor1.address)
assert(actor2.get.address === "test-actor-1")
actor2.get.stop
assert(Actor.registry.actorFor(actor1.address).isEmpty)
}
@Test
@ -54,6 +52,7 @@ class ActorRegistrySpec extends JUnitSuite {
assert(actorOrNone.get.uuid === uuid)
assert(actorOrNone.get.address === "test-actor-1")
actor.stop
assert(Actor.registry.local.actorFor(uuid).isEmpty)
}
@Test
@ -71,10 +70,8 @@ class ActorRegistrySpec extends JUnitSuite {
@Test
def shouldGetAllActorsFromLocalActorRegistry {
Actor.registry.local.shutdownAll
val actor1 = actorOf[TestActor]("test-actor-1")
actor1.start
val actor2 = actorOf[TestActor]("test-actor-2")
actor2.start
val actor1 = actorOf[TestActor]("test-actor-1").start
val actor2 = actorOf[TestActor]("test-actor-2").start
val actors = Actor.registry.local.actors
assert(actors.size === 2)
assert(actors.head.actor.isInstanceOf[TestActor])
@ -88,13 +85,15 @@ class ActorRegistrySpec extends JUnitSuite {
@Test
def shouldGetResponseByAllActorsInLocalActorRegistryWhenInvokingForeach {
Actor.registry.local.shutdownAll
val actor1 = actorOf[TestActor]("test-actor-1")
actor1.start
val actor2 = actorOf[TestActor]("test-actor-2")
actor2.start
record = ""
Actor.registry.local.foreach(actor ⇒ actor !! "ping")
assert(record === "pongpong")
val actor1 = actorOf[TestActor]("test-actor-1").start
val actor2 = actorOf[TestActor]("test-actor-2").start
val results = new ConcurrentLinkedQueue[Future[String]]
Actor.registry.local.foreach(actor ⇒ results.add(actor.!!![String]("ping")))
assert(results.size === 2)
val i = results.iterator
while (i.hasNext) assert(i.next.get === "got ping")
actor1.stop()
actor2.stop()
}

View file

@ -160,7 +160,7 @@ trait ActorRef extends ActorRefShared with java.lang.Comparable[ActorRef] { scal
/**
* Akka Java API. <p/>
* The default dispatcher is the <tt>Dispatchers.globalExecutorBasedEventDrivenDispatcher</tt>.
* The default dispatcher is the <tt>Dispatchers.globalDispatcher</tt>.
* This means that all actors will share the same event-driven executor based dispatcher.
* <p/>
* You can override it so it fits the specific use-case that the actor is used for.
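A hedged sketch of the override mentioned in the comment above, reusing only factory calls that appear elsewhere in this commit; the actor body and dispatcher name are illustrative.

import akka.actor.Actor
import akka.dispatch.Dispatchers

val worker = Actor.actorOf(new Actor {
  // Replace the shared Dispatchers.globalDispatcher with a dedicated one.
  self.dispatcher = Dispatchers.newDispatcher("worker-dispatcher").build
  def receive = { case msg ⇒ self.reply(msg) }
}).start()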
@ -208,7 +208,7 @@ trait ActorRef extends ActorRefShared with java.lang.Comparable[ActorRef] { scal
* The reference sender future of the last received message.
* Is defined if the message was sent with '!!' or '!!!', else None.
*/
def getSenderFuture: Option[CompletableFuture[Any]] = senderFuture
def getSenderFuture: Option[Promise[Any]] = senderFuture
/**
* Is the actor being restarted?
@ -482,7 +482,7 @@ trait ActorRef extends ActorRefShared with java.lang.Comparable[ActorRef] { scal
message: Any,
timeout: Long,
senderOption: Option[ActorRef],
senderFuture: Option[CompletableFuture[T]]): CompletableFuture[T]
senderFuture: Option[Promise[T]]): Promise[T]
protected[akka] def actorInstance: AtomicReference[Actor]
@ -698,10 +698,10 @@ class LocalActorRef private[akka] (private[this] val actorFactory: () ⇒ Actor,
message: Any,
timeout: Long,
senderOption: Option[ActorRef],
senderFuture: Option[CompletableFuture[T]]): CompletableFuture[T] = {
val future = if (senderFuture.isDefined) senderFuture else Some(new DefaultCompletableFuture[T](timeout))
senderFuture: Option[Promise[T]]): Promise[T] = {
val future = if (senderFuture.isDefined) senderFuture else Some(new DefaultPromise[T](timeout))
dispatcher dispatchMessage new MessageInvocation(
this, message, senderOption, future.asInstanceOf[Some[CompletableFuture[Any]]])
this, message, senderOption, future.asInstanceOf[Some[Promise[Any]]])
future.get
}
@ -1020,7 +1020,7 @@ private[akka] case class RemoteActorRef private[akka] (
message: Any,
timeout: Long,
senderOption: Option[ActorRef],
senderFuture: Option[CompletableFuture[T]]): CompletableFuture[T] = {
senderFuture: Option[Promise[T]]): Promise[T] = {
val future = Actor.remote.send[T](
message, senderOption, senderFuture,
remoteAddress, timeout, false, this, loader)
@ -1155,7 +1155,7 @@ trait ScalaActorRef extends ActorRefShared { ref: ActorRef ⇒
* The reference sender future of the last received message.
* Is defined if the message was sent with '!!' or '!!!', else None.
*/
def senderFuture(): Option[CompletableFuture[Any]] = {
def senderFuture(): Option[Promise[Any]] = {
val msg = currentMessage
if (msg eq null) None
else msg.senderFuture

View file

@ -34,7 +34,7 @@ private[actor] final class ActorRegistry private[actor] () extends ListenerManag
//private val isClusterEnabled = ReflectiveAccess.isClusterEnabled
private val actorsByAddress = new ConcurrentHashMap[String, ActorRef]
private val actorsByUuid = new ConcurrentHashMap[String, ActorRef]
private val actorsByUuid = new ConcurrentHashMap[Uuid, ActorRef]
private val typedActorsByUuid = new ConcurrentHashMap[Uuid, AnyRef]
private val guard = new ReadWriteGuard
@ -66,7 +66,7 @@ private[actor] final class ActorRegistry private[actor] () extends ListenerManag
// throw new IllegalStateException("Actor 'address' [" + address + "] is already in use, can't register actor [" + actor + "]")
actorsByAddress.put(address, actor)
actorsByUuid.put(actor.uuid.toString, actor)
actorsByUuid.put(actor.uuid, actor)
notifyListeners(ActorRegistered(address, actor))
}
@ -121,7 +121,7 @@ private[actor] final class ActorRegistry private[actor] () extends ListenerManag
*/
class LocalActorRegistry(
private val actorsByAddress: ConcurrentHashMap[String, ActorRef],
private val actorsByUuid: ConcurrentHashMap[String, ActorRef],
private val actorsByUuid: ConcurrentHashMap[Uuid, ActorRef],
private val typedActorsByUuid: ConcurrentHashMap[Uuid, AnyRef]) {
/**
@ -153,11 +153,8 @@ class LocalActorRegistry(
/**
* Finds the actor that has a specific uuid.
*/
private[akka] def actorFor(uuid: Uuid): Option[ActorRef] = {
val uuidAsString = uuid.toString
if (actorsByUuid.containsKey(uuidAsString)) Some(actorsByUuid.get(uuidAsString))
else None
}
private[akka] def actorFor(uuid: Uuid): Option[ActorRef] =
Option(actorsByUuid.get(uuid))
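The lookup above leans on scala.Option's factory, which turns a null result from the ConcurrentHashMap into None:

Option(null)          // == None
Option("some-value")  // == Some("some-value")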
/**
* Finds the typed actor that has a specific address.

View file

@ -358,19 +358,11 @@ object LocalDeployer {
}
}
private[akka] def undeploy(deployment: Deploy) {
deployments.remove(deployment.address)
}
private[akka] def undeploy(deployment: Deploy): Unit = deployments.remove(deployment.address)
private[akka] def undeployAll() {
deployments.clear()
}
private[akka] def undeployAll(): Unit = deployments.clear()
private[akka] def lookupDeploymentFor(address: String): Option[Deploy] = {
val deployment = deployments.get(address)
if (deployment eq null) None
else Some(deployment)
}
private[akka] def lookupDeploymentFor(address: String): Option[Deploy] = Option(deployments.get(address))
}
/**

View file

@ -15,20 +15,23 @@ object TypedActor {
private val selfReference = new ThreadLocal[AnyRef]
def self[T <: AnyRef] = selfReference.get.asInstanceOf[T]
class TypedActor[TI <: AnyRef](proxyRef: AtomicReference[AnyRef], createInstance: TI) extends Actor {
val me = createInstance
def receive = {
trait TypedActor[Iface <: AnyRef, Impl <: Iface] { self: Actor ⇒
val proxyRef: AtomicReference[Iface]
def callMethod(methodCall: MethodCall): Unit
def receive: Receive = {
case m: MethodCall ⇒
selfReference set proxyRef.get
try {
m match {
case m if m.isOneWay ⇒ m(me)
case m if m.returnsFuture_? ⇒ self.senderFuture.get completeWith m(me).asInstanceOf[Future[Any]]
case m ⇒ self reply m(me)
}
} finally {
selfReference set null
}
try { callMethod(m) } finally { selfReference set null }
}
}
class DefaultTypedActor[Iface <: AnyRef, Impl <: Iface](
val proxyRef: AtomicReference[Iface], createInstance: Impl) extends TypedActor[Iface, Impl] with Actor {
val me = createInstance
def callMethod(methodCall: MethodCall): Unit = methodCall match {
case m if m.isOneWay ⇒ m(me)
case m if m.returnsFuture_? ⇒ self.senderFuture.get completeWith m(me).asInstanceOf[Future[Any]]
case m ⇒ self reply m(me)
}
}
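The refactoring above splits the old concrete TypedActor class into a trait with an abstract callMethod plus a DefaultTypedActor that keeps the previous behaviour. A hedged sketch of what the split allows; LoggingTypedActor and its debug call are illustrative and not part of the commit.

class LoggingTypedActor[Iface <: AnyRef, Impl <: Iface](
  val proxyRef: AtomicReference[Iface], createInstance: Impl) extends TypedActor[Iface, Impl] with Actor {
  val me = createInstance
  def callMethod(methodCall: MethodCall): Unit = {
    EventHandler.debug(this, "invoking " + methodCall) // assumes akka.event.EventHandler is imported
    methodCall match {
      case m if m.isOneWay ⇒ m(me)
      case m if m.returnsFuture_? ⇒ self.senderFuture.get completeWith m(me).asInstanceOf[Future[Any]]
      case m ⇒ self reply m(me)
    }
  }
}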
@ -42,18 +45,13 @@ object TypedActor {
case m if m.isOneWay ⇒
actor ! m
null
case m if m.returnsJOption_? ⇒
(actor !!! m).as[JOption[Any]] match {
case Some(null) | None ⇒ JOption.none[Any]
case Some(joption) ⇒ joption
}
case m if m.returnsOption_? ⇒
(actor !!! m).as[AnyRef] match {
case Some(null) | None ⇒ None
case Some(option) ⇒ option
}
case m if m.returnsFuture_? ⇒
actor !!! m
case m if m.returnsJOption_? || m.returnsOption_? ⇒
(actor !!! m).as[AnyRef] match {
case Some(null) | None ⇒ if (m.returnsJOption_?) JOption.none[Any] else None
case Some(joption) ⇒ joption
}
case m ⇒
(actor !!! m).get
}
@ -103,12 +101,15 @@ object TypedActor {
newTypedActor(clazz.getInterfaces, clazz.newInstance, config, if (loader eq null) clazz.getClassLoader else loader)
}
protected def newTypedActor[R <: AnyRef, T <: R](interfaces: Array[Class[_]], constructor: T, config: Configuration, loader: ClassLoader): R = {
val proxyRef = new AtomicReference[AnyRef](null)
configureAndProxyLocalActorRef[T](interfaces, proxyRef, actorOf(new TypedActor[T](proxyRef, constructor)), config, loader)
private[akka] def newTypedActor[R <: AnyRef, T <: R](interfaces: Array[Class[_]], constructor: T, config: Configuration, loader: ClassLoader): R =
newTypedActor[R, T](interfaces, (ref: AtomicReference[R]) ⇒ new DefaultTypedActor[R, T](ref, constructor), config, loader)
private[akka] def newTypedActor[R <: AnyRef, T <: R](interfaces: Array[Class[_]], constructor: (AtomicReference[R]) ⇒ TypedActor[R, T], config: Configuration, loader: ClassLoader): R = {
val proxyRef = new AtomicReference[R]
configureAndProxyLocalActorRef[R](interfaces, proxyRef, actorOf(constructor(proxyRef).asInstanceOf[Actor]), config, loader)
}
protected def configureAndProxyLocalActorRef[T <: AnyRef](interfaces: Array[Class[_]], proxyRef: AtomicReference[AnyRef], actor: ActorRef, config: Configuration, loader: ClassLoader): T = {
protected def configureAndProxyLocalActorRef[T <: AnyRef](interfaces: Array[Class[_]], proxyRef: AtomicReference[T], actor: ActorRef, config: Configuration, loader: ClassLoader): T = {
actor.timeout = config.timeout.toMillis
actor.dispatcher = config.dispatcher

View file

@ -19,18 +19,18 @@ import util.DynamicVariable
* The preferred way of creating dispatchers is to use
* the {@link akka.dispatch.Dispatchers} factory object.
*
* @see akka.dispatch.ExecutorBasedEventDrivenWorkStealingDispatcher
* @see akka.dispatch.BalancingDispatcher
* @see akka.dispatch.Dispatchers
*
* @author Viktor Klang
*/
class ExecutorBasedEventDrivenWorkStealingDispatcher(
class BalancingDispatcher(
_name: String,
throughput: Int = Dispatchers.THROUGHPUT,
throughputDeadlineTime: Int = Dispatchers.THROUGHPUT_DEADLINE_TIME_MILLIS,
mailboxType: MailboxType = Dispatchers.MAILBOX_TYPE,
config: ThreadPoolConfig = ThreadPoolConfig())
extends ExecutorBasedEventDrivenDispatcher(_name, throughput, throughputDeadlineTime, mailboxType, config) {
extends Dispatcher(_name, throughput, throughputDeadlineTime, mailboxType, config) {
def this(_name: String, throughput: Int, throughputDeadlineTime: Int, mailboxType: MailboxType) =
this(_name, throughput, throughputDeadlineTime, mailboxType, ThreadPoolConfig()) // Needed for Java API usage

View file

@ -28,7 +28,7 @@ import java.util.concurrent.{ TimeUnit, ExecutorService, RejectedExecutionExcept
* <p/>
* Example usage:
* <pre/>
* val dispatcher = new ExecutorBasedEventDrivenDispatcher("name")
* val dispatcher = new Dispatcher("name")
* dispatcher
* .withNewThreadPoolWithBoundedBlockingQueue(100)
* .setCorePoolSize(16)
@ -43,7 +43,7 @@ import java.util.concurrent.{ TimeUnit, ExecutorService, RejectedExecutionExcept
* <p/>
* Example usage:
* <pre/>
* ExecutorBasedEventDrivenDispatcher dispatcher = new ExecutorBasedEventDrivenDispatcher("name");
* Dispatcher dispatcher = new Dispatcher("name");
* dispatcher
* .withNewThreadPoolWithBoundedBlockingQueue(100)
* .setCorePoolSize(16)
@ -63,7 +63,7 @@ import java.util.concurrent.{ TimeUnit, ExecutorService, RejectedExecutionExcept
* always continues until the mailbox is empty.
* Larger values (or zero or negative) increase throughput, smaller values increase fairness
*/
class ExecutorBasedEventDrivenDispatcher(
class Dispatcher(
_name: String,
val throughput: Int = Dispatchers.THROUGHPUT,
val throughputDeadlineTime: Int = Dispatchers.THROUGHPUT_DEADLINE_TIME_MILLIS,
@ -117,7 +117,7 @@ class ExecutorBasedEventDrivenDispatcher(
case b: UnboundedMailbox ⇒
new ConcurrentLinkedQueue[MessageInvocation] with MessageQueue with ExecutableMailbox {
@inline
final def dispatcher = ExecutorBasedEventDrivenDispatcher.this
final def dispatcher = Dispatcher.this
@inline
final def enqueue(m: MessageInvocation) = this.add(m)
@inline
@ -126,7 +126,7 @@ class ExecutorBasedEventDrivenDispatcher(
case b: BoundedMailbox ⇒
new DefaultBoundedMessageQueue(b.capacity, b.pushTimeOut) with ExecutableMailbox {
@inline
final def dispatcher = ExecutorBasedEventDrivenDispatcher.this
final def dispatcher = Dispatcher.this
}
}
@ -173,11 +173,11 @@ class ExecutorBasedEventDrivenDispatcher(
}
/**
* This is the behavior of an ExecutorBasedEventDrivenDispatcher's mailbox.
* This is the behavior of a Dispatcher's mailbox.
*/
trait ExecutableMailbox extends Runnable { self: MessageQueue ⇒
def dispatcher: ExecutorBasedEventDrivenDispatcher
def dispatcher: Dispatcher
final def run = {
try {
@ -237,7 +237,7 @@ object PriorityGenerator {
/**
* A PriorityGenerator is a convenience API to create a Comparator that orders the messages of a
* PriorityExecutorBasedEventDrivenDispatcher
* PriorityDispatcher
*/
abstract class PriorityGenerator extends java.util.Comparator[MessageInvocation] {
def gen(message: Any): Int
@ -247,18 +247,18 @@ abstract class PriorityGenerator extends java.util.Comparator[MessageInvocation]
}
/**
* A version of ExecutorBasedEventDrivenDispatcher that gives all actors registered to it a priority mailbox,
* A version of Dispatcher that gives all actors registered to it a priority mailbox,
* prioritized according to the supplied comparator.
*
* The dispatcher will process the messages with the _lowest_ priority first.
*/
class PriorityExecutorBasedEventDrivenDispatcher(
class PriorityDispatcher(
name: String,
val comparator: java.util.Comparator[MessageInvocation],
throughput: Int = Dispatchers.THROUGHPUT,
throughputDeadlineTime: Int = Dispatchers.THROUGHPUT_DEADLINE_TIME_MILLIS,
mailboxType: MailboxType = Dispatchers.MAILBOX_TYPE,
config: ThreadPoolConfig = ThreadPoolConfig()) extends ExecutorBasedEventDrivenDispatcher(name, throughput, throughputDeadlineTime, mailboxType, config) with PriorityMailbox {
config: ThreadPoolConfig = ThreadPoolConfig()) extends Dispatcher(name, throughput, throughputDeadlineTime, mailboxType, config) with PriorityMailbox {
def this(name: String, comparator: java.util.Comparator[MessageInvocation], throughput: Int, throughputDeadlineTime: Int, mailboxType: MailboxType) =
this(name, comparator, throughput, throughputDeadlineTime, mailboxType, ThreadPoolConfig()) // Needed for Java API usage
@ -277,14 +277,14 @@ class PriorityExecutorBasedEventDrivenDispatcher(
}
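A hedged usage sketch combining the renamed PriorityDispatcher with a PriorityGenerator, modelled on the PriorityDispatcherSpec earlier in this commit; the message values and priorities are illustrative.

import akka.dispatch.{ PriorityDispatcher, PriorityGenerator }

val priority = PriorityGenerator({
  case "high" ⇒ 0   // lowest value is processed first
  case "low"  ⇒ 100
  case _      ⇒ 50
})

val dispatcher = new PriorityDispatcher("prio", priority) // throughput, deadline and mailbox use the defaults

Actors that set self.dispatcher to this instance get the priority-enabled mailbox described below.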
/**
* Can be used to give an ExecutorBasedEventDrivenDispatcher's actors priority-enabled mailboxes
* Can be used to give a Dispatcher's actors priority-enabled mailboxes
*
* Usage:
* new ExecutorBasedEventDrivenDispatcher(...) with PriorityMailbox {
* new Dispatcher(...) with PriorityMailbox {
* val comparator = ...comparator that determines mailbox priority ordering...
* }
*/
trait PriorityMailbox { self: ExecutorBasedEventDrivenDispatcher ⇒
trait PriorityMailbox { self: Dispatcher ⇒
def comparator: java.util.Comparator[MessageInvocation]
override def createMailbox(actorRef: ActorRef): AnyRef = self.mailboxType match {

View file

@ -18,7 +18,7 @@ import java.util.concurrent.TimeUnit
* <p/>
* Example usage:
* <pre/>
* val dispatcher = Dispatchers.newExecutorBasedEventDrivenDispatcher("name")
* val dispatcher = Dispatchers.newDispatcher("name")
* dispatcher
* .withNewThreadPoolWithLinkedBlockingQueueWithCapacity(100)
* .setCorePoolSize(16)
@ -32,7 +32,7 @@ import java.util.concurrent.TimeUnit
* <p/>
* Example usage:
* <pre/>
* MessageDispatcher dispatcher = Dispatchers.newExecutorBasedEventDrivenDispatcher("name");
* MessageDispatcher dispatcher = Dispatchers.newDispatcher("name");
* dispatcher
* .withNewThreadPoolWithLinkedBlockingQueueWithCapacity(100)
* .setCorePoolSize(16)
@ -57,10 +57,10 @@ object Dispatchers {
val MAILBOX_TYPE: MailboxType = if (MAILBOX_CAPACITY < 1) UnboundedMailbox() else BoundedMailbox()
lazy val defaultGlobalDispatcher = {
config.getSection("akka.actor.default-dispatcher").flatMap(from).getOrElse(globalExecutorBasedEventDrivenDispatcher)
config.getSection("akka.actor.default-dispatcher").flatMap(from).getOrElse(globalDispatcher)
}
object globalExecutorBasedEventDrivenDispatcher extends ExecutorBasedEventDrivenDispatcher("global", THROUGHPUT, THROUGHPUT_DEADLINE_TIME_MILLIS, MAILBOX_TYPE)
object globalDispatcher extends Dispatcher("global", THROUGHPUT, THROUGHPUT_DEADLINE_TIME_MILLIS, MAILBOX_TYPE)
/**
* Creates a thread-based dispatcher serving a single actor through the same single thread.
@ -68,16 +68,10 @@ object Dispatchers {
* <p/>
* E.g. each actor consumes its own thread.
*/
def newThreadBasedDispatcher(actor: ActorRef) = new ThreadBasedDispatcher(actor)
/**
* Creates an thread based dispatcher serving a single actor through the same single thread.
* Uses the default timeout
* If capacity is negative, it's Integer.MAX_VALUE
* <p/>
* E.g. each actor consumes its own thread.
*/
def newThreadBasedDispatcher(actor: ActorRef, mailboxCapacity: Int) = new ThreadBasedDispatcher(actor, mailboxCapacity)
def newPinnedDispatcher(actor: ActorRef) = actor match {
case null ⇒ new PinnedDispatcher()
case some ⇒ new PinnedDispatcher(some)
}
/**
* Creates a thread-based dispatcher serving a single actor through the same single thread.
@ -85,69 +79,87 @@ object Dispatchers {
* <p/>
* E.g. each actor consumes its own thread.
*/
def newThreadBasedDispatcher(actor: ActorRef, mailboxCapacity: Int, pushTimeOut: Duration) =
new ThreadBasedDispatcher(actor, mailboxCapacity, pushTimeOut)
def newPinnedDispatcher(actor: ActorRef, mailboxType: MailboxType) = actor match {
case null ⇒ new PinnedDispatcher(mailboxType)
case some ⇒ new PinnedDispatcher(some, mailboxType)
}
/**
* Creates a thread-based dispatcher serving a single actor through the same single thread.
* <p/>
* E.g. each actor consumes its own thread.
*/
def newPinnedDispatcher(name: String, mailboxType: MailboxType) =
new PinnedDispatcher(name, mailboxType)
/**
* Creates a thread-based dispatcher serving a single actor through the same single thread.
* <p/>
* E.g. each actor consumes its own thread.
*/
def newPinnedDispatcher(name: String) =
new PinnedDispatcher(name)
/**
* Creates an executor-based event-driven dispatcher serving multiple (millions of) actors through a thread pool.
* <p/>
* Has a fluent builder interface for configuring its semantics.
*/
def newExecutorBasedEventDrivenDispatcher(name: String) =
ThreadPoolConfigDispatcherBuilder(config ⇒ new ExecutorBasedEventDrivenDispatcher(name, config), ThreadPoolConfig())
def newDispatcher(name: String) =
ThreadPoolConfigDispatcherBuilder(config ⇒ new Dispatcher(name, config), ThreadPoolConfig())
/**
* Creates an executor-based event-driven dispatcher serving multiple (millions of) actors through a thread pool.
* <p/>
* Has a fluent builder interface for configuring its semantics.
*/
def newExecutorBasedEventDrivenDispatcher(name: String, throughput: Int, mailboxType: MailboxType) =
def newDispatcher(name: String, throughput: Int, mailboxType: MailboxType) =
ThreadPoolConfigDispatcherBuilder(config ⇒
new ExecutorBasedEventDrivenDispatcher(name, throughput, THROUGHPUT_DEADLINE_TIME_MILLIS, mailboxType, config), ThreadPoolConfig())
new Dispatcher(name, throughput, THROUGHPUT_DEADLINE_TIME_MILLIS, mailboxType, config), ThreadPoolConfig())
/**
* Creates an executor-based event-driven dispatcher serving multiple (millions of) actors through a thread pool.
* <p/>
* Has a fluent builder interface for configuring its semantics.
*/
def newExecutorBasedEventDrivenDispatcher(name: String, throughput: Int, throughputDeadlineMs: Int, mailboxType: MailboxType) =
def newDispatcher(name: String, throughput: Int, throughputDeadlineMs: Int, mailboxType: MailboxType) =
ThreadPoolConfigDispatcherBuilder(config ⇒
new ExecutorBasedEventDrivenDispatcher(name, throughput, throughputDeadlineMs, mailboxType, config), ThreadPoolConfig())
new Dispatcher(name, throughput, throughputDeadlineMs, mailboxType, config), ThreadPoolConfig())
/**
* Creates an executor-based event-driven dispatcher, with work-stealing, serving multiple (millions of) actors through a thread pool.
* <p/>
* Has a fluent builder interface for configuring its semantics.
*/
def newExecutorBasedEventDrivenWorkStealingDispatcher(name: String) =
ThreadPoolConfigDispatcherBuilder(config ⇒ new ExecutorBasedEventDrivenWorkStealingDispatcher(name, config), ThreadPoolConfig())
def newBalancingDispatcher(name: String) =
ThreadPoolConfigDispatcherBuilder(config ⇒ new BalancingDispatcher(name, config), ThreadPoolConfig())
/**
* Creates an executor-based event-driven dispatcher, with work-stealing, serving multiple (millions of) actors through a thread pool.
* <p/>
* Has a fluent builder interface for configuring its semantics.
*/
def newExecutorBasedEventDrivenWorkStealingDispatcher(name: String, throughput: Int) =
def newBalancingDispatcher(name: String, throughput: Int) =
ThreadPoolConfigDispatcherBuilder(config ⇒
new ExecutorBasedEventDrivenWorkStealingDispatcher(name, throughput, THROUGHPUT_DEADLINE_TIME_MILLIS, MAILBOX_TYPE, config), ThreadPoolConfig())
new BalancingDispatcher(name, throughput, THROUGHPUT_DEADLINE_TIME_MILLIS, MAILBOX_TYPE, config), ThreadPoolConfig())
/**
* Creates an executor-based event-driven dispatcher, with work-stealing, serving multiple (millions of) actors through a thread pool.
* <p/>
* Has a fluent builder interface for configuring its semantics.
*/
def newExecutorBasedEventDrivenWorkStealingDispatcher(name: String, throughput: Int, mailboxType: MailboxType) =
def newBalancingDispatcher(name: String, throughput: Int, mailboxType: MailboxType) =
ThreadPoolConfigDispatcherBuilder(config ⇒
new ExecutorBasedEventDrivenWorkStealingDispatcher(name, throughput, THROUGHPUT_DEADLINE_TIME_MILLIS, mailboxType, config), ThreadPoolConfig())
new BalancingDispatcher(name, throughput, THROUGHPUT_DEADLINE_TIME_MILLIS, mailboxType, config), ThreadPoolConfig())
/**
* Creates an executor-based event-driven dispatcher, with work-stealing, serving multiple (millions of) actors through a thread pool.
* <p/>
* Has a fluent builder interface for configuring its semantics.
*/
def newExecutorBasedEventDrivenWorkStealingDispatcher(name: String, throughput: Int, throughputDeadlineMs: Int, mailboxType: MailboxType) =
def newBalancingDispatcher(name: String, throughput: Int, throughputDeadlineMs: Int, mailboxType: MailboxType) =
ThreadPoolConfigDispatcherBuilder(config ⇒
new ExecutorBasedEventDrivenWorkStealingDispatcher(name, throughput, throughputDeadlineMs, mailboxType, config), ThreadPoolConfig())
new BalancingDispatcher(name, throughput, throughputDeadlineMs, mailboxType, config), ThreadPoolConfig())
/**
* Utility function that tries to load the specified dispatcher config from the akka.conf
* or else use the supplied default dispatcher
@ -169,7 +181,7 @@ object Dispatchers {
* executor-bounds = -1 # Makes the Executor bounded, -1 is unbounded
* allow-core-timeout = on # Allow core threads to time out
* rejection-policy = "caller-runs" # abort, caller-runs, discard-oldest, discard
* throughput = 5 # Throughput for ExecutorBasedEventDrivenDispatcher
* throughput = 5 # Throughput for Dispatcher
* }
* ex: from(config.getConfigMap(identifier).get)
*
@ -180,9 +192,9 @@ object Dispatchers {
*/
def from(cfg: Configuration): Option[MessageDispatcher] = {
cfg.getString("type") map {
case "ExecutorBasedEventDriven" new ExecutorBasedEventDrivenDispatcherConfigurator()
case "ExecutorBasedEventDrivenWorkStealing" new ExecutorBasedEventDrivenWorkStealingDispatcherConfigurator()
case "GlobalExecutorBasedEventDriven" GlobalExecutorBasedEventDrivenDispatcherConfigurator
case "Dispatcher" new DispatcherConfigurator()
case "BalancingDispatcher" new BalancingDispatcherConfigurator()
case "GlobalDispatcher" GlobalDispatcherConfigurator
case fqn
ReflectiveAccess.getClassFor[MessageDispatcherConfigurator](fqn) match {
case r: Right[_, Class[MessageDispatcherConfigurator]] ⇒
@ -200,13 +212,13 @@ object Dispatchers {
}
}
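A hedged sketch of how the renamed type keys are meant to be used; only the type values come from this commit, while the section name, the settings and the getSection call mirror the defaultGlobalDispatcher lookup earlier in this file and are otherwise illustrative.

// akka.conf fragment (illustrative):
//   akka.actor.my-dispatcher {
//     type = "BalancingDispatcher"   # or "Dispatcher", "GlobalDispatcher", or a configurator FQN
//     throughput = 5
//   }
val myDispatcher: Option[MessageDispatcher] =
  config.getSection("akka.actor.my-dispatcher") flatMap Dispatchers.from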
object GlobalExecutorBasedEventDrivenDispatcherConfigurator extends MessageDispatcherConfigurator {
def configure(config: Configuration): MessageDispatcher = Dispatchers.globalExecutorBasedEventDrivenDispatcher
object GlobalDispatcherConfigurator extends MessageDispatcherConfigurator {
def configure(config: Configuration): MessageDispatcher = Dispatchers.globalDispatcher
}
class ExecutorBasedEventDrivenDispatcherConfigurator extends MessageDispatcherConfigurator {
class DispatcherConfigurator extends MessageDispatcherConfigurator {
def configure(config: Configuration): MessageDispatcher = {
configureThreadPool(config, threadPoolConfig ⇒ new ExecutorBasedEventDrivenDispatcher(
configureThreadPool(config, threadPoolConfig ⇒ new Dispatcher(
config.getString("name", newUuid.toString),
config.getInt("throughput", Dispatchers.THROUGHPUT),
config.getInt("throughput-deadline-time", Dispatchers.THROUGHPUT_DEADLINE_TIME_MILLIS),
@ -215,9 +227,9 @@ class ExecutorBasedEventDrivenDispatcherConfigurator extends MessageDispatcherCo
}
}
class ExecutorBasedEventDrivenWorkStealingDispatcherConfigurator extends MessageDispatcherConfigurator {
class BalancingDispatcherConfigurator extends MessageDispatcherConfigurator {
def configure(config: Configuration): MessageDispatcher = {
configureThreadPool(config, threadPoolConfig ⇒ new ExecutorBasedEventDrivenWorkStealingDispatcher(
configureThreadPool(config, threadPoolConfig ⇒ new BalancingDispatcher(
config.getString("name", newUuid.toString),
config.getInt("throughput", Dispatchers.THROUGHPUT),
config.getInt("throughput-deadline-time", Dispatchers.THROUGHPUT_DEADLINE_TIME_MILLIS),

View file

@ -20,8 +20,6 @@ import java.lang.{ Iterable ⇒ JIterable }
import java.util.{ LinkedList JLinkedList }
import scala.annotation.tailrec
import scala.collection.generic.CanBuildFrom
import scala.collection.mutable.Builder
import scala.collection.mutable.Stack
class FutureTimeoutException(message: String, cause: Throwable = null) extends AkkaException(message, cause)
@ -56,7 +54,7 @@ object Futures {
* Returns a Future to the result of the first future in the list that is completed
*/
def firstCompletedOf[T](futures: Iterable[Future[T]], timeout: Long = Long.MaxValue): Future[T] = {
val futureResult = new DefaultCompletableFuture[T](timeout)
val futureResult = new DefaultPromise[T](timeout)
val completeFirst: Future[T] ⇒ Unit = _.value.foreach(futureResult complete _)
for (f ← futures) f onComplete completeFirst
@ -83,9 +81,9 @@ object Futures {
*/
def fold[T, R](zero: R, timeout: Long = Actor.TIMEOUT)(futures: Iterable[Future[T]])(foldFun: (R, T) ⇒ R): Future[R] = {
if (futures.isEmpty) {
new AlreadyCompletedFuture[R](Right(zero))
new KeptPromise[R](Right(zero))
} else {
val result = new DefaultCompletableFuture[R](timeout)
val result = new DefaultPromise[R](timeout)
val results = new ConcurrentLinkedQueue[T]()
val allDone = futures.size
@ -135,9 +133,9 @@ object Futures {
*/
def reduce[T, R >: T](futures: Iterable[Future[T]], timeout: Long = Actor.TIMEOUT)(op: (R, T) ⇒ T): Future[R] = {
if (futures.isEmpty)
new AlreadyCompletedFuture[R](Left(new UnsupportedOperationException("empty reduce left")))
new KeptPromise[R](Left(new UnsupportedOperationException("empty reduce left")))
else {
val result = new DefaultCompletableFuture[R](timeout)
val result = new DefaultPromise[R](timeout)
val seedFound = new AtomicBoolean(false)
val seedFold: Future[T] ⇒ Unit = f ⇒ {
if (seedFound.compareAndSet(false, true)) { //Only the first completed should trigger the fold
@ -202,7 +200,7 @@ object Futures {
* in parallel.
*
* def traverse[A, B, M[_] <: Traversable[_]](in: M[A], timeout: Long = Actor.TIMEOUT)(fn: A => Future[B])(implicit cbf: CanBuildFrom[M[A], B, M[B]]): Future[M[B]] =
* in.foldLeft(new DefaultCompletableFuture[Builder[B, M[B]]](timeout).completeWithResult(cbf(in)): Future[Builder[B, M[B]]]) { (fr, a) =>
* in.foldLeft(new DefaultPromise[Builder[B, M[B]]](timeout).completeWithResult(cbf(in)): Future[Builder[B, M[B]]]) { (fr, a) =>
* val fb = fn(a.asInstanceOf[A])
* for (r <- fr; b <-fb) yield (r += b)
* }.map(_.result)
@ -230,7 +228,7 @@ object Future {
/**
* Create an empty Future with default timeout
*/
def empty[T](timeout: Long = Actor.TIMEOUT) = new DefaultCompletableFuture[T](timeout)
def empty[T](timeout: Long = Actor.TIMEOUT) = new DefaultPromise[T](timeout)
import scala.collection.mutable.Builder
import scala.collection.generic.CanBuildFrom
@ -240,7 +238,7 @@ object Future {
* Useful for reducing many Futures into a single Future.
*/
def sequence[A, M[_] <: Traversable[_]](in: M[Future[A]], timeout: Long = Actor.TIMEOUT)(implicit cbf: CanBuildFrom[M[Future[A]], A, M[A]]): Future[M[A]] =
in.foldLeft(new DefaultCompletableFuture[Builder[A, M[A]]](timeout).completeWithResult(cbf(in)): Future[Builder[A, M[A]]])((fr, fa) ⇒ for (r ← fr; a ← fa.asInstanceOf[Future[A]]) yield (r += a)).map(_.result)
in.foldLeft(new DefaultPromise[Builder[A, M[A]]](timeout).completeWithResult(cbf(in)): Future[Builder[A, M[A]]])((fr, fa) ⇒ for (r ← fr; a ← fa.asInstanceOf[Future[A]]) yield (r += a)).map(_.result)
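A hedged usage sketch for sequence, built only from constructors shown in this commit; the values are illustrative.

val futures: List[Future[Int]] = List(new KeptPromise(Right(1)), new KeptPromise(Right(2)))
val all: Future[List[Int]] = Future.sequence(futures)
// all completes with List(1, 2)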
/**
* Transforms a Traversable[A] into a Future[Traversable[B]] using the provided Function A => Future[B].
@ -251,7 +249,7 @@ object Future {
* </pre>
*/
def traverse[A, B, M[_] <: Traversable[_]](in: M[A], timeout: Long = Actor.TIMEOUT)(fn: A ⇒ Future[B])(implicit cbf: CanBuildFrom[M[A], B, M[B]]): Future[M[B]] =
in.foldLeft(new DefaultCompletableFuture[Builder[B, M[B]]](timeout).completeWithResult(cbf(in)): Future[Builder[B, M[B]]]) { (fr, a) ⇒
in.foldLeft(new DefaultPromise[Builder[B, M[B]]](timeout).completeWithResult(cbf(in)): Future[Builder[B, M[B]]]) { (fr, a) ⇒
val fb = fn(a.asInstanceOf[A])
for (r ← fr; b ← fb) yield (r += b)
}.map(_.result)
@ -267,23 +265,19 @@ object Future {
*
* This allows working with Futures in an imperative style without blocking for each result.
*
* Completing a Future using 'CompletableFuture << Future' will also suspend execution until the
* Completing a Future using 'Promise << Future' will also suspend execution until the
* value of the other Future is available.
*
* The Delimited Continuations compiler plugin must be enabled in order to use this method.
*/
def flow[A](body: ⇒ A @cps[Future[Any]], timeout: Long = Actor.TIMEOUT): Future[A] = {
val future = Promise[A](timeout)
(reset(future.asInstanceOf[CompletableFuture[Any]].completeWithResult(body)): Future[Any]) onComplete { f ⇒
(reset(future.asInstanceOf[Promise[Any]].completeWithResult(body)): Future[Any]) onComplete { f ⇒
val opte = f.exception
if (opte.isDefined) future completeWithException (opte.get)
}
future
}
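A hedged sketch of the flow DSL above, assuming the Delimited Continuations plugin is enabled and that a Future can be read inside the block with its apply method, as the dataflow style implies; the values are illustrative.

val x = Promise[Int]()
val y = Promise[Int]()

val sum: Future[Int] = Future.flow { x() + y() } // suspends until both promises are completed

x completeWithResult 1
y completeWithResult 2
// sum eventually holds 3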
private[akka] val callbacksPendingExecution = new ThreadLocal[Option[Stack[() ⇒ Unit]]]() {
override def initialValue = None
}
}
sealed trait Future[+T] {
@ -417,7 +411,7 @@ sealed trait Future[+T] {
* </pre>
*/
final def collect[A](pf: PartialFunction[Any, A]): Future[A] = {
val fa = new DefaultCompletableFuture[A](timeoutInNanos, NANOS)
val fa = new DefaultPromise[A](timeoutInNanos, NANOS)
onComplete { ft ⇒
val v = ft.value.get
fa complete {
@ -450,7 +444,7 @@ sealed trait Future[+T] {
* </pre>
*/
final def failure[A >: T](pf: PartialFunction[Throwable, A]): Future[A] = {
val fa = new DefaultCompletableFuture[A](timeoutInNanos, NANOS)
val fa = new DefaultPromise[A](timeoutInNanos, NANOS)
onComplete { ft ⇒
val opte = ft.exception
fa complete {
@ -482,7 +476,7 @@ sealed trait Future[+T] {
* </pre>
*/
final def map[A](f: T ⇒ A): Future[A] = {
val fa = new DefaultCompletableFuture[A](timeoutInNanos, NANOS)
val fa = new DefaultPromise[A](timeoutInNanos, NANOS)
onComplete { ft ⇒
val optv = ft.value
if (optv.isDefined) {
@ -518,7 +512,7 @@ sealed trait Future[+T] {
* </pre>
*/
final def flatMap[A](f: T ⇒ Future[A]): Future[A] = {
val fa = new DefaultCompletableFuture[A](timeoutInNanos, NANOS)
val fa = new DefaultPromise[A](timeoutInNanos, NANOS)
onComplete { ft ⇒
val optv = ft.value
if (optv.isDefined) {
@ -546,7 +540,7 @@ sealed trait Future[+T] {
}
final def filter(p: Any ⇒ Boolean): Future[Any] = {
val f = new DefaultCompletableFuture[T](timeoutInNanos, NANOS)
val f = new DefaultPromise[T](timeoutInNanos, NANOS)
onComplete { ft ⇒
val optv = ft.value
if (optv.isDefined) {
@ -596,16 +590,19 @@ sealed trait Future[+T] {
object Promise {
def apply[A](timeout: Long): CompletableFuture[A] = new DefaultCompletableFuture[A](timeout)
def apply[A](timeout: Long): Promise[A] = new DefaultPromise[A](timeout)
def apply[A](): CompletableFuture[A] = apply(Actor.TIMEOUT)
def apply[A](): Promise[A] = apply(Actor.TIMEOUT)
private[akka] val callbacksPendingExecution = new ThreadLocal[Option[Stack[() ⇒ Unit]]]() {
override def initialValue = None
}
}
/**
* Essentially this is the Promise (or write-side) of a Future (read-side).
*/
trait CompletableFuture[T] extends Future[T] {
trait Promise[T] extends Future[T] {
/**
* Completes this Future with the specified result, if not already completed.
* @return this
@ -637,7 +634,7 @@ trait CompletableFuture[T] extends Future[T] {
final def <<(value: T): Future[T] @cps[Future[Any]] = shift { cont: (Future[T] ⇒ Future[Any]) ⇒ cont(complete(Right(value))) }
final def <<(other: Future[T]): Future[T] @cps[Future[Any]] = shift { cont: (Future[T] ⇒ Future[Any]) ⇒
val fr = new DefaultCompletableFuture[Any](Actor.TIMEOUT)
val fr = new DefaultPromise[Any](Actor.TIMEOUT)
this completeWith other onComplete { f ⇒
try {
fr completeWith cont(f)
@ -655,7 +652,7 @@ trait CompletableFuture[T] extends Future[T] {
/**
* The default concrete Future implementation.
*/
class DefaultCompletableFuture[T](timeout: Long, timeunit: TimeUnit) extends CompletableFuture[T] {
class DefaultPromise[T](timeout: Long, timeunit: TimeUnit) extends Promise[T] {
def this() = this(0, MILLIS)
@ -722,7 +719,7 @@ class DefaultCompletableFuture[T](timeout: Long, timeunit: TimeUnit) extends Com
}
}
def complete(value: Either[Throwable, T]): DefaultCompletableFuture[T] = {
def complete(value: Either[Throwable, T]): DefaultPromise[T] = {
_lock.lock
val notifyTheseListeners = try {
if (_value.isEmpty && !isExpired) { //Only complete if we aren't expired
@ -746,7 +743,7 @@ class DefaultCompletableFuture[T](timeout: Long, timeunit: TimeUnit) extends Com
}
}
val pending = Future.callbacksPendingExecution.get
val pending = Promise.callbacksPendingExecution.get
if (pending.isDefined) { //Instead of nesting the calls to the callbacks (leading to stack overflow)
pending.get.push(() ⇒ { // Linearize/aggregate callbacks at top level and then execute
val doNotify = notifyCompleted _ //Hoist closure to avoid garbage
@ -755,16 +752,16 @@ class DefaultCompletableFuture[T](timeout: Long, timeunit: TimeUnit) extends Com
} else {
try {
val callbacks = Stack[() ⇒ Unit]() // Allocate new aggregator for pending callbacks
Future.callbacksPendingExecution.set(Some(callbacks)) // Specify the callback aggregator
Promise.callbacksPendingExecution.set(Some(callbacks)) // Specify the callback aggregator
runCallbacks(notifyTheseListeners, callbacks) // Execute callbacks, if they trigger new callbacks, they are aggregated
} finally { Future.callbacksPendingExecution.set(None) } // Ensure cleanup
} finally { Promise.callbacksPendingExecution.set(None) } // Ensure cleanup
}
}
this
}
def onComplete(func: Future[T] ⇒ Unit): CompletableFuture[T] = {
def onComplete(func: Future[T] ⇒ Unit): Promise[T] = {
_lock.lock
val notifyNow = try {
if (_value.isEmpty) {
@ -800,10 +797,10 @@ class DefaultCompletableFuture[T](timeout: Long, timeunit: TimeUnit) extends Com
* An already completed Future is seeded with its result at creation, and is useful when you are participating in
* a Future composition but already have a value to contribute.
*/
sealed class AlreadyCompletedFuture[T](suppliedValue: Either[Throwable, T]) extends CompletableFuture[T] {
sealed class KeptPromise[T](suppliedValue: Either[Throwable, T]) extends Promise[T] {
val value = Some(suppliedValue)
def complete(value: Either[Throwable, T]): CompletableFuture[T] = this
def complete(value: Either[Throwable, T]): Promise[T] = this
def onComplete(func: Future[T] ⇒ Unit): Future[T] = { func(this); this }
def await(atMost: Duration): Future[T] = this
def await: Future[T] = this
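A hedged sketch of the renamed write/read split: a DefaultPromise is completed on one side while consumers only see it as a Future; the callback body and timeout are illustrative.

val promise: Promise[String] = new DefaultPromise[String](5000) // timeout in milliseconds
val future: Future[String] = promise                            // read-side view

future onComplete { f ⇒ println(f.value) } // runs once the promise is completed

promise completeWithResult "done"          // write-side: value becomes Some(Right("done"))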

View file

@ -19,7 +19,7 @@ import akka.actor._
final case class MessageInvocation(receiver: ActorRef,
message: Any,
sender: Option[ActorRef],
senderFuture: Option[CompletableFuture[Any]]) {
senderFuture: Option[Promise[Any]]) {
if (receiver eq null) throw new IllegalArgumentException("Receiver can't be null")
def invoke() {
@ -32,7 +32,7 @@ final case class MessageInvocation(receiver: ActorRef,
}
}
final case class FutureInvocation[T](future: CompletableFuture[T], function: () ⇒ T, cleanup: () ⇒ Unit) extends Runnable {
final case class FutureInvocation[T](future: Promise[T], function: () ⇒ T, cleanup: () ⇒ Unit) extends Runnable {
def run() {
future complete (try {
Right(function())
@ -99,7 +99,7 @@ trait MessageDispatcher {
private[akka] final def dispatchFuture[T](block: () ⇒ T, timeout: Long): Future[T] = {
futures.getAndIncrement()
try {
val future = new DefaultCompletableFuture[T](timeout)
val future = new DefaultPromise[T](timeout)
if (active.isOff)
guard withGuard {

View file

@ -4,31 +4,36 @@
package akka.dispatch
import akka.actor.{ ActorRef }
import akka.util.Duration
import java.util.concurrent.atomic.AtomicReference
import akka.actor.{ Actor, ActorRef }
/**
* Dedicates a unique thread for each actor passed in as reference. Served through its messageQueue.
*
* @author <a href="http://jonasboner.com">Jonas Bon&#233;r</a>
*/
class ThreadBasedDispatcher(_actor: ActorRef, _mailboxType: MailboxType)
extends ExecutorBasedEventDrivenDispatcher(
_actor.uuid.toString, Dispatchers.THROUGHPUT, -1, _mailboxType, ThreadBasedDispatcher.oneThread) {
class PinnedDispatcher(_actor: ActorRef, _name: String, _mailboxType: MailboxType)
extends Dispatcher(
_name, Dispatchers.THROUGHPUT, -1, _mailboxType, PinnedDispatcher.oneThread) {
def this(_name: String, _mailboxType: MailboxType) = this(null, _name, _mailboxType)
def this(_actor: ActorRef, _name: String) = this(_actor, _name, Dispatchers.MAILBOX_TYPE)
def this(_name: String) = this(null, _name, Dispatchers.MAILBOX_TYPE)
def this(_mailboxType: MailboxType) = this(null, "anon", _mailboxType)
def this(_actor: ActorRef, _mailboxType: MailboxType) = this(_actor, _actor.uuid.toString, _mailboxType)
def this(_actor: ActorRef) = this(_actor, _actor.uuid.toString, Dispatchers.MAILBOX_TYPE)
def this() = this(Dispatchers.MAILBOX_TYPE)
private[akka] val owner = new AtomicReference[ActorRef](_actor)
def this(actor: ActorRef) =
this(actor, UnboundedMailbox()) // For Java API
def this(actor: ActorRef, capacity: Int) =
this(actor, BoundedMailbox(capacity)) //For Java API
def this(actor: ActorRef, capacity: Int, pushTimeOut: Duration) = //For Java API
this(actor, BoundedMailbox(capacity, pushTimeOut))
override def register(actorRef: ActorRef) = {
val actor = owner.get()
if ((actor ne null) && actorRef != actor) throw new IllegalArgumentException("Cannot register to anyone but " + actor)
@ -42,7 +47,7 @@ class ThreadBasedDispatcher(_actor: ActorRef, _mailboxType: MailboxType)
}
}
object ThreadBasedDispatcher {
object PinnedDispatcher {
val oneThread: ThreadPoolConfig = ThreadPoolConfig(allowCorePoolTimeout = true, corePoolSize = 1, maxPoolSize = 1)
}
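A hedged usage sketch for the renamed PinnedDispatcher, mirroring the spec changes earlier in this commit; the actor body is illustrative.

import akka.actor.Actor
import akka.dispatch.Dispatchers

val pinned = Actor.actorOf(new Actor {
  self.dispatcher = Dispatchers.newPinnedDispatcher(self) // one dedicated thread for this actor
  def receive = { case "ping" ⇒ self.reply("pong") }
}).start()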

View file

@ -89,7 +89,7 @@ object EventHandler extends ListenerManagement {
class EventHandlerException extends AkkaException
lazy val EventHandlerDispatcher = Dispatchers.newExecutorBasedEventDrivenDispatcher("event:handler").build
lazy val EventHandlerDispatcher = Dispatchers.newDispatcher("event:handler").build
implicit object defaultListenerFormat extends StatelessActorFormat[DefaultListener]

View file

@ -7,7 +7,7 @@ package akka.remoteinterface
import akka.japi.Creator
import akka.actor._
import akka.util._
import akka.dispatch.CompletableFuture
import akka.dispatch.Promise
import akka.serialization._
import akka.AkkaException
@ -300,10 +300,10 @@ trait RemoteClientModule extends RemoteModule { self: RemoteModule ⇒
protected[akka] def send[T](message: Any,
senderOption: Option[ActorRef],
senderFuture: Option[CompletableFuture[T]],
senderFuture: Option[Promise[T]],
remoteAddress: InetSocketAddress,
timeout: Long,
isOneWay: Boolean,
actorRef: ActorRef,
loader: Option[ClassLoader]): Option[CompletableFuture[T]]
loader: Option[ClassLoader]): Option[Promise[T]]
}

View file

@ -10,6 +10,8 @@ import akka.actor.Actor._
import akka.actor.ActorRef
import scala.collection.JavaConversions._
import scala.collection.immutable.Seq
import java.util.concurrent.atomic.AtomicReference
import annotation.tailrec
object Routing {
@ -80,18 +82,27 @@ trait InfiniteIterator[T] extends Iterator[T] {
case class CyclicIterator[T](val items: Seq[T]) extends InfiniteIterator[T] {
def this(items: java.util.List[T]) = this(items.toList)
@volatile
private[this] var current: Seq[T] = items
private[this] val current: AtomicReference[Seq[T]] = new AtomicReference(items)
def hasNext = items != Nil
def next = {
val nc = if (current == Nil) items else current
current = nc.tail
nc.head
def next: T = {
@tailrec
def findNext: T = {
val currentItems = current.get
val newItems = currentItems match {
case Nil ⇒ items
case xs ⇒ xs
}
if (current.compareAndSet(currentItems, newItems.tail)) newItems.head
else findNext
}
findNext
}
override def exists(f: T ⇒ Boolean): Boolean = items.exists(f)
override def exists(f: T ⇒ Boolean): Boolean = items exists f
}
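A hedged usage sketch for the now lock-free CyclicIterator; the elements are illustrative. The compare-and-set loop in next means concurrent callers never corrupt the cursor, they simply retry.

import akka.routing.CyclicIterator

val targets = new CyclicIterator(List("a", "b", "c"))

targets.next // "a"
targets.next // "b"
targets.next // "c"
targets.next // wraps around to "a"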
/**

View file

@ -4,7 +4,7 @@
package akka.util
import akka.dispatch.{ Future, CompletableFuture, MessageInvocation }
import akka.dispatch.{ Future, Promise, MessageInvocation }
import akka.config.{ Config, ModuleNotAvailableException }
import akka.remoteinterface.RemoteSupport
import akka.actor._

View file

@ -227,22 +227,19 @@ object Cluster {
properties = properties + property
}
private def nodename: String = {
val overridden = properties.get("akka.cluster.nodename")
if (overridden.isDefined) overridden.get
else Config.nodename
private def nodename: String = properties.get("akka.cluster.nodename") match {
case Some(uberride) ⇒ uberride
case None ⇒ Config.nodename
}
private def hostname: String = {
val overridden = properties.get("akka.cluster.hostname")
if (overridden.isDefined) overridden.get
else Config.hostname
private def hostname: String = properties.get("akka.cluster.hostname") match {
case Some(uberride) ⇒ uberride
case None ⇒ Config.hostname
}
private def port: Int = {
val overridden = properties.get("akka.cluster.port")
if (overridden.isDefined) overridden.get.toInt
else Config.remoteServerPort
private def port: Int = properties.get("akka.cluster.port") match {
case Some(uberride) ⇒ uberride.toInt
case None ⇒ Config.remoteServerPort
}
val defaultSerializer = new SerializableSerializer
@ -958,7 +955,9 @@ class ClusterNode private[akka] (
*/
def uuidsForActorAddress(actorAddress: String): Array[UUID] = if (isConnected.isOn) {
try {
zkClient.getChildren(actorAddressToUuidsPathFor(actorAddress)).toList.map(new UUID(_)).toArray.asInstanceOf[Array[UUID]]
zkClient.getChildren(actorAddressToUuidsPathFor(actorAddress)).toArray map {
case c: CharSequence ⇒ new UUID(c)
}
} catch {
case e: ZkNoNodeException ⇒ Array[UUID]()
}
@ -969,7 +968,7 @@ class ClusterNode private[akka] (
*/
def nodesForActorsInUseWithUuid(uuid: UUID): Array[String] = if (isConnected.isOn) {
try {
zkClient.getChildren(actorLocationsPathFor(uuid)).toList.toArray.asInstanceOf[Array[String]]
zkClient.getChildren(actorLocationsPathFor(uuid)).toArray.asInstanceOf[Array[String]]
} catch {
case e: ZkNoNodeException ⇒ Array[String]()
}
@ -982,8 +981,7 @@ class ClusterNode private[akka] (
flatten {
actorUuidsForActorAddress(address) map { uuid ⇒
try {
val list = zkClient.getChildren(actorLocationsPathFor(uuid))
list.toList.toArray.asInstanceOf[Array[String]]
zkClient.getChildren(actorLocationsPathFor(uuid)).toArray.asInstanceOf[Array[String]]
} catch {
case e: ZkNoNodeException ⇒ Array[String]()
}
@ -996,7 +994,9 @@ class ClusterNode private[akka] (
*/
def uuidsForActorsInUseOnNode(nodeName: String): Array[UUID] = if (isConnected.isOn) {
try {
zkClient.getChildren(actorsAtNodePathFor(nodeName)).toList.map(new UUID(_)).toArray.asInstanceOf[Array[UUID]]
zkClient.getChildren(actorsAtNodePathFor(nodeName)).toArray map {
case c: CharSequence ⇒ new UUID(c)
}
} catch {
case e: ZkNoNodeException ⇒ Array[UUID]()
}
@ -1008,7 +1008,9 @@ class ClusterNode private[akka] (
def addressesForActorsInUseOnNode(nodeName: String): Array[String] = if (isConnected.isOn) {
val uuids =
try {
zkClient.getChildren(actorsAtNodePathFor(nodeName)).toList.map(new UUID(_)).toArray.asInstanceOf[Array[UUID]]
zkClient.getChildren(actorsAtNodePathFor(nodeName)).toArray map {
case c: CharSequence ⇒ new UUID(c)
}
} catch {
case e: ZkNoNodeException ⇒ Array[UUID]()
}
@ -1024,11 +1026,10 @@ class ClusterNode private[akka] (
zkClient.readData(actorRegistryFormatPathFor(uuid), new Stat).asInstanceOf[Serializer]
}
val format = formats.head
if (formats.isEmpty) throw new IllegalStateException("No Serializer found for [%s]".format(actorAddress))
if (formats map (_ == format) exists (_ == false)) throw new IllegalStateException(
"Multiple Serializer classes found for [%s]".format(actorAddress))
format
if (formats.forall(_ == formats.head) == false) throw new IllegalStateException("Multiple Serializer classes found for [%s]".format(actorAddress))
formats.head
}
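The tightened consistency check above leans on `forall`, which is vacuously true for an empty list, so an empty `formats` is only caught when `formats.head` is evaluated. A small standalone illustration of the check (serializer names are made up):

object SameSerializerCheck {
  def requireSingleSerializer(formats: List[String]): String = {
    if (formats.forall(_ == formats.head) == false)
      throw new IllegalStateException("Multiple Serializer classes found")
    formats.head // NoSuchElementException if formats is empty
  }

  def main(args: Array[String]): Unit = {
    println(requireSingleSerializer(List("JavaSerializer", "JavaSerializer"))) // JavaSerializer
    // requireSingleSerializer(List("JavaSerializer", "ProtobufSerializer"))   // IllegalStateException
    // requireSingleSerializer(Nil)                                            // NoSuchElementException
  }
}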
/**
@ -1126,9 +1127,7 @@ class ClusterNode private[akka] (
}
}
}) match {
case Left(_) ⇒ {
/* do nothing */
}
case Left(_) ⇒ /* do nothing */
case Right(exception) ⇒ throw exception
}
}
@ -1429,23 +1428,15 @@ class ClusterNode private[akka] (
import Cluster._
override def start() {
self.start()
}
override def start(): Unit = self.start()
override def stop() {
self.shutdown()
}
override def stop(): Unit = self.shutdown()
override def disconnect() = self.disconnect()
override def reconnect() {
self.reconnect()
}
override def reconnect(): Unit = self.reconnect()
override def resign() {
self.resign()
}
override def resign(): Unit = self.resign()
override def isConnected = self.isConnected.isOn
@ -1479,15 +1470,11 @@ class ClusterNode private[akka] (
override def getAddressesForActorsInUseOnNode(nodeName: String) = self.addressesForActorsInUseOnNode(nodeName).map(_.toString).toArray
override def setConfigElement(key: String, value: String) {
self.setConfigElement(key, value.getBytes("UTF-8"))
}
override def setConfigElement(key: String, value: String): Unit = self.setConfigElement(key, value.getBytes("UTF-8"))
override def getConfigElement(key: String) = new String(self.getConfigElement(key), "UTF-8")
override def removeConfigElement(key: String) {
self.removeConfigElement(key)
}
override def removeConfigElement(key: String): Unit = self.removeConfigElement(key)
override def getConfigElementKeys = self.getConfigElementKeys.toArray
}
@ -1580,7 +1567,7 @@ object RemoteClusterDaemon {
val ADDRESS = "akka-cluster-daemon".intern
// FIXME configure functionServerDispatcher to what?
val functionServerDispatcher = Dispatchers.newExecutorBasedEventDrivenDispatcher("akka:cloud:cluster:function:server").build
val functionServerDispatcher = Dispatchers.newDispatcher("akka:cloud:cluster:function:server").build
}
/**
@ -1591,7 +1578,7 @@ class RemoteClusterDaemon(cluster: ClusterNode) extends Actor {
import RemoteClusterDaemon._
import Cluster._
self.dispatcher = Dispatchers.newThreadBasedDispatcher(self)
self.dispatcher = Dispatchers.newPinnedDispatcher(self)
def receive: Receive = {
case message: RemoteDaemonMessageProtocol ⇒
@ -1664,8 +1651,8 @@ class RemoteClusterDaemon(cluster: ClusterNode) extends Actor {
self.dispatcher = functionServerDispatcher
def receive = {
case t: Tuple2[Function1[Any, Unit], Any] ⇒ try {
t._1(t._2)
case (fun: Function[Any, Unit], param: Any) ⇒ try {
fun(param)
} finally {
self.stop()
}
@ -1677,8 +1664,8 @@ class RemoteClusterDaemon(cluster: ClusterNode) extends Actor {
self.dispatcher = functionServerDispatcher
def receive = {
case t: Tuple2[Function1[Any, Any], Any] ⇒ try {
self.reply(t._1(t._2))
case (fun: Function[Any, Unit], param: Any) ⇒ try {
self.reply(fun(param))
} finally {
self.stop()
}

View file

@ -8,7 +8,7 @@ import Cluster._
import akka.actor._
import akka.actor.Actor._
import akka.event.EventHandler
import akka.dispatch.CompletableFuture
import akka.dispatch.Promise
import java.net.InetSocketAddress
import java.util.concurrent.atomic.AtomicReference
@ -29,43 +29,32 @@ class ClusterActorRef private[akka] (
EventHandler.debug(this, "Creating a ClusterActorRef for actor with address [%s]".format(address))
private[akka] val addresses = new AtomicReference[Map[InetSocketAddress, ActorRef]](
createConnections(actorAddresses))
(Map[InetSocketAddress, ActorRef]() /: actorAddresses) {
case (map, (uuid, address)) ⇒ map + (address -> createRemoteActorRef(uuid, address))
})
def connections: Map[InetSocketAddress, ActorRef] = addresses.get.toMap
def connections: Map[InetSocketAddress, ActorRef] = addresses.get
override def postMessageToMailbox(message: Any, senderOption: Option[ActorRef]) {
override def postMessageToMailbox(message: Any, senderOption: Option[ActorRef]): Unit =
route(message)(senderOption)
}
override def postMessageToMailboxAndCreateFutureResultWithTimeout[T](
message: Any,
timeout: Long,
senderOption: Option[ActorRef],
senderFuture: Option[CompletableFuture[T]]): CompletableFuture[T] =
route[T](message, timeout)(senderOption).asInstanceOf[CompletableFuture[T]]
senderFuture: Option[Promise[T]]): Promise[T] =
route[T](message, timeout)(senderOption).asInstanceOf[Promise[T]]
private[akka] def failOver(from: InetSocketAddress, to: InetSocketAddress) {
addresses set (addresses.get map {
case (address, actorRef) ⇒
if (address == from) {
actorRef.stop()
(to, createRemoteActorRef(actorRef.uuid, to))
} else (address, actorRef)
case (`from`, actorRef) ⇒
actorRef.stop()
(to, createRemoteActorRef(actorRef.uuid, to))
case other ⇒ other
})
}
private def createConnections(addresses: Array[Tuple2[UUID, InetSocketAddress]]): Map[InetSocketAddress, ActorRef] = {
var connections = Map.empty[InetSocketAddress, ActorRef]
addresses foreach {
case (uuid, address) ⇒
connections = connections + (address -> createRemoteActorRef(uuid, address))
}
connections
}
private def createRemoteActorRef(uuid: UUID, address: InetSocketAddress) = {
RemoteActorRef(
UUID_PREFIX + uuidToString(uuid), // clustered refs are always registered and looked up by UUID
Actor.TIMEOUT, None)
}
// clustered refs are always registered and looked up by UUID
private def createRemoteActorRef(uuid: UUID, address: InetSocketAddress) =
RemoteActorRef(UUID_PREFIX + uuidToString(uuid), Actor.TIMEOUT, None)
}
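Two collection idioms carry most of the weight in the rewritten ClusterActorRef above: the `/:` fold that builds the connection map and the back-quoted pattern that rewrites a single key on fail-over. A standalone sketch of both, with plain strings standing in for InetSocketAddress and ActorRef so it compiles on its own:

object ConnectionIdioms {
  def main(args: Array[String]): Unit = {
    val actorAddresses = Seq(("uuid-1", "host-a"), ("uuid-2", "host-b"))

    // Fold an empty map over (uuid, address) pairs, as in the constructor above.
    val connections = (Map[String, String]() /: actorAddresses) {
      case (map, (uuid, address)) => map + (address -> s"ref-for-$uuid")
    }

    // Fail over host-a to host-c: only the matching key is rewritten.
    val from = "host-a"
    val failedOver = connections map {
      case (`from`, ref) => "host-c" -> ref
      case other         => other
    }

    println(connections) // Map(host-a -> ref-for-uuid-1, host-b -> ref-for-uuid-2)
    println(failedOver)  // Map(host-c -> ref-for-uuid-1, host-b -> ref-for-uuid-2)
  }
}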

View file

@ -86,7 +86,7 @@ class ReplicatedActorRef private[akka] (actorRef: ActorRef, val address: String)
message: Any,
timeout: Long,
senderOption: Option[ActorRef],
senderFuture: Option[CompletableFuture[T]]): CompletableFuture[T] = actorRef.postMessageToMailboxAndCreateFutureResultWithTimeout(message, timeout, senderOption, senderFuture)
senderFuture: Option[Promise[T]]): Promise[T] = actorRef.postMessageToMailboxAndCreateFutureResultWithTimeout(message, timeout, senderOption, senderFuture)
protected[akka] def actorInstance: AtomicReference[Actor] = actorRef.actorInstance
protected[akka] def supervisor_=(sup: Option[ActorRef]) {
actorRef.supervisor_=(sup)

View file

@ -14,6 +14,8 @@ import akka.AkkaException
import java.net.InetSocketAddress
import com.eaio.uuid.UUID
import annotation.tailrec
import java.util.concurrent.atomic.AtomicReference
class RoutingException(message: String) extends AkkaException(message)
@ -53,76 +55,67 @@ object Router {
trait Router {
def connections: Map[InetSocketAddress, ActorRef]
def route(message: Any)(implicit sender: Option[ActorRef])
def route(message: Any)(implicit sender: Option[ActorRef]): Unit
def route[T](message: Any, timeout: Long)(implicit sender: Option[ActorRef]): Future[T]
}
/**
* @author <a href="http://jonasboner.com">Jonas Bon&#233;r</a>
*/
trait Direct extends Router {
lazy val connection: Option[ActorRef] = {
if (connections.size == 0) throw new IllegalStateException("DirectRouter need a single replica connection found [0]")
connections.toList.map({ case (address, actor) ⇒ actor }).headOption
trait BasicRouter extends Router {
def route(message: Any)(implicit sender: Option[ActorRef]): Unit = next match {
case Some(actor) ⇒ actor.!(message)(sender)
case _ ⇒ throw new RoutingException("No node connections for router")
}
def route(message: Any)(implicit sender: Option[ActorRef]) {
if (connection.isDefined) connection.get.!(message)(sender)
else throw new RoutingException("No node connections for router")
def route[T](message: Any, timeout: Long)(implicit sender: Option[ActorRef]): Future[T] = next match {
case Some(actor) ⇒ actor.!!!(message, timeout)(sender)
case _ ⇒ throw new RoutingException("No node connections for router")
}
def route[T](message: Any, timeout: Long)(implicit sender: Option[ActorRef]): Future[T] =
if (connection.isDefined) connection.get.!!!(message, timeout)(sender)
else throw new RoutingException("No node connections for router")
protected def next: Option[ActorRef]
}
/**
* @author <a href="http://jonasboner.com">Jonas Bon&#233;r</a>
*/
trait Random extends Router {
trait Direct extends BasicRouter {
lazy val next: Option[ActorRef] = connections.values.headOption
}
/**
* @author <a href="http://jonasboner.com">Jonas Bon&#233;r</a>
*/
trait Random extends BasicRouter {
private val random = new java.util.Random(System.currentTimeMillis)
def route(message: Any)(implicit sender: Option[ActorRef]) {
if (next.isDefined) next.get.!(message)(sender)
else throw new RoutingException("No node connections for router")
}
def route[T](message: Any, timeout: Long)(implicit sender: Option[ActorRef]): Future[T] =
if (next.isDefined) next.get.!!!(message, timeout)(sender)
else throw new RoutingException("No node connections for router")
private def next: Option[ActorRef] = {
val nrOfConnections = connections.size
if (nrOfConnections == 0) None
else Some(connections.toArray.apply(random.nextInt(nrOfConnections))._2)
}
def next: Option[ActorRef] =
if (connections.isEmpty) None
else Some(connections.valuesIterator.drop(random.nextInt(connections.size)).next)
}
/**
* @author <a href="http://jonasboner.com">Jonas Bon&#233;r</a>
*/
trait RoundRobin extends Router {
private def items: List[ActorRef] = connections.toList.map({ case (address, actor) ⇒ actor })
trait RoundRobin extends BasicRouter {
private def items: List[ActorRef] = connections.values.toList
@volatile
private var current = items
private val current = new AtomicReference[List[ActorRef]](items)
def route(message: Any)(implicit sender: Option[ActorRef]) {
if (next.isDefined) next.get.!(message)(sender)
else throw new RoutingException("No node connections for router")
}
private def hasNext = connections.nonEmpty
def route[T](message: Any, timeout: Long)(implicit sender: Option[ActorRef]): Future[T] =
if (next.isDefined) next.get.!!!(message, timeout)(sender)
else throw new RoutingException("No node connections for router")
def next: Option[ActorRef] = {
@tailrec
def findNext: Option[ActorRef] = {
val currentItems = current.get
val newItems = currentItems match {
case Nil ⇒ items
case xs ⇒ xs
}
private def hasNext = items != Nil
if (current.compareAndSet(currentItems, newItems.tail)) newItems.headOption
else findNext
}
private def next: Option[ActorRef] = {
val rest = if (current == Nil) items else current
current = rest.tail
rest.headOption
findNext
}
}
}
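A standalone analogue of the BasicRouter refactoring above: the routing plumbing lives in the base trait and each concrete router only supplies `next`. Plain functions stand in for ActorRefs so the sketch compiles on its own, and a simple atomic counter replaces the AtomicReference CAS loop used in the real RoundRobin:

object RouterSketch {
  final class RoutingException(msg: String) extends RuntimeException(msg)

  trait BasicRouter {
    protected def next: Option[String => Unit]
    def route(message: String): Unit = next match {
      case Some(target) => target(message)
      case None         => throw new RoutingException("No node connections for router")
    }
  }

  final class RoundRobin(targets: Vector[String => Unit]) extends BasicRouter {
    private val counter = new java.util.concurrent.atomic.AtomicLong(0L)
    protected def next: Option[String => Unit] =
      if (targets.isEmpty) None
      else Some(targets((counter.getAndIncrement() % targets.size).toInt))
  }

  def main(args: Array[String]): Unit = {
    val router = new RoundRobin(Vector(m => println(s"a got $m"), m => println(s"b got $m")))
    (1 to 4).foreach(i => router.route(s"msg-$i")) // alternates between a and b
  }
}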

View file

@ -13,7 +13,7 @@ import akka.config._
import Config._
import akka.util._
import akka.event.EventHandler
import akka.dispatch.{ DefaultCompletableFuture, CompletableFuture }
import akka.dispatch.{ DefaultPromise, Promise }
import akka.AkkaException
import akka.cluster.zookeeper._
@ -140,7 +140,7 @@ class TransactionLog private (ledger: LedgerHandle, val id: String, val isAsync:
"Reading entries [%s -> %s] for log [%s]".format(from, to, logId))
if (isAsync) {
val future = new DefaultCompletableFuture[Vector[Array[Byte]]](timeout)
val future = new DefaultPromise[Vector[Array[Byte]]](timeout)
ledger.asyncReadEntries(
from, to,
new AsyncCallback.ReadCallback {
@ -149,7 +149,7 @@ class TransactionLog private (ledger: LedgerHandle, val id: String, val isAsync:
ledgerHandle: LedgerHandle,
enumeration: Enumeration[LedgerEntry],
ctx: AnyRef) {
val future = ctx.asInstanceOf[CompletableFuture[Vector[Array[Byte]]]]
val future = ctx.asInstanceOf[Promise[Vector[Array[Byte]]]]
var entries = Vector[Array[Byte]]()
while (enumeration.hasMoreElements) {
entries = entries :+ enumeration.nextElement.getEntry
@ -362,7 +362,7 @@ object TransactionLog {
if (zkClient.exists(txLogPath)) throw new ReplicationException(
"Transaction log for UUID [" + id + "] already exists")
val future = new DefaultCompletableFuture[LedgerHandle](timeout)
val future = new DefaultPromise[LedgerHandle](timeout)
if (isAsync) {
bookieClient.asyncCreateLedger(
ensembleSize, quorumSize, digestType, password,
@ -371,7 +371,7 @@ object TransactionLog {
returnCode: Int,
ledgerHandle: LedgerHandle,
ctx: AnyRef) {
val future = ctx.asInstanceOf[CompletableFuture[LedgerHandle]]
val future = ctx.asInstanceOf[Promise[LedgerHandle]]
if (returnCode == BKException.Code.OK) future.completeWithResult(ledgerHandle)
else future.completeWithException(BKException.create(returnCode))
}
@ -422,7 +422,7 @@ object TransactionLog {
val ledger = try {
if (isAsync) {
val future = new DefaultCompletableFuture[LedgerHandle](timeout)
val future = new DefaultPromise[LedgerHandle](timeout)
bookieClient.asyncOpenLedger(
logId, digestType, password,
new AsyncCallback.OpenCallback {
@ -430,7 +430,7 @@ object TransactionLog {
returnCode: Int,
ledgerHandle: LedgerHandle,
ctx: AnyRef) {
val future = ctx.asInstanceOf[CompletableFuture[LedgerHandle]]
val future = ctx.asInstanceOf[Promise[LedgerHandle]]
if (returnCode == BKException.Code.OK) future.completeWithResult(ledgerHandle)
else future.completeWithException(BKException.create(returnCode))
}
@ -447,7 +447,7 @@ object TransactionLog {
TransactionLog(ledger, id, isAsync)
}
private[akka] def await[T](future: CompletableFuture[T]): T = {
private[akka] def await[T](future: Promise[T]): T = {
future.await
if (future.result.isDefined) future.result.get
else if (future.exception.isDefined) handleError(future.exception.get)
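The asynchronous BookKeeper calls above all follow the same pattern: a DefaultPromise is created up front, handed to the callback as its `ctx` argument, and completed from inside the callback with either the result or the exception. A standalone sketch of that shape, with scala.concurrent.Promise standing in for akka.dispatch.DefaultPromise and a synchronous fake in place of the real asyncReadEntries:

import scala.concurrent.{ Await, Promise }
import scala.concurrent.duration._

object CtxAsPromise {
  // Callback-style API: reports back through the supplied callback and ctx.
  def asyncReadEntries(onComplete: (Int, Vector[String], AnyRef) => Unit, ctx: AnyRef): Unit =
    onComplete(0, Vector("entry-1", "entry-2"), ctx)

  def main(args: Array[String]): Unit = {
    val promise = Promise[Vector[String]]()
    asyncReadEntries(
      (returnCode, entries, ctx) => {
        val p = ctx.asInstanceOf[Promise[Vector[String]]]
        if (returnCode == 0) p.success(entries)                               // completeWithResult analogue
        else p.failure(new RuntimeException("read failed: " + returnCode))    // completeWithException analogue
      },
      promise)
    println(Await.result(promise.future, 1.second)) // Vector(entry-1, entry-2)
  }
}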

View file

@ -1,146 +0,0 @@
/**
* Copyright (C) 2009-2011 Scalable Solutions AB <http://scalablesolutions.se>
*/
package akka.cluster
import org.scalatest.WordSpec
import org.scalatest.matchers.MustMatchers
import org.scalatest.{ BeforeAndAfterAll, BeforeAndAfterEach }
import akka.cluster.zookeeper._
import org.I0Itec.zkclient._
object MultiNodeTest {
val NrOfNodes = 2
val ClusterName = "test-cluster"
val DataPath = "_akka_cluster/data"
val LogPath = "_akka_cluster/log"
}
trait MultiNodeTest extends WordSpec with MustMatchers with BeforeAndAfterAll with BeforeAndAfterEach {
import MultiNodeTest._
val nodeNr = nodeNumber
val port = 9000 + nodeNumber
var zkServer: ZkServer = _
var zkClient: ZkClient = _
def nodeNumber: Int
def createNode = Cluster.node
def barrier(name: String) = ZooKeeperBarrier(zkClient, ClusterName, name, "node-" + nodeNr, NrOfNodes)
override def beforeAll() = {
if (nodeNr == 1) zkServer = Cluster.startLocalCluster(DataPath, LogPath)
zkClient = Cluster.newZkClient
}
override def beforeEach() = {
// if (nodeNr == 1) Cluster.reset
}
override def afterAll() = {
zkClient.close
if (nodeNr == 1) Cluster.shutdownLocalCluster
}
}
class ClusterMultiJvmNode1 extends MultiNodeTest {
def nodeNumber = 1
"A cluster" should {
"be able to start and stop - one node" in {
Cluster setProperty ("akka.cluster.nodename" -> "node1")
Cluster setProperty ("akka.cluster.port" -> "9991")
import Cluster.node
barrier("start-stop") {
node.start()
Thread.sleep(500)
node.membershipNodes.size must be(1)
// node.stop()
Thread.sleep(500)
// node.membershipNodes.size must be(0)
// node.isRunning must be(false)
}
}
"be able to start and stop - two nodes" in {
import Cluster.node
barrier("start-node1") {
node.start()
Thread.sleep(500)
node.membershipNodes.size must be(1)
}
barrier("start-node2") {
// let node2 start
}
node.membershipNodes.size must be(2)
node.leader must be(node.leaderLock.getId)
barrier("stop-node1") {
// node.stop()
Thread.sleep(500)
// node.isRunning must be(false)
}
barrier("stop-node2") {
// let node2 stop
}
}
}
}
class ClusterMultiJvmNode2 extends MultiNodeTest {
def nodeNumber = 2
"A cluster" should {
"be able to start and stop - one node" in {
Cluster setProperty ("akka.cluster.nodename" -> "node2")
Cluster setProperty ("akka.cluster.port" -> "9992")
barrier("start-stop") {
// let node1 start
}
}
"be able to start and stop - two nodes" in {
import Cluster.node
barrier("start-node1") {
// let node1 start
}
barrier("start-node2") {
node.start()
Thread.sleep(500)
node.membershipNodes.size must be(2)
}
barrier("stop-node1") {
// let node1 stop
}
// node.membershipNodes.size must be(1)
// node.leader must be(node.leaderLock.getId)
barrier("stop-node2") {
// node.stop()
// Thread.sleep(500)
// node.isRunning must be(false)
}
}
}
}

View file

@ -0,0 +1 @@
test.name = "node1"

View file

@ -0,0 +1 @@
-Dakka.cluster.nodename=node1 -Dakka.cluster.port=9991

View file

@ -0,0 +1 @@
test.name = "node2"

View file

@ -0,0 +1 @@
-Dakka.cluster.nodename=node2 -Dakka.cluster.port=9992

View file

@ -0,0 +1,88 @@
/**
* Copyright (C) 2009-2011 Scalable Solutions AB <http://scalablesolutions.se>
*/
package akka.cluster.multi
import org.scalatest.WordSpec
import org.scalatest.matchers.MustMatchers
import org.scalatest.BeforeAndAfterAll
import akka.cluster._
object ClusterMultiJvmSpec {
val NrOfNodes = 2
}
class ClusterMultiJvmNode1 extends WordSpec with MustMatchers with BeforeAndAfterAll {
import ClusterMultiJvmSpec._
override def beforeAll() = {
Cluster.startLocalCluster()
// resetCluster()
}
override def afterAll() = {
Cluster.shutdownLocalCluster()
}
def resetCluster(): Unit = {
import akka.cluster.zookeeper._
import akka.util.Helpers.ignore
import org.I0Itec.zkclient.exception.ZkNoNodeException
val zkClient = Cluster.newZkClient
ignore[ZkNoNodeException](zkClient.deleteRecursive("/" + Cluster.name))
ignore[ZkNoNodeException](zkClient.deleteRecursive(ZooKeeperBarrier.BarriersNode))
zkClient.close
}
"A cluster" must {
"have jvm options" in {
System.getProperty("akka.cluster.nodename", "") must be("node1")
System.getProperty("akka.cluster.port", "") must be("9991")
akka.config.Config.config.getString("test.name", "") must be("node1")
}
"be able to start all nodes" in {
Cluster.node.barrier("start", NrOfNodes) {
// Cluster.node.start()
}
// Cluster.node.isRunning must be(true)
}
"be able to shutdown all nodes" in {
Cluster.node.barrier("shutdown", NrOfNodes) {
// Cluster.node.shutdown()
}
// Cluster.node.isRunning must be(false)
}
}
}
class ClusterMultiJvmNode2 extends WordSpec with MustMatchers {
import ClusterMultiJvmSpec._
"A cluster" must {
"have jvm options" in {
System.getProperty("akka.cluster.nodename", "") must be("node2")
System.getProperty("akka.cluster.port", "") must be("9992")
akka.config.Config.config.getString("test.name", "") must be("node2")
}
"be able to start all nodes" in {
Cluster.node.barrier("start", NrOfNodes) {
// Cluster.node.start()
}
// Cluster.node.isRunning must be(true)
}
"be able to shutdown all nodes" in {
Cluster.node.barrier("shutdown", NrOfNodes) {
// Cluster.node.shutdown()
}
// Cluster.node.isRunning must be(false)
}
}
}
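The barrier blocks used throughout these specs make each node wait until all NrOfNodes participants have reached the same named point before proceeding. A standalone analogue of that coordination, with java.util.concurrent.CyclicBarrier standing in for the ZooKeeper-backed barrier and threads standing in for the forked JVMs:

import java.util.concurrent.CyclicBarrier

object BarrierSketch {
  val NrOfNodes = 2
  val barrier = new CyclicBarrier(NrOfNodes)

  def node(name: String): Thread = new Thread(() => {
    println(name + " waiting at barrier")
    barrier.await() // blocks until both "nodes" have arrived
    println(name + " passed the barrier")
  })

  def main(args: Array[String]): Unit = {
    val nodes = Seq(node("node1"), node("node2"))
    nodes.foreach(_.start())
    nodes.foreach(_.join())
  }
}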

View file

@ -59,13 +59,13 @@ actor is oblivious to which type of mailbox it is using. Here is an example::
or for a thread-based durable dispatcher::
self.dispatcher = DurableThreadBasedDispatcher(
self.dispatcher = DurablePinnedDispatcher(
self,
FileDurableMailboxStorage)
There are 2 different durable dispatchers, ``DurableEventBasedDispatcher`` and
``DurableThreadBasedDispatcher``, which are durable versions of
``ExecutorBasedEventDrivenDispatcher`` and ``ThreadBasedDispatcher``.
``DurablePinnedDispatcher``, which are durable versions of
``Dispatcher`` and ``PinnedDispatcher``.
This gives you an excellent way of creating bulkheads in your application, where
groups of actors sharing the same dispatcher also share the same backing
@ -120,7 +120,7 @@ Here is an example of how you can configure your dispatcher to use this mailbox:
or for a thread-based durable dispatcher::
self.dispatcher = DurableThreadBasedDispatcher(
self.dispatcher = DurablePinnedDispatcher(
self,
RedisDurableMailboxStorage)
@ -164,7 +164,7 @@ Here is an example of how you can configure your dispatcher to use this mailbox:
or for a thread-based durable dispatcher::
self.dispatcher = DurableThreadBasedDispatcher(
self.dispatcher = DurablePinnedDispatcher(
self,
ZooKeeperDurableMailboxStorage)
@ -202,7 +202,7 @@ Beanstalk documentation on how to do that. ::
or for a thread-based durable dispatcher. ::
self.dispatcher = DurableThreadBasedDispatcher(
self.dispatcher = DurablePinnedDispatcher(
self,
BeanstalkDurableMailboxStorage)

View file

@ -19,7 +19,7 @@ You can specify JVM options for the forked JVMs::
There are two sbt commands: ``multi-jvm-run`` for running applications and
``multi-jvm-test`` for running ScalaTest tests.
The ``MultiJvmTests`` trait resides in the ``project/build`` directory.
The ``MultiJvmTests`` trait resides in the ``project/build`` directory.
Creating application tests
~~~~~~~~~~~~~~~~~~~~~~~~~~
@ -91,6 +91,51 @@ You can change what the ``MultiJvm`` identifier is. For example, to change it to
Your tests should now be named ``{TestName}ClusterTest{NodeName}``.
Configuration of the JVM instances
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Setting JVM options
-------------------
You can define specific JVM options for each of the spawned JVMs. You do that by creating
a file named after the node in the test with suffix ``.opts``.
For example, to feed the JVM options ``-Dakka.cluster.nodename=node1`` and
``-Dakka.cluster.port=9991`` to the ``TestMultiJvmNode1`` let's create three ``*.opts`` files
and add the options to them.
``TestMultiJvmNode1.opts``::
-Dakka.cluster.nodename=node1 -Dakka.cluster.port=9991
``TestMultiJvmNode2.opts``::
-Dakka.cluster.nodename=node2 -Dakka.cluster.port=9992
``TestMultiJvmNode3.opts``::
-Dakka.cluster.nodename=node3 -Dakka.cluster.port=9993
Overriding akka.conf options
----------------------------
You can also override the options in the ``akka.conf`` file with different options for each
spawned JVM. You do that by creating a file named after the node in the test with suffix ``.conf``.
For example, to override the configuration option ``akka.cluster.name`` let's create three ``*.conf`` files
and add the option to them.
``TestMultiJvmNode1.conf``::
akka.cluster.name = "test-cluster"
``TestMultiJvmNode2.conf``::
akka.cluster.name = "test-cluster"
``TestMultiJvmNode3.conf``::
akka.cluster.name = "test-cluster"
ScalaTest
~~~~~~~~~

View file

@ -58,7 +58,7 @@ A custom ``akka.conf`` might look like this:
"sample.myservice.Boot"]
actor {
throughput = 10 # Throughput for ExecutorBasedEventDrivenDispatcher, set to 1 for complete fairness
throughput = 10 # Throughput for Dispatcher, set to 1 for complete fairness
}
remote {

View file

@ -8,7 +8,7 @@ Dataflow Concurrency (Java)
Introduction
------------
**IMPORTANT: As of Akka 1.1, Akka Future, CompletableFuture and DefaultCompletableFuture have all the functionality of DataFlowVariables, they also support non-blocking composition and advanced features like fold and reduce, Akka DataFlowVariable is therefor deprecated and will probably resurface in the following release as a DSL on top of Futures.**
**IMPORTANT: As of Akka 1.1, Akka Future, Promise and DefaultPromise have all the functionality of DataFlowVariables; they also support non-blocking composition and advanced features like fold and reduce. Akka DataFlowVariable is therefore deprecated and will probably resurface in the following release as a DSL on top of Futures.**
Akka implements `Oz-style dataflow concurrency <http://www.mozart-oz.org/documentation/tutorial/node8.html#chapter.concurrency>`_ through dataflow (single assignment) variables and lightweight (event-based) processes/threads.

View file

@ -18,7 +18,7 @@ The event-based Actors currently consume ~600 bytes per Actor which means that y
Default dispatcher
------------------
For most scenarios the default settings are the best. Here we have one single event-based dispatcher for all Actors created. The dispatcher used is globalExecutorBasedEventDrivenDispatcher in akka.dispatch.Dispatchers.
For most scenarios the default settings are the best. Here we have one single event-based dispatcher for all Actors created. The dispatcher used is globalDispatcher in akka.dispatch.Dispatchers.
But if you feel that you are starting to contend on the single dispatcher (the 'Executor' and its queue) or want to group a specific set of Actors for a dedicated dispatcher for better flexibility and configurability then you can override the defaults and define your own dispatcher. See below for details on which ones are available and how they can be configured.
@ -59,11 +59,11 @@ Let's now walk through the different dispatchers in more detail.
Thread-based
^^^^^^^^^^^^
The 'ThreadBasedDispatcher' binds a dedicated OS thread to each specific Actor. The messages are posted to a 'LinkedBlockingQueue' which feeds the messages to the dispatcher one by one. A 'ThreadBasedDispatcher' cannot be shared between actors. This dispatcher has worse performance and scalability than the event-based dispatcher but works great for creating "daemon" Actors that consumes a low frequency of messages and are allowed to go off and do their own thing for a longer period of time. Another advantage with this dispatcher is that Actors do not block threads for each other.
The 'PinnedDispatcher' binds a dedicated OS thread to each specific Actor. The messages are posted to a 'LinkedBlockingQueue' which feeds the messages to the dispatcher one by one. A 'PinnedDispatcher' cannot be shared between actors. This dispatcher has worse performance and scalability than the event-based dispatcher but works great for creating "daemon" Actors that consume a low frequency of messages and are allowed to go off and do their own thing for a longer period of time. Another advantage of this dispatcher is that Actors do not block threads for each other.
.. code-block:: java
Dispatcher dispatcher = Dispatchers.newThreadBasedDispatcher(actorRef);
Dispatcher dispatcher = Dispatchers.newPinnedDispatcher(actorRef);
It would normally be used from within the actor like this:
@ -71,7 +71,7 @@ It would normally by used from within the actor like this:
class MyActor extends UntypedActor {
public MyActor() {
getContext().setDispatcher(Dispatchers.newThreadBasedDispatcher(getContext()));
getContext().setDispatcher(Dispatchers.newPinnedDispatcher(getContext()));
}
...
}
@ -79,7 +79,7 @@ It would normally by used from within the actor like this:
Event-based
^^^^^^^^^^^
The 'ExecutorBasedEventDrivenDispatcher' binds a set of Actors to a thread pool backed up by a 'BlockingQueue'. This dispatcher is highly configurable and supports a fluent configuration API to configure the 'BlockingQueue' (type of queue, max items etc.) as well as the thread pool.
The 'Dispatcher' binds a set of Actors to a thread pool backed up by a 'BlockingQueue'. This dispatcher is highly configurable and supports a fluent configuration API to configure the 'BlockingQueue' (type of queue, max items etc.) as well as the thread pool.
The event-driven dispatchers **must be shared** between multiple Typed Actors and/or Actors. One best practice is to let each top-level Actor, e.g. the Actors you define in the declarative supervisor config, get its own dispatcher but reuse the dispatcher for each new Actor that the top-level Actor creates. But you can also share a dispatcher between multiple top-level Actors. This is very use-case specific and needs to be tried out on a case by case basis. The important thing is that Akka tries to provide you with the freedom you need to design and implement your system in the most efficient way in regards to performance, throughput and latency.
@ -109,7 +109,7 @@ Here is an example:
class MyActor extends UntypedActor {
public MyActor() {
getContext().setDispatcher(Dispatchers.newExecutorBasedEventDrivenDispatcher(name)
getContext().setDispatcher(Dispatchers.newDispatcher(name)
.withNewThreadPoolWithLinkedBlockingQueueWithCapacity(100)
.setCorePoolSize(16)
.setMaxPoolSize(128)
@ -120,7 +120,7 @@ Here is an example:
...
}
This 'ExecutorBasedEventDrivenDispatcher' allows you to define the 'throughput' it should have. This defines the number of messages for a specific Actor the dispatcher should process in one single sweep.
This 'Dispatcher' allows you to define the 'throughput' it should have. This defines the number of messages for a specific Actor the dispatcher should process in one single sweep.
Setting this to a higher number will increase throughput but lower fairness, and vice versa. If you don't specify it explicitly then it uses the default value defined in the 'akka.conf' configuration file:
.. code-block:: xml
@ -136,10 +136,10 @@ Browse the :ref:`scaladoc` or look at the code for all the options available.
Priority event-based
^^^^^^^^^^^^^^^^^^^^
Sometimes it's useful to be able to specify priority order of messages, that is done by using PriorityExecutorBasedEventDrivenDispatcher and supply
Sometimes it's useful to be able to specify the priority order of messages. That is done by using PriorityDispatcher and supplying
a java.util.Comparator[MessageInvocation] or an akka.dispatch.PriorityGenerator (recommended):
Creating a PriorityExecutorBasedEventDrivenDispatcher using PriorityGenerator:
Creating a PriorityDispatcher using PriorityGenerator:
.. code-block:: java
@ -168,7 +168,7 @@ Creating a PriorityExecutorBasedEventDrivenDispatcher using PriorityGenerator:
// We create an instance of the actor that will print out the messages it processes
ActorRef ref = Actors.actorOf(MyActor.class);
// We create a new Priority dispatcher and seed it with the priority generator
ref.setDispatcher(new PriorityExecutorBasedEventDrivenDispatcher("foo", gen));
ref.setDispatcher(new PriorityDispatcher("foo", gen));
ref.start(); // Start the actor
ref.getDispatcher().suspend(ref); // Suspending the actor so it doesn't start to treat the messages before we have enqueued all of them :-)
@ -196,14 +196,14 @@ lowpriority
Work-stealing event-based
^^^^^^^^^^^^^^^^^^^^^^^^^
The 'ExecutorBasedEventDrivenWorkStealingDispatcher' is a variation of the 'ExecutorBasedEventDrivenDispatcher' in which Actors of the same type can be set up to share this dispatcher and during execution time the different actors will steal messages from other actors if they have less messages to process. This can be a great way to improve throughput at the cost of a little higher latency.
The 'BalancingDispatcher' is a variation of the 'Dispatcher' in which Actors of the same type can be set up to share this dispatcher and during execution time the different actors will steal messages from other actors if they have less messages to process. This can be a great way to improve throughput at the cost of a little higher latency.
Normally the way you use it is to define a static field to hold the dispatcher and then set it in the Actor explicitly.
.. code-block:: java
class MyActor extends UntypedActor {
public static MessageDispatcher dispatcher = Dispatchers.newExecutorBasedEventDrivenWorkStealingDispatcher(name).build();
public static MessageDispatcher dispatcher = Dispatchers.newBalancingDispatcher(name).build();
public MyActor() {
getContext().setDispatcher(dispatcher);
@ -236,7 +236,7 @@ Per-instance based configuration
You can also do it on a specific dispatcher instance.
For the 'ExecutorBasedEventDrivenDispatcher' and the 'ExecutorBasedWorkStealingDispatcher' you can do it through their constructor
For the 'Dispatcher' and the 'BalancingDispatcher' you can do it through their constructors
.. code-block:: java
@ -246,13 +246,13 @@ For the 'ExecutorBasedEventDrivenDispatcher' and the 'ExecutorBasedWorkStealingD
Duration pushTimeout = new FiniteDuration(10, TimeUnit.SECONDS);
MailboxType mailboxCapacity = new BoundedMailbox(false, capacity, pushTimeout);
MessageDispatcher dispatcher =
Dispatchers.newExecutorBasedEventDrivenDispatcher(name, throughput, mailboxCapacity).build();
Dispatchers.newDispatcher(name, throughput, mailboxCapacity).build();
getContext().setDispatcher(dispatcher);
}
...
}
For the 'ThreadBasedDispatcher', it is non-shareable between actors, and associates a dedicated Thread with the actor.
The 'PinnedDispatcher' is non-shareable between actors and associates a dedicated Thread with the actor.
Making it bounded (by specifying a capacity) is optional, but if you do, you need to provide a pushTimeout (default is 10 seconds). When trying to send a message to the Actor it will throw a MessageQueueAppendFailedException("BlockingMessageTransferQueue transfer timed out") if the message cannot be added to the mailbox within the time specified by the pushTimeout.
.. code-block:: java
@ -261,7 +261,7 @@ Making it bounded (by specifying a capacity) is optional, but if you do, you nee
public MyActor() {
int mailboxCapacity = 100;
Duration pushTimeout = new FiniteDuration(10, TimeUnit.SECONDS);
getContext().setDispatcher(Dispatchers.newThreadBasedDispatcher(getContext(), mailboxCapacity, pushTimeout));
getContext().setDispatcher(Dispatchers.newPinnedDispatcher(getContext(), mailboxCapacity, pushTimeout));
}
...
}

View file

@ -308,9 +308,9 @@ Reply using the sender future
If a message was sent with the 'sendRequestReply' or 'sendRequestReplyFuture' methods, which both implements request-reply semantics using Future's, then you either have the option of replying using the 'reply' method as above. This method will then resolve the Future. But you can also get a reference to the Future directly and resolve it yourself or if you would like to store it away to resolve it later, or pass it on to some other Actor to resolve it.
The reference to the Future resides in the 'ActorRef' instance and can be retrieved using 'Option<CompletableFuture> getSenderFuture()'.
The reference to the Future resides in the 'ActorRef' instance and can be retrieved using 'Option<Promise> getSenderFuture()'.
CompletableFuture is a future with methods for 'completing the future:
Promise is a future with methods for completing the future:
* completeWithResult(..)
* completeWithException(..)

View file

@ -36,7 +36,7 @@ Configuration
actor {
timeout = 5 # default timeout for future based invocations
throughput = 5 # default throughput for ExecutorBasedEventDrivenDispatcher
throughput = 5 # default throughput for Dispatcher
}
...
}

View file

@ -199,7 +199,7 @@ Release 1.0-MILESTONE1
- **FIX** - #420 REST endpoints should be able to be processed in parallel (Viktor Klang)
- **FIX** - #422 Dispatcher config should work for ThreadPoolBuilder-based dispatchers (Viktor Klang)
- **FIX** - #401 ActorRegistry should not leak memory (Viktor Klang)
- **FIX** - #250 Performance optimization for ExecutorBasedEventDrivenDispatcher (Viktor Klang)
- **FIX** - #250 Performance optimization for Dispatcher (Viktor Klang)
- **FIX** - #419 Rename init and shutdown callbacks to preStart and postStop, and remove initTransactionalState (Viktor Klang)
- **FIX** - #346 Make max no of restarts (and within) are now both optional (Viktor Klang)
- **FIX** - #424 Actors self.supervisor not set by the time init() is called when started by startLink() (Viktor Klang)
@ -210,7 +210,7 @@ Release 1.0-MILESTONE1
- **FIX** - Logger.warn now properly works with varargs (Viktor Klang)
- **FIX** - #450 Removed ActorRef lifeCycle boilerplate: Some(LifeCycle(Permanent)) => Permanent (Viktor Klang)
- **FIX** - Moved ActorRef.trapExit into ActorRef.faultHandler and removed Option-boilerplate from faultHandler (Viktor Klang)
- **FIX** - ThreadBasedDispatcher cheaper for idling actors, also benefits from all that is ExecutorBasedEventDrivenDispatcher (Viktor Klang)
- **FIX** - PinnedDispatcher cheaper for idling actors, also benefits from all that is Dispatcher (Viktor Klang)
- **FIX** - Fixing Futures.future, uses Actor.spawn under the hood, specify dispatcher to control where block is executed (Viktor Klang)
- **FIX** - #469 Akka "dist" now uses a root folder to avoid loitering if unzipped in a folder (Viktor Klang)
- **FIX** - Removed ScalaConfig, JavaConfig and rewrote Supervision configuration (Viktor Klang)
@ -224,7 +224,7 @@ Release 1.0-MILESTONE1
- **ADD** - #262 Add Java API for Agent (Viktor Klang)
- **ADD** - #264 Add Java API for Dataflow (Viktor Klang)
- **ADD** - Using JerseySimpleBroadcaster instead of JerseyBroadcaster in AkkaBroadcaster (Viktor Klang)
- **ADD** - #433 Throughput deadline added for ExecutorBasedEventDrivenDispatcher (Viktor Klang)
- **ADD** - #433 Throughput deadline added for Dispatcher (Viktor Klang)
- **ADD** - Add possibility to set default cometSupport in akka.conf (Viktor Klang)
- **ADD** - #451 Added possibility to use akka-http as a standalone REST server (Viktor Klang)
- **ADD** - #446 Added support for Erlang-style receiveTimeout (Viktor Klang)
@ -308,7 +308,7 @@ Release 0.10 - Aug 21 2010
- **ADD** - Java API for the STM (Peter Vlugter)
- **ADD** - #379 Create STM Atomic templates for Java API (Peter Vlugter)
- **ADD** - #270 SBT plugin for Akka (Peter Vlugter)
- **ADD** - #198 support for ThreadBasedDispatcher in Spring config (Michael Kober)
- **ADD** - #198 support for PinnedDispatcher in Spring config (Michael Kober)
- **ADD** - #377 support HawtDispatcher in Spring config (Michael Kober)
- **ADD** - #376 support Spring config for untyped actors (Michael Kober)
- **ADD** - #200 support WorkStealingDispatcher in Spring config (Michael Kober)

View file

@ -331,7 +331,7 @@ Reply using the sender future
If a message was sent with the ``!!`` or ``!!!`` methods, which both implements request-reply semantics using Future's, then you either have the option of replying using the ``reply`` method as above. This method will then resolve the Future. But you can also get a reference to the Future directly and resolve it yourself or if you would like to store it away to resolve it later, or pass it on to some other Actor to resolve it.
The reference to the Future resides in the ``senderFuture: Option[CompletableFuture[_]]`` member field in the ``ActorRef`` class.
The reference to the Future resides in the ``senderFuture: Option[Promise[_]]`` member field in the ``ActorRef`` class.
Here is an example of how it can be used:

View file

@ -22,7 +22,7 @@ For most scenarios the default settings are the best. Here we have one single ev
.. code-block:: scala
Dispatchers.globalExecutorBasedEventDrivenDispatcher
Dispatchers.globalDispatcher
But if you feel that you are starting to contend on the single dispatcher (the 'Executor' and its queue) or want to group a specific set of Actors for a dedicated dispatcher for better flexibility and configurability then you can override the defaults and define your own dispatcher. See below for details on which ones are available and how they can be configured.
@ -61,7 +61,7 @@ Let's now walk through the different dispatchers in more detail.
Thread-based
^^^^^^^^^^^^
The 'ThreadBasedDispatcher' binds a dedicated OS thread to each specific Actor. The messages are posted to a 'LinkedBlockingQueue' which feeds the messages to the dispatcher one by one. A 'ThreadBasedDispatcher' cannot be shared between actors. This dispatcher has worse performance and scalability than the event-based dispatcher but works great for creating "daemon" Actors that consumes a low frequency of messages and are allowed to go off and do their own thing for a longer period of time. Another advantage with this dispatcher is that Actors do not block threads for each other.
The 'PinnedDispatcher' binds a dedicated OS thread to each specific Actor. The messages are posted to a 'LinkedBlockingQueue' which feeds the messages to the dispatcher one by one. A 'PinnedDispatcher' cannot be shared between actors. This dispatcher has worse performance and scalability than the event-based dispatcher but works great for creating "daemon" Actors that consume a low frequency of messages and are allowed to go off and do their own thing for a longer period of time. Another advantage of this dispatcher is that Actors do not block threads for each other.
It would normally be used from within the actor like this:
@ -69,7 +69,7 @@ It would normally by used from within the actor like this:
class MyActor extends Actor {
public MyActor() {
self.dispatcher = Dispatchers.newThreadBasedDispatcher(self)
self.dispatcher = Dispatchers.newPinnedDispatcher(self)
}
...
}
@ -77,7 +77,7 @@ It would normally by used from within the actor like this:
Event-based
^^^^^^^^^^^
The 'ExecutorBasedEventDrivenDispatcher' binds a set of Actors to a thread pool backed up by a 'BlockingQueue'. This dispatcher is highly configurable and supports a fluent configuration API to configure the 'BlockingQueue' (type of queue, max items etc.) as well as the thread pool.
The 'Dispatcher' binds a set of Actors to a thread pool backed up by a 'BlockingQueue'. This dispatcher is highly configurable and supports a fluent configuration API to configure the 'BlockingQueue' (type of queue, max items etc.) as well as the thread pool.
The event-driven dispatchers **must be shared** between multiple Actors. One best practice is to let each top-level Actor, e.g. the Actors you define in the declarative supervisor config, get its own dispatcher but reuse the dispatcher for each new Actor that the top-level Actor creates. But you can also share a dispatcher between multiple top-level Actors. This is very use-case specific and needs to be tried out on a case by case basis. The important thing is that Akka tries to provide you with the freedom you need to design and implement your system in the most efficient way in regards to performance, throughput and latency.
@ -106,7 +106,7 @@ Here is an example:
import java.util.concurrent.ThreadPoolExecutor.CallerRunsPolicy
class MyActor extends Actor {
self.dispatcher = Dispatchers.newExecutorBasedEventDrivenDispatcher(name)
self.dispatcher = Dispatchers.newDispatcher(name)
.withNewThreadPoolWithLinkedBlockingQueueWithCapacity(100)
.setCorePoolSize(16)
.setMaxPoolSize(128)
@ -116,7 +116,7 @@ Here is an example:
...
}
This 'ExecutorBasedEventDrivenDispatcher' allows you to define the 'throughput' it should have. This defines the number of messages for a specific Actor the dispatcher should process in one single sweep.
This 'Dispatcher' allows you to define the 'throughput' it should have. This defines the number of messages for a specific Actor the dispatcher should process in one single sweep.
Setting this to a higher number will increase throughput but lower fairness, and vice versa. If you don't specify it explicitly then it uses the default value defined in the 'akka.conf' configuration file:
.. code-block:: ruby
@ -132,10 +132,10 @@ Browse the `ScalaDoc <scaladoc>`_ or look at the code for all the options availa
Priority event-based
^^^^^^^^^^^^^^^^^^^^
Sometimes it's useful to be able to specify priority order of messages, that is done by using PriorityExecutorBasedEventDrivenDispatcher and supply
Sometimes it's useful to be able to specify the priority order of messages. That is done by using PriorityDispatcher and supplying
a java.util.Comparator[MessageInvocation] or an akka.dispatch.PriorityGenerator (recommended):
Creating a PriorityExecutorBasedEventDrivenDispatcher using PriorityGenerator:
Creating a PriorityDispatcher using PriorityGenerator:
.. code-block:: scala
@ -156,7 +156,7 @@ Creating a PriorityExecutorBasedEventDrivenDispatcher using PriorityGenerator:
})
// We create a new Priority dispatcher and seed it with the priority generator
a.dispatcher = new PriorityExecutorBasedEventDrivenDispatcher("foo", gen)
a.dispatcher = new PriorityDispatcher("foo", gen)
a.start // Start the Actor
a.dispatcher.suspend(a) // Suspending the actor so it doesn't start to treat the messages before we have enqueued all of them :-)
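For reference, the ordering itself can be illustrated without the dispatcher machinery: a PriorityGenerator boils down to a function from message to priority value, and lower values are handled first. A standalone sketch (illustrative only) using a plain java.util.PriorityQueue in place of the akka dispatcher::

object PrioritySketch {
  // Lower value = handled first, mirroring the PriorityGenerator convention.
  def priorityOf(message: Any): Int = message match {
    case "highpriority" => 0
    case "lowpriority"  => 100
    case _              => 50
  }

  def main(args: Array[String]): Unit = {
    val byPriority: Ordering[Any] = Ordering.by(priorityOf)
    val queue = new java.util.PriorityQueue[Any](11, byPriority)
    Seq("lowpriority", "ordinary message", "highpriority").foreach(m => queue.add(m))
    while (!queue.isEmpty) println(queue.poll()) // highpriority, ordinary message, lowpriority
  }
}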
@ -184,14 +184,14 @@ Prints:
Work-stealing event-based
^^^^^^^^^^^^^^^^^^^^^^^^^
The 'ExecutorBasedEventDrivenWorkStealingDispatcher' is a variation of the 'ExecutorBasedEventDrivenDispatcher' in which Actors of the same type can be set up to share this dispatcher and during execution time the different actors will steal messages from other actors if they have less messages to process. This can be a great way to improve throughput at the cost of a little higher latency.
The 'BalancingDispatcher' is a variation of the 'Dispatcher' in which Actors of the same type can be set up to share this dispatcher and during execution time the different actors will steal messages from other actors if they have less messages to process. This can be a great way to improve throughput at the cost of a little higher latency.
Normally the way you use it is to create an Actor companion object to hold the dispatcher and then set it in the Actor explicitly.
.. code-block:: scala
object MyActor {
val dispatcher = Dispatchers.newExecutorBasedEventDrivenWorkStealingDispatcher(name).build
val dispatcher = Dispatchers.newBalancingDispatcher(name).build
}
class MyActor extends Actor {
@ -224,24 +224,24 @@ Per-instance based configuration
You can also do it on a specific dispatcher instance.
For the 'ExecutorBasedEventDrivenDispatcher' and the 'ExecutorBasedWorkStealingDispatcher' you can do it through their constructor
For the 'Dispatcher' and the 'BalancingDispatcher' you can do it through their constructors
.. code-block:: scala
class MyActor extends Actor {
val mailboxCapacity = BoundedMailbox(capacity = 100)
self.dispatcher = Dispatchers.newExecutorBasedEventDrivenDispatcher(name, throughput, mailboxCapacity).build
self.dispatcher = Dispatchers.newDispatcher(name, throughput, mailboxCapacity).build
...
}
For the 'ThreadBasedDispatcher', it is non-shareable between actors, and associates a dedicated Thread with the actor.
The 'PinnedDispatcher' is non-shareable between actors and associates a dedicated Thread with the actor.
Making it bounded (by specifying a capacity) is optional, but if you do, you need to provide a pushTimeout (default is 10 seconds). When trying to send a message to the Actor it will throw a MessageQueueAppendFailedException("BlockingMessageTransferQueue transfer timed out") if the message cannot be added to the mailbox within the time specified by the pushTimeout.
.. code-block:: scala
class MyActor extends Actor {
import akka.util.duration._
self.dispatcher = Dispatchers.newThreadBasedDispatcher(self, mailboxCapacity = 100,
self.dispatcher = Dispatchers.newPinnedDispatcher(self, mailboxCapacity = 100,
pushTimeOut = 10 seconds)
...
}

View file

@ -52,7 +52,7 @@ case object FileDurableMailboxStorage extends DurableMailboxStorage("akka.a
case object ZooKeeperDurableMailboxStorage extends DurableMailboxStorage("akka.actor.mailbox.ZooKeeperBasedMailbox")
/**
* The durable equivalent of ExecutorBasedEventDrivenDispatcher
* The durable equivalent of Dispatcher
*
* @author <a href="http://jonasboner.com">Jonas Bon&#233;r</a>
*/
@ -62,7 +62,7 @@ case class DurableEventBasedDispatcher(
_throughput: Int = Dispatchers.THROUGHPUT,
_throughputDeadlineTime: Int = Dispatchers.THROUGHPUT_DEADLINE_TIME_MILLIS,
_mailboxType: MailboxType = Dispatchers.MAILBOX_TYPE,
_config: ThreadPoolConfig = ThreadPoolConfig()) extends ExecutorBasedEventDrivenDispatcher(
_config: ThreadPoolConfig = ThreadPoolConfig()) extends Dispatcher(
_name,
_throughput,
_throughputDeadlineTime,
@ -101,14 +101,14 @@ case class DurableEventBasedDispatcher(
}
/**
* The durable equivalent of ThreadBasedDispatcher
* The durable equivalent of PinnedDispatcher
*
* @author <a href="http://jonasboner.com">Jonas Bon&#233;r</a>
*/
case class DurableThreadBasedDispatcher(
case class DurablePinnedDispatcher(
_actor: ActorRef,
_storage: DurableMailboxStorage,
_mailboxType: MailboxType) extends ThreadBasedDispatcher(_actor,_mailboxType) {
_mailboxType: MailboxType) extends PinnedDispatcher(_actor,_mailboxType) {
def this(actor: ActorRef, _storage: DurableMailboxStorage) =
this(actor, _storage, UnboundedMailbox()) // For Java API

View file

@ -37,8 +37,8 @@ abstract class DurableExecutableMailbox(owner: ActorRef) extends MessageQueue wi
EventHandler.debug(this, "Creating %s mailbox [%s]".format(getClass.getName, name))
val dispatcher: ExecutorBasedEventDrivenDispatcher = owner.dispatcher match {
case e: ExecutorBasedEventDrivenDispatcher => e
val dispatcher: Dispatcher = owner.dispatcher match {
case e: Dispatcher => e
case _ => null
}

View file

@ -10,7 +10,8 @@ public final class RemoteProtocol {
}
public enum CommandType
implements com.google.protobuf.ProtocolMessageEnum {
SHUTDOWN(0, 1),
CONNECT(0, 1),
SHUTDOWN(1, 2),
;
@ -18,7 +19,8 @@ public final class RemoteProtocol {
public static CommandType valueOf(int value) {
switch (value) {
case 1: return SHUTDOWN;
case 1: return CONNECT;
case 2: return SHUTDOWN;
default: return null;
}
}
@ -49,7 +51,7 @@ public final class RemoteProtocol {
}
private static final CommandType[] VALUES = {
SHUTDOWN,
CONNECT, SHUTDOWN,
};
public static CommandType valueOf(
com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
@ -680,13 +682,6 @@ public final class RemoteProtocol {
return metadata_.get(index);
}
// optional string cookie = 9;
public static final int COOKIE_FIELD_NUMBER = 9;
private boolean hasCookie;
private java.lang.String cookie_ = "";
public boolean hasCookie() { return hasCookie; }
public java.lang.String getCookie() { return cookie_; }
private void initFields() {
uuid_ = akka.remote.protocol.RemoteProtocol.UuidProtocol.getDefaultInstance();
actorInfo_ = akka.remote.protocol.RemoteProtocol.ActorInfoProtocol.getDefaultInstance();
@ -746,9 +741,6 @@ public final class RemoteProtocol {
for (akka.remote.protocol.RemoteProtocol.MetadataEntryProtocol element : getMetadataList()) {
output.writeMessage(8, element);
}
if (hasCookie()) {
output.writeString(9, getCookie());
}
getUnknownFields().writeTo(output);
}
@ -790,10 +782,6 @@ public final class RemoteProtocol {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(8, element);
}
if (hasCookie()) {
size += com.google.protobuf.CodedOutputStream
.computeStringSize(9, getCookie());
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
@ -983,9 +971,6 @@ public final class RemoteProtocol {
}
result.metadata_.addAll(other.metadata_);
}
if (other.hasCookie()) {
setCookie(other.getCookie());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
@ -1075,10 +1060,6 @@ public final class RemoteProtocol {
addMetadata(subBuilder.buildPartial());
break;
}
case 74: {
setCookie(input.readString());
break;
}
}
}
}
@ -1375,27 +1356,6 @@ public final class RemoteProtocol {
return this;
}
// optional string cookie = 9;
public boolean hasCookie() {
return result.hasCookie();
}
public java.lang.String getCookie() {
return result.getCookie();
}
public Builder setCookie(java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
result.hasCookie = true;
result.cookie_ = value;
return this;
}
public Builder clearCookie() {
result.hasCookie = false;
result.cookie_ = getDefaultInstance().getCookie();
return this;
}
// @@protoc_insertion_point(builder_scope:RemoteMessageProtocol)
}
@ -1450,7 +1410,7 @@ public final class RemoteProtocol {
public akka.remote.protocol.RemoteProtocol.CommandType getCommandType() { return commandType_; }
private void initFields() {
commandType_ = akka.remote.protocol.RemoteProtocol.CommandType.SHUTDOWN;
commandType_ = akka.remote.protocol.RemoteProtocol.CommandType.CONNECT;
}
public final boolean isInitialized() {
if (!hasCommandType) return false;
@ -1729,7 +1689,7 @@ public final class RemoteProtocol {
}
public Builder clearCommandType() {
result.hasCommandType = false;
result.commandType_ = akka.remote.protocol.RemoteProtocol.CommandType.SHUTDOWN;
result.commandType_ = akka.remote.protocol.RemoteProtocol.CommandType.CONNECT;
return this;
}
@ -5710,46 +5670,46 @@ public final class RemoteProtocol {
"\n\024RemoteProtocol.proto\"j\n\022AkkaRemoteProt" +
"ocol\022\'\n\007message\030\001 \001(\0132\026.RemoteMessagePro" +
"tocol\022+\n\013instruction\030\002 \001(\0132\026.RemoteContr" +
"olProtocol\"\277\002\n\025RemoteMessageProtocol\022\033\n\004" +
"olProtocol\"\257\002\n\025RemoteMessageProtocol\022\033\n\004" +
"uuid\030\001 \002(\0132\r.UuidProtocol\022%\n\tactorInfo\030\002" +
" \002(\0132\022.ActorInfoProtocol\022\016\n\006oneWay\030\003 \002(\010" +
"\022!\n\007message\030\004 \001(\0132\020.MessageProtocol\022%\n\te" +
"xception\030\005 \001(\0132\022.ExceptionProtocol\022%\n\016su" +
"pervisorUuid\030\006 \001(\0132\r.UuidProtocol\022\'\n\006sen" +
"der\030\007 \001(\0132\027.RemoteActorRefProtocol\022(\n\010me",
"tadata\030\010 \003(\0132\026.MetadataEntryProtocol\022\016\n\006" +
"cookie\030\t \001(\t\"J\n\025RemoteControlProtocol\022\016\n" +
"\006cookie\030\001 \001(\t\022!\n\013commandType\030\002 \002(\0162\014.Com" +
"mandType\":\n\026RemoteActorRefProtocol\022\017\n\007ad" +
"dress\030\001 \002(\t\022\017\n\007timeout\030\002 \001(\004\"\323\002\n\032Seriali" +
"zedActorRefProtocol\022\033\n\004uuid\030\001 \002(\0132\r.Uuid" +
"Protocol\022\017\n\007address\030\002 \002(\t\022\026\n\016actorClassn" +
"ame\030\003 \002(\t\022\025\n\ractorInstance\030\004 \001(\014\022\033\n\023seri" +
"alizerClassname\030\005 \001(\t\022\017\n\007timeout\030\006 \001(\004\022\026" +
"\n\016receiveTimeout\030\007 \001(\004\022%\n\tlifeCycle\030\010 \001(",
"\0132\022.LifeCycleProtocol\022+\n\nsupervisor\030\t \001(" +
"\0132\027.RemoteActorRefProtocol\022\024\n\014hotswapSta" +
"ck\030\n \001(\014\022(\n\010messages\030\013 \003(\0132\026.RemoteMessa" +
"geProtocol\"g\n\037SerializedTypedActorRefPro" +
"tocol\022-\n\010actorRef\030\001 \002(\0132\033.SerializedActo" +
"rRefProtocol\022\025\n\rinterfaceName\030\002 \002(\t\"r\n\017M" +
"essageProtocol\0225\n\023serializationScheme\030\001 " +
"\002(\0162\030.SerializationSchemeType\022\017\n\007message" +
"\030\002 \002(\014\022\027\n\017messageManifest\030\003 \001(\014\"R\n\021Actor" +
"InfoProtocol\022\033\n\004uuid\030\001 \002(\0132\r.UuidProtoco",
"l\022\017\n\007timeout\030\002 \002(\004\022\017\n\007address\030\003 \001(\t\")\n\014U" +
"uidProtocol\022\014\n\004high\030\001 \002(\004\022\013\n\003low\030\002 \002(\004\"3" +
"\n\025MetadataEntryProtocol\022\013\n\003key\030\001 \002(\t\022\r\n\005" +
"value\030\002 \002(\014\"6\n\021LifeCycleProtocol\022!\n\tlife" +
"Cycle\030\001 \002(\0162\016.LifeCycleType\"1\n\017AddressPr" +
"otocol\022\020\n\010hostname\030\001 \002(\t\022\014\n\004port\030\002 \002(\r\"7" +
"\n\021ExceptionProtocol\022\021\n\tclassname\030\001 \002(\t\022\017" +
"\n\007message\030\002 \002(\t*\033\n\013CommandType\022\014\n\010SHUTDO" +
"WN\020\001*]\n\027SerializationSchemeType\022\010\n\004JAVA\020" +
"\001\022\013\n\007SBINARY\020\002\022\016\n\nSCALA_JSON\020\003\022\r\n\tJAVA_J",
"SON\020\004\022\014\n\010PROTOBUF\020\005*-\n\rLifeCycleType\022\r\n\t" +
"PERMANENT\020\001\022\r\n\tTEMPORARY\020\002B\030\n\024akka.remot" +
"e.protocolH\001"
"tadata\030\010 \003(\0132\026.MetadataEntryProtocol\"J\n\025" +
"RemoteControlProtocol\022\016\n\006cookie\030\001 \001(\t\022!\n" +
"\013commandType\030\002 \002(\0162\014.CommandType\":\n\026Remo" +
"teActorRefProtocol\022\017\n\007address\030\001 \002(\t\022\017\n\007t" +
"imeout\030\002 \001(\004\"\323\002\n\032SerializedActorRefProto" +
"col\022\033\n\004uuid\030\001 \002(\0132\r.UuidProtocol\022\017\n\007addr" +
"ess\030\002 \002(\t\022\026\n\016actorClassname\030\003 \002(\t\022\025\n\ract" +
"orInstance\030\004 \001(\014\022\033\n\023serializerClassname\030" +
"\005 \001(\t\022\017\n\007timeout\030\006 \001(\004\022\026\n\016receiveTimeout" +
"\030\007 \001(\004\022%\n\tlifeCycle\030\010 \001(\0132\022.LifeCyclePro",
"tocol\022+\n\nsupervisor\030\t \001(\0132\027.RemoteActorR" +
"efProtocol\022\024\n\014hotswapStack\030\n \001(\014\022(\n\010mess" +
"ages\030\013 \003(\0132\026.RemoteMessageProtocol\"g\n\037Se" +
"rializedTypedActorRefProtocol\022-\n\010actorRe" +
"f\030\001 \002(\0132\033.SerializedActorRefProtocol\022\025\n\r" +
"interfaceName\030\002 \002(\t\"r\n\017MessageProtocol\0225" +
"\n\023serializationScheme\030\001 \002(\0162\030.Serializat" +
"ionSchemeType\022\017\n\007message\030\002 \002(\014\022\027\n\017messag" +
"eManifest\030\003 \001(\014\"R\n\021ActorInfoProtocol\022\033\n\004" +
"uuid\030\001 \002(\0132\r.UuidProtocol\022\017\n\007timeout\030\002 \002",
"(\004\022\017\n\007address\030\003 \001(\t\")\n\014UuidProtocol\022\014\n\004h" +
"igh\030\001 \002(\004\022\013\n\003low\030\002 \002(\004\"3\n\025MetadataEntryP" +
"rotocol\022\013\n\003key\030\001 \002(\t\022\r\n\005value\030\002 \002(\014\"6\n\021L" +
"ifeCycleProtocol\022!\n\tlifeCycle\030\001 \002(\0162\016.Li" +
"feCycleType\"1\n\017AddressProtocol\022\020\n\010hostna" +
"me\030\001 \002(\t\022\014\n\004port\030\002 \002(\r\"7\n\021ExceptionProto" +
"col\022\021\n\tclassname\030\001 \002(\t\022\017\n\007message\030\002 \002(\t*" +
"(\n\013CommandType\022\013\n\007CONNECT\020\001\022\014\n\010SHUTDOWN\020" +
"\002*]\n\027SerializationSchemeType\022\010\n\004JAVA\020\001\022\013" +
"\n\007SBINARY\020\002\022\016\n\nSCALA_JSON\020\003\022\r\n\tJAVA_JSON",
"\020\004\022\014\n\010PROTOBUF\020\005*-\n\rLifeCycleType\022\r\n\tPER" +
"MANENT\020\001\022\r\n\tTEMPORARY\020\002B\030\n\024akka.remote.p" +
"rotocolH\001"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@ -5769,7 +5729,7 @@ public final class RemoteProtocol {
internal_static_RemoteMessageProtocol_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_RemoteMessageProtocol_descriptor,
new java.lang.String[] { "Uuid", "ActorInfo", "OneWay", "Message", "Exception", "SupervisorUuid", "Sender", "Metadata", "Cookie", },
new java.lang.String[] { "Uuid", "ActorInfo", "OneWay", "Message", "Exception", "SupervisorUuid", "Sender", "Metadata", },
akka.remote.protocol.RemoteProtocol.RemoteMessageProtocol.class,
akka.remote.protocol.RemoteProtocol.RemoteMessageProtocol.Builder.class);
internal_static_RemoteControlProtocol_descriptor =

View file

@@ -28,7 +28,6 @@ message RemoteMessageProtocol {
optional UuidProtocol supervisorUuid = 6;
optional RemoteActorRefProtocol sender = 7;
repeated MetadataEntryProtocol metadata = 8;
optional string cookie = 9;
}
/**
@@ -43,7 +42,8 @@ message RemoteControlProtocol {
* Defines the type of the RemoteControlProtocol command type
*/
enum CommandType {
SHUTDOWN = 1;
CONNECT = 1;
SHUTDOWN = 2;
}
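Taken together, the two .proto changes above move the secure cookie off of every RemoteMessageProtocol and into a one-time RemoteControlProtocol handshake sent with the new CONNECT command. A rough Scala sketch of what a client now builds for that handshake (mirroring the netty client code further down; the helper name is made up, the builder calls are the generated protobuf API shown in this diff):
object ConnectHandshakeSketch {
  import akka.remote.protocol.RemoteProtocol.{ CommandType, RemoteControlProtocol }
  // Build the one-time CONNECT control message carrying the (optional) secure
  // cookie, instead of attaching a cookie field to every RemoteMessageProtocol.
  def handshake(secureCookie: Option[String]): RemoteControlProtocol = {
    val builder = RemoteControlProtocol.newBuilder.setCommandType(CommandType.CONNECT)
    secureCookie.foreach(builder.setCookie(_))   // e.g. RemoteClientSettings.SECURE_COOKIE
    builder.build
  }
}
The client then writes the encoded handshake to the channel on connect (and on reconnect), as ActiveRemoteClient does below with RemoteEncoder.encode.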
/**

View file

@@ -4,7 +4,7 @@
package akka.remote.netty
import akka.dispatch.{ DefaultCompletableFuture, CompletableFuture, Future }
import akka.dispatch.{ DefaultPromise, Promise, Future }
import akka.remote.{ MessageSerializer, RemoteClientSettings, RemoteServerSettings }
import akka.remote.protocol.RemoteProtocol._
import akka.serialization.RemoteActorSerialization
@@ -73,12 +73,12 @@ trait NettyRemoteClientModule extends RemoteClientModule { self: ListenerManagem
protected[akka] def send[T](message: Any,
senderOption: Option[ActorRef],
senderFuture: Option[CompletableFuture[T]],
senderFuture: Option[Promise[T]],
remoteAddress: InetSocketAddress,
timeout: Long,
isOneWay: Boolean,
actorRef: ActorRef,
loader: Option[ClassLoader]): Option[CompletableFuture[T]] =
loader: Option[ClassLoader]): Option[Promise[T]] =
withClientFor(remoteAddress, loader)(_.send[T](message, senderOption, senderFuture, remoteAddress, timeout, isOneWay, actorRef))
private[akka] def withClientFor[T](
@@ -154,14 +154,13 @@ abstract class RemoteClient private[akka] (
remoteAddress.getAddress.getHostAddress + "::" +
remoteAddress.getPort
protected val futures = new ConcurrentHashMap[Uuid, CompletableFuture[_]]
protected val futures = new ConcurrentHashMap[Uuid, Promise[_]]
protected val pendingRequests = {
if (transactionLogCapacity < 0) new ConcurrentLinkedQueue[(Boolean, Uuid, RemoteMessageProtocol)]
else new LinkedBlockingQueue[(Boolean, Uuid, RemoteMessageProtocol)](transactionLogCapacity)
}
private[remote] val runSwitch = new Switch()
private[remote] val isAuthenticated = new AtomicBoolean(false)
private[remote] def isRunning = runSwitch.isOn
@@ -192,29 +191,21 @@ abstract class RemoteClient private[akka] (
def send[T](
message: Any,
senderOption: Option[ActorRef],
senderFuture: Option[CompletableFuture[T]],
senderFuture: Option[Promise[T]],
remoteAddress: InetSocketAddress,
timeout: Long,
isOneWay: Boolean,
actorRef: ActorRef): Option[CompletableFuture[T]] = synchronized { // FIXME: find better strategy to prevent race
actorRef: ActorRef): Option[Promise[T]] =
send(createRemoteMessageProtocolBuilder(
Some(actorRef),
Left(actorRef.uuid),
actorRef.address,
timeout,
Right(message),
isOneWay,
senderOption,
if (isAuthenticated.compareAndSet(false, true)) RemoteClientSettings.SECURE_COOKIE else None).build, senderFuture)
}
Some(actorRef), Left(actorRef.uuid), actorRef.address, timeout, Right(message), isOneWay, senderOption).build,
senderFuture)
/**
* Sends the message across the wire
*/
def send[T](
request: RemoteMessageProtocol,
senderFuture: Option[CompletableFuture[T]]): Option[CompletableFuture[T]] = {
senderFuture: Option[Promise[T]]): Option[Promise[T]] = {
if (isRunning) {
if (request.getOneWay) {
try {
@@ -236,7 +227,7 @@ abstract class RemoteClient private[akka] (
None
} else {
val futureResult = if (senderFuture.isDefined) senderFuture.get
else new DefaultCompletableFuture[T](request.getActorInfo.getTimeout)
else new DefaultPromise[T](request.getActorInfo.getTimeout)
val futureUuid = uuidFrom(request.getUuid.getHigh, request.getUuid.getLow)
futures.put(futureUuid, futureResult) // Add future prematurely, remove it if write fails
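Throughout this file the old CompletableFuture/DefaultCompletableFuture names become Promise/DefaultPromise; the behaviour is unchanged. A minimal sketch of the renamed API as the client uses it here (identifiers are illustrative only):
object PromiseRenameSketch {
  import akka.dispatch.{ DefaultPromise, Future, Promise }
  // A DefaultPromise is the completable variant of Future, created with a timeout,
  // much like the per-request entries kept in the client's `futures` map above.
  def pendingReply(timeout: Long): Promise[String] = new DefaultPromise[String](timeout)
  // Completing one promise from another future, as Agent.alter does further down.
  def tie(pending: Promise[String], reply: Future[String]) { pending completeWith reply }
}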
@@ -342,6 +333,14 @@ class ActiveRemoteClient private[akka] (
notifyListeners(RemoteClientError(connection.getCause, module, remoteAddress))
false
} else {
//Send cookie
val handshake = RemoteControlProtocol.newBuilder.setCommandType(CommandType.CONNECT)
if (SECURE_COOKIE.nonEmpty)
handshake.setCookie(SECURE_COOKIE.get)
connection.getChannel.write(RemoteEncoder.encode(handshake.build))
//Add a task that does GCing of expired Futures
timer.newTimeout(new TimerTask() {
def run(timeout: Timeout) = {
@@ -361,7 +360,6 @@ class ActiveRemoteClient private[akka] (
} match {
case true ⇒ true
case false if reconnectIfAlreadyConnected ⇒
isAuthenticated.set(false)
openChannels.remove(connection.getChannel)
connection.getChannel.close
connection = bootstrap.connect(remoteAddress)
@@ -369,7 +367,15 @@ class ActiveRemoteClient private[akka] (
if (!connection.isSuccess) {
notifyListeners(RemoteClientError(connection.getCause, module, remoteAddress))
false
} else true
} else {
//Send cookie
val handshake = RemoteControlProtocol.newBuilder.setCommandType(CommandType.CONNECT)
if (SECURE_COOKIE.nonEmpty)
handshake.setCookie(SECURE_COOKIE.get)
connection.getChannel.write(RemoteEncoder.encode(handshake.build))
true
}
case false ⇒ false
}
}
@@ -404,7 +410,7 @@ class ActiveRemoteClient private[akka] (
*/
class ActiveRemoteClientPipelineFactory(
name: String,
futures: ConcurrentMap[Uuid, CompletableFuture[_]],
futures: ConcurrentMap[Uuid, Promise[_]],
bootstrap: ClientBootstrap,
remoteAddress: InetSocketAddress,
timer: HashedWheelTimer,
@@ -433,7 +439,7 @@ class ActiveRemoteClientPipelineFactory(
@ChannelHandler.Sharable
class ActiveRemoteClientHandler(
val name: String,
val futures: ConcurrentMap[Uuid, CompletableFuture[_]],
val futures: ConcurrentMap[Uuid, Promise[_]],
val bootstrap: ClientBootstrap,
val remoteAddress: InetSocketAddress,
val timer: HashedWheelTimer,
@@ -451,7 +457,7 @@ class ActiveRemoteClientHandler(
case arp: AkkaRemoteProtocol if arp.hasMessage ⇒
val reply = arp.getMessage
val replyUuid = uuidFrom(reply.getActorInfo.getUuid.getHigh, reply.getActorInfo.getUuid.getLow)
val future = futures.remove(replyUuid).asInstanceOf[CompletableFuture[Any]]
val future = futures.remove(replyUuid).asInstanceOf[Promise[Any]]
if (reply.hasMessage) {
if (future eq null) throw new IllegalActorStateException("Future mapped to UUID " + replyUuid + " does not exist")
@@ -577,10 +583,10 @@ class NettyRemoteServer(serverModule: NettyRemoteServerModule, val host: String,
def shutdown() {
try {
val shutdownSignal = {
val b = RemoteControlProtocol.newBuilder
val b = RemoteControlProtocol.newBuilder.setCommandType(CommandType.SHUTDOWN)
if (RemoteClientSettings.SECURE_COOKIE.nonEmpty)
b.setCookie(RemoteClientSettings.SECURE_COOKIE.get)
b.setCommandType(CommandType.SHUTDOWN)
b.build
}
openChannels.write(RemoteEncoder.encode(shutdownSignal)).awaitUninterruptibly
@@ -736,12 +742,39 @@ class RemoteServerPipelineFactory(
MAX_TOTAL_MEMORY_SIZE,
EXECUTION_POOL_KEEPALIVE.length,
EXECUTION_POOL_KEEPALIVE.unit))
val authenticator = if (REQUIRE_COOKIE) new RemoteServerAuthenticationHandler(SECURE_COOKIE) :: Nil else Nil
val remoteServer = new RemoteServerHandler(name, openChannels, loader, server)
val stages: List[ChannelHandler] = dec ::: lenDec :: protobufDec :: enc ::: lenPrep :: protobufEnc :: execution :: remoteServer :: Nil
val stages: List[ChannelHandler] = dec ::: lenDec :: protobufDec :: enc ::: lenPrep :: protobufEnc :: execution :: authenticator ::: remoteServer :: Nil
new StaticChannelPipeline(stages: _*)
}
}
@ChannelHandler.Sharable
class RemoteServerAuthenticationHandler(secureCookie: Option[String]) extends SimpleChannelUpstreamHandler {
val authenticated = new AnyRef
override def messageReceived(ctx: ChannelHandlerContext, event: MessageEvent) = secureCookie match {
case None ⇒ ctx.sendUpstream(event)
case Some(cookie) ⇒
ctx.getAttachment match {
case `authenticated` ⇒ ctx.sendUpstream(event)
case null ⇒ event.getMessage match {
case remoteProtocol: AkkaRemoteProtocol if remoteProtocol.hasInstruction ⇒
remoteProtocol.getInstruction.getCookie match {
case `cookie` ⇒
ctx.setAttachment(authenticated)
ctx.sendUpstream(event)
case _ ⇒
throw new SecurityException(
"The remote client [" + ctx.getChannel.getRemoteAddress + "] secure cookie is not the same as remote server secure cookie")
}
case _ ⇒
throw new SecurityException("The remote client [" + ctx.getChannel.getRemoteAddress + "] is not Authorized!")
}
}
}
}
/**
* @author <a href="http://jonasboner.com">Jonas Bon&#233;r</a>
*/
@@ -752,7 +785,6 @@ class RemoteServerHandler(
val applicationLoader: Option[ClassLoader],
val server: NettyRemoteServerModule) extends SimpleChannelUpstreamHandler {
import RemoteServerSettings._
val CHANNEL_INIT = "channel-init".intern
applicationLoader.foreach(MessageSerializer.setClassLoader(_)) //TODO: REVISIT: THIS FEELS A BIT DODGY
@@ -786,7 +818,6 @@ class RemoteServerHandler(
val clientAddress = getClientAddress(ctx)
sessionActors.set(event.getChannel(), new ConcurrentHashMap[String, ActorRef]())
server.notifyListeners(RemoteServerClientConnected(server, clientAddress))
if (REQUIRE_COOKIE) ctx.setAttachment(CHANNEL_INIT) // signal that this is channel initialization, which will need authentication
}
override def channelDisconnected(ctx: ChannelHandlerContext, event: ChannelStateEvent) = {
@@ -810,11 +841,8 @@ class RemoteServerHandler(
override def messageReceived(ctx: ChannelHandlerContext, event: MessageEvent) = event.getMessage match {
case null ⇒ throw new IllegalActorStateException("Message in remote MessageEvent is null: " + event)
//case remoteProtocol: AkkaRemoteProtocol if remoteProtocol.hasInstruction => RemoteServer cannot receive control messages (yet)
case remoteProtocol: AkkaRemoteProtocol if remoteProtocol.hasMessage ⇒
val requestProtocol = remoteProtocol.getMessage
if (REQUIRE_COOKIE) authenticateRemoteClient(requestProtocol, ctx)
handleRemoteMessageProtocol(requestProtocol, event.getChannel)
case remote: AkkaRemoteProtocol if remote.hasMessage ⇒ handleRemoteMessageProtocol(remote.getMessage, event.getChannel)
//case remote: AkkaRemoteProtocol if remote.hasInstruction => RemoteServer cannot receive control messages (yet)
case _ ⇒ //ignore
}
@@ -863,7 +891,7 @@ class RemoteServerHandler(
message,
request.getActorInfo.getTimeout,
None,
Some(new DefaultCompletableFuture[Any](request.getActorInfo.getTimeout).
Some(new DefaultPromise[Any](request.getActorInfo.getTimeout).
onComplete(_.value.get match {
case l: Left[Throwable, Any] ⇒ write(channel, createErrorReplyMessage(l.a, request))
case r: Right[Throwable, Any] ⇒
@@ -874,8 +902,7 @@ class RemoteServerHandler(
actorInfo.getTimeout,
r,
true,
Some(actorRef),
None)
Some(actorRef))
// FIXME lift in the supervisor uuid management into toh createRemoteMessageProtocolBuilder method
if (request.hasSupervisorUuid) messageBuilder.setSupervisorUuid(request.getSupervisorUuid)
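The server-side reply path above boils down to: complete a DefaultPromise with either a Throwable or a value, and let the onComplete callback turn that Either into an error reply or a normal reply. A stripped-down sketch of that branching (the write helpers below are stand-ins for the real channel-writing code in RemoteServerHandler):
object ReplyBranchSketch {
  import akka.dispatch.DefaultPromise
  // Stand-ins for the real channel-writing helpers.
  def writeReply(result: Any) = println("reply: " + result)
  def writeError(cause: Throwable) = println("error reply: " + cause)
  // The actor's eventual reply (or exception) completes the promise; the callback
  // then branches on the Either, just like the handler above.
  val promise = new DefaultPromise[Any](5000)
  promise.onComplete(f ⇒ f.value.get match {
    case Left(cause)   ⇒ writeError(cause)
    case Right(result) ⇒ writeReply(result)
  })
}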
@@ -939,26 +966,11 @@ class RemoteServerHandler(
actorInfo.getTimeout,
Left(exception),
true,
None,
None)
if (request.hasSupervisorUuid) messageBuilder.setSupervisorUuid(request.getSupervisorUuid)
RemoteEncoder.encode(messageBuilder.build)
}
private def authenticateRemoteClient(request: RemoteMessageProtocol, ctx: ChannelHandlerContext) = {
val attachment = ctx.getAttachment
if ((attachment ne null) &&
attachment.isInstanceOf[String] &&
attachment.asInstanceOf[String] == CHANNEL_INIT) { // is first time around, channel initialization
ctx.setAttachment(null)
val clientAddress = ctx.getChannel.getRemoteAddress.toString
if (!request.hasCookie) throw new SecurityException(
"The remote client [" + clientAddress + "] does not have a secure cookie.")
if (!(request.getCookie == SECURE_COOKIE.get)) throw new SecurityException(
"The remote client [" + clientAddress + "] secure cookie is not the same as remote server secure cookie")
}
}
protected def parseUuid(protocol: UuidProtocol): Uuid = uuidFrom(protocol.getHigh, protocol.getLow)
}

View file

@@ -78,10 +78,9 @@ object ActorSerialization {
actorRef.timeout,
Right(m.message),
false,
actorRef.getSender,
RemoteClientSettings.SECURE_COOKIE).build)
actorRef.getSender))
requestProtocols.foreach(rp ⇒ builder.addMessages(rp))
requestProtocols.foreach(builder.addMessages(_))
}
actorRef.receiveTimeout.foreach(builder.setReceiveTimeout(_))
@@ -201,8 +200,7 @@ object RemoteActorSerialization {
timeout: Long,
message: Either[Throwable, Any],
isOneWay: Boolean,
senderOption: Option[ActorRef],
secureCookie: Option[String]): RemoteMessageProtocol.Builder = {
senderOption: Option[ActorRef]): RemoteMessageProtocol.Builder = {
val uuidProtocol = replyUuid match {
case Left(uid) ⇒ UuidProtocol.newBuilder.setHigh(uid.getTime).setLow(uid.getClockSeqAndNode).build
@@ -238,8 +236,6 @@ object RemoteActorSerialization {
case s ⇒ s
}
secureCookie.foreach(messageBuilder.setCookie(_))
/* TODO invent new supervision strategy
actorRef.foreach { ref =>
ref.registerSupervisorAsRemoteActor.foreach { id =>

View file

@@ -7,7 +7,7 @@ package akka.agent
import akka.stm._
import akka.actor.Actor
import akka.japi.{ Function ⇒ JFunc, Procedure ⇒ JProc }
import akka.dispatch.{ DefaultCompletableFuture, Dispatchers, Future }
import akka.dispatch.{ DefaultPromise, Dispatchers, Future }
/**
* Used internally to send functions.
@@ -122,7 +122,7 @@ class Agent[T](initialValue: T) {
def alter(f: T ⇒ T)(timeout: Long): Future[T] = {
def dispatch = updater.!!!(Update(f), timeout)
if (Stm.activeTransaction) {
val result = new DefaultCompletableFuture[T](timeout)
val result = new DefaultPromise[T](timeout)
get //Join xa
deferred {
result completeWith dispatch
@@ -164,7 +164,7 @@ class Agent[T](initialValue: T) {
* still be executed in order.
*/
def alterOff(f: T ⇒ T)(timeout: Long): Future[T] = {
val result = new DefaultCompletableFuture[T](timeout)
val result = new DefaultPromise[T](timeout)
send((value: T) ⇒ {
suspend
val threadBased = Actor.actorOf(new ThreadBasedAgentUpdater(this)).start()
@@ -293,7 +293,7 @@ class AgentUpdater[T](agent: Agent[T]) extends Actor {
* Thread-based agent updater actor. Used internally for `sendOff` actions.
*/
class ThreadBasedAgentUpdater[T](agent: Agent[T]) extends Actor {
self.dispatcher = Dispatchers.newThreadBasedDispatcher(self)
self.dispatcher = Dispatchers.newPinnedDispatcher(self)
val txFactory = TransactionFactory(familyName = "ThreadBasedAgentUpdater", readonly = false)
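As a usage sketch of the alter API shown above (the agent value and timeout are made up; constructing the agent with new Agent(...) simply follows the class header in this hunk, the library may also offer a factory): alter applies the update through the updater actor and hands back a Future that is completed once the update has been applied, now backed by a DefaultPromise.
object AgentAlterSketch {
  import akka.agent.Agent
  import akka.dispatch.Future
  // Hypothetical usage: schedule an update and get a Future of the resulting state
  // back (the timeout unit is assumed to follow the surrounding API's conventions).
  val counter = new Agent(0)
  val updated: Future[Int] = counter.alter(_ + 1)(5000)
}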

View file

@@ -30,8 +30,8 @@ akka {
# - UntypedActor: sendRequestReply && sendRequestReplyFuture
# - TypedActor: methods with non-void return type
serialize-messages = off # Does a deep clone of (non-primitive) messages to ensure immutability
throughput = 5 # Default throughput for all ExecutorBasedEventDrivenDispatcher, set to 1 for complete fairness
throughput-deadline-time = -1 # Default throughput deadline for all ExecutorBasedEventDrivenDispatcher, set to 0 or negative for no deadline
throughput = 5 # Default throughput for all Dispatcher, set to 1 for complete fairness
throughput-deadline-time = -1 # Default throughput deadline for all Dispatcher, set to 0 or negative for no deadline
dispatcher-shutdown-timeout = 1 # Using the akka.time-unit, how long dispatchers by default will wait for new actors until they shut down
deployment {
@@ -75,22 +75,22 @@ akka {
}
default-dispatcher {
type = "GlobalExecutorBasedEventDriven" # Must be one of the following, all "Global*" are non-configurable
# - ExecutorBasedEventDriven
# - ExecutorBasedEventDrivenWorkStealing
# - GlobalExecutorBasedEventDriven
type = "GlobalDispatcher" # Must be one of the following, all "Global*" are non-configurable
# - Dispatcher
# - BalancingDispatcher
# - GlobalDispatcher
keep-alive-time = 60 # Keep alive time for threads
core-pool-size-factor = 1.0 # No of core threads ... ceil(available processors * factor)
max-pool-size-factor = 4.0 # Max no of threads ... ceil(available processors * factor)
executor-bounds = -1 # Makes the Executor bounded, -1 is unbounded
allow-core-timeout = on # Allow core threads to time out
rejection-policy = "caller-runs" # abort, caller-runs, discard-oldest, discard
throughput = 5 # Throughput for ExecutorBasedEventDrivenDispatcher, set to 1 for complete fairness
throughput-deadline-time = -1 # Throughput deadline for ExecutorBasedEventDrivenDispatcher, set to 0 or negative for no deadline
throughput = 5 # Throughput for Dispatcher, set to 1 for complete fairness
throughput-deadline-time = -1 # Throughput deadline for Dispatcher, set to 0 or negative for no deadline
mailbox-capacity = -1 # If negative (or zero) then an unbounded mailbox is used (default)
# If positive then a bounded mailbox is used and the capacity is set using the property
# NOTE: setting a mailbox to 'blocking' can be a bit dangerous, could lead to deadlock, use with care
# The following are only used for ExecutorBasedEventDriven and only if mailbox-capacity > 0
# The following are only used for Dispatcher and only if mailbox-capacity > 0
mailbox-push-timeout-time = 10 # Specifies the timeout to add a new message to a mailbox that is full - negative number means infinite timeout
# (in unit defined by the time-unit property)
}
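The configuration above now refers to the renamed dispatcher types (Dispatcher, BalancingDispatcher, GlobalDispatcher), and the programmatic factories follow the same renaming, for instance newPinnedDispatcher replacing newThreadBasedDispatcher in the Agent updater earlier in this diff. A small, self-contained sketch of pinning an actor to its own dispatcher under the new names (the worker actor and its message handling are made up):
object PinnedDispatcherSketch {
  import akka.actor.Actor
  import akka.dispatch.Dispatchers
  class Worker extends Actor {
    // One dedicated thread for this actor, via the renamed factory used by
    // ThreadBasedAgentUpdater above.
    self.dispatcher = Dispatchers.newPinnedDispatcher(self)
    protected def receive = {
      case msg ⇒ println("got: " + msg)
    }
  }
  val worker = Actor.actorOf(new Worker).start()
}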
@@ -197,7 +197,7 @@ akka {
# If you are using akka.http.AkkaMistServlet
mist-dispatcher {
#type = "GlobalExecutorBasedEventDriven" # Uncomment if you want to use a different dispatcher than the default one for Comet
#type = "GlobalDispatcher" # Uncomment if you want to use a different dispatcher than the default one for Comet
}
connection-close = true # toggles the addition of the "Connection" response header with a "close" value
root-actor-id = "_httproot" # the id of the actor to use as the root endpoint

Binary files not shown (six image files deleted in this commit).

View file

@@ -324,8 +324,21 @@ class AkkaParentProject(info: ProjectInfo) extends ParentProject(info) with Exec
lazy val clusterTest = multiJvmTest
lazy val clusterRun = multiJvmRun
// test task runs normal tests and then all multi-jvm tests
lazy val normalTest = super.testAction
override def multiJvmTestAllAction = super.multiJvmTestAllAction dependsOn (normalTest)
override def testAction = task { None } dependsOn (normalTest, multiJvmTestAll)
override def multiJvmOptions = Seq("-Xmx256M")
override def multiJvmExtraOptions(className: String) = {
val confFiles = (testSourcePath ** (className + ".conf")).get
if (!confFiles.isEmpty) {
val filePath = confFiles.toList.head.absolutePath
Seq("-Dakka.config=" + filePath)
} else Seq.empty
}
lazy val replicationTestsEnabled = systemOptional[Boolean]("cluster.test.replication", false)
override def testOptions =

View file

@@ -6,9 +6,10 @@ import java.io.{BufferedReader, Closeable, InputStream, InputStreamReader, IOExc
import java.io.{PipedInputStream, PipedOutputStream}
import scala.concurrent.SyncVar
trait MultiJvmTests extends BasicScalaProject {
trait MultiJvmTests extends DefaultProject {
def multiJvmTestName = "MultiJvm"
def multiJvmOptions: Seq[String] = Nil
def multiJvmOptions: Seq[String] = Seq.empty
def multiJvmExtraOptions(className: String): Seq[String] = Seq.empty
val MultiJvmTestName = multiJvmTestName
@@ -25,32 +26,38 @@ trait MultiJvmTests extends BasicScalaProject {
lazy val multiJvmTest = multiJvmTestAction
lazy val multiJvmRun = multiJvmRunAction
lazy val multiJvmTestAll = multiJvmTestAllAction
def multiJvmTestAction = multiJvmAction(getMultiJvmTests, testScalaOptions)
def multiJvmRunAction = multiJvmAction(getMultiJvmApps, runScalaOptions)
def multiJvmTestAction = multiJvmMethod(getMultiJvmTests, testScalaOptions)
def multiJvmRunAction = multiJvmMethod(getMultiJvmApps, runScalaOptions)
def multiJvmTestAllAction = multiJvmTask(Nil, getMultiJvmTests, testScalaOptions)
def multiJvmAction(getMultiTestsMap: => Map[String, Seq[String]], scalaOptions: String => Seq[String]) = {
def multiJvmMethod(getMultiTestsMap: => Map[String, Seq[String]], scalaOptions: String => Seq[String]) = {
task { args =>
task {
val multiTestsMap = getMultiTestsMap
def process(tests: List[String]): Option[String] = {
if (tests.isEmpty) {
None
} else {
val testName = tests(0)
val failed = multiTestsMap.get(testName) match {
case Some(testClasses) => runMulti(testName, testClasses, scalaOptions)
case None => Some("No multi jvm test called " + testName)
}
failed orElse process(tests.tail)
}
}
val tests = if (args.size > 0) args.toList else multiTestsMap.keys.toList.asInstanceOf[List[String]]
process(tests)
} dependsOn (testCompile)
multiJvmTask(args.toList, getMultiTestsMap, scalaOptions)
} completeWith(getMultiTestsMap.keys.toList)
}
def multiJvmTask(tests: List[String], getMultiTestsMap: => Map[String, Seq[String]], scalaOptions: String => Seq[String]) = {
task {
val multiTestsMap = getMultiTestsMap
def process(runTests: List[String]): Option[String] = {
if (runTests.isEmpty) {
None
} else {
val testName = runTests(0)
val failed = multiTestsMap.get(testName) match {
case Some(testClasses) => runMulti(testName, testClasses, scalaOptions)
case None => Some("No multi jvm test called " + testName)
}
failed orElse process(runTests.tail)
}
}
val runTests = if (tests.size > 0) tests else multiTestsMap.keys.toList.asInstanceOf[List[String]]
process(runTests)
} dependsOn (testCompile)
}
def getMultiJvmTests(): Map[String, Seq[String]] = {
val allTests = testCompileConditional.analysis.allTests.toList.map(_.className)
filterMultiJvmTests(allTests)
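A quick usage sketch of these hooks (the project name and option values are hypothetical, and it assumes the MultiJvmTests trait is on the build's classpath): an sbt 0.7-style project mixes in MultiJvmTests and can set global JVM options plus per-test-class extras, the way AkkaParentProject passes its generated -Dakka.config property above.
import sbt._
// Hypothetical project definition exercising the multi-JVM hooks.
class SampleMultiJvmProject(info: ProjectInfo) extends DefaultProject(info) with MultiJvmTests {
  // applied to every forked JVM
  override def multiJvmOptions = Seq("-Xmx256M")
  // applied per test class, e.g. to hand each node its own system property
  override def multiJvmExtraOptions(className: String) = Seq("-Dsample.node=" + className)
}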
@@ -80,6 +87,10 @@ trait MultiJvmTests extends BasicScalaProject {
className.substring(i + l)
}
def testSimpleName(className: String) = {
className.split("\\.").last
}
def testScalaOptions(testClass: String) = {
val scalaTestJars = testClasspath.get.filter(_.name.contains("scalatest"))
val cp = Path.makeString(scalaTestJars)
@@ -98,8 +109,23 @@ trait MultiJvmTests extends BasicScalaProject {
case (testClass, index) => {
val jvmName = "JVM-" + testIdentifier(testClass)
val jvmLogger = new JvmLogger(jvmName)
val className = testSimpleName(testClass)
val optionsFiles = (testSourcePath ** (className + ".opts")).get
val optionsFromFile: Seq[String] = {
if (!optionsFiles.isEmpty) {
val file = optionsFiles.toList.head.asFile
log.info("Reading JVM options from %s" + file)
FileUtilities.readString(file, log) match {
case Right(opts: String) => opts.trim.split(" ").toSeq
case _ => Seq.empty
}
} else Seq.empty
}
val extraOptions = multiJvmExtraOptions(className)
val jvmOptions = multiJvmOptions ++ optionsFromFile ++ extraOptions
log.info("Starting %s for %s" format (jvmName, testClass))
(testClass, startJvm(multiJvmOptions, scalaOptions(testClass), jvmLogger, index == 0))
log.info(" with JVM options: %s" format jvmOptions.mkString(" "))
(testClass, startJvm(jvmOptions, scalaOptions(testClass), jvmLogger, index == 0))
}
}
val exitCodes = processes map {