Merge branch 'master' into amqp

Conflicts:
	akka-amqp/src/main/scala/se/scalablesolutions/akka/amqp/AMQP.scala
	akka-amqp/src/main/scala/se/scalablesolutions/akka/amqp/FaultTolerantConnectionActor.scala

commit 53bdcd4951
136 changed files with 4083 additions and 1836 deletions
@@ -159,7 +159,7 @@ object Actor extends Logging {
   */
  def actor(body: Receive): ActorRef =
    actorOf(new Actor() {
-     self.lifeCycle = Some(LifeCycle(Permanent))
+     self.lifeCycle = Permanent
      def receive: Receive = body
    }).start

@@ -181,7 +181,7 @@ object Actor extends Logging {
   */
  def transactor(body: Receive): ActorRef =
    actorOf(new Transactor() {
-     self.lifeCycle = Some(LifeCycle(Permanent))
+     self.lifeCycle = Permanent
      def receive: Receive = body
    }).start

@@ -201,7 +201,7 @@ object Actor extends Logging {
   */
  def temporaryActor(body: Receive): ActorRef =
    actorOf(new Actor() {
-     self.lifeCycle = Some(LifeCycle(Temporary))
+     self.lifeCycle = Temporary
      def receive = body
    }).start

@@ -226,7 +226,7 @@ object Actor extends Logging {
  def handler[A](body: => Unit) = new {
    def receive(handler: Receive) =
      actorOf(new Actor() {
-       self.lifeCycle = Some(LifeCycle(Permanent))
+       self.lifeCycle = Permanent
        body
        def receive = handler
      }).start

@@ -444,7 +444,6 @@ trait Actor extends Logging {
   */
  def become(behavior: Option[Receive]) {
    self.hotswap = behavior
-   self.checkReceiveTimeout // FIXME : how to reschedule receivetimeout on hotswap?
  }

  /** Akka Java API
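Note: the five hunks above make one API change: lifeCycle is now a LifeCycle value (Permanent, Temporary, UndefinedLifeCycle) assigned directly, not an Option[LifeCycle] wrapper. A minimal before/after sketch against the post-merge API (illustrative only, not compiled against this revision):

import se.scalablesolutions.akka.actor.Actor
import se.scalablesolutions.akka.actor.Actor.actorOf
import se.scalablesolutions.akka.config.ScalaConfig.Permanent

class Worker extends Actor {
  // before this merge: self.lifeCycle = Some(LifeCycle(Permanent))
  // after this merge:
  self.lifeCycle = Permanent

  def receive = {
    case msg => println("Worker received: " + msg)
  }
}

val worker = actorOf(new Worker).start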
(File diff suppressed because it is too large.)
@@ -4,14 +4,16 @@
 package se.scalablesolutions.akka.actor

-import scala.collection.mutable.ListBuffer
+import scala.collection.mutable.{ListBuffer, Map}
 import scala.reflect.Manifest

 import java.util.concurrent.{ConcurrentSkipListSet, ConcurrentHashMap}
 import java.util.{Set => JSet}

-import se.scalablesolutions.akka.util.ListenerManagement
 import annotation.tailrec
 import se.scalablesolutions.akka.util.ReflectiveAccess._
+import se.scalablesolutions.akka.util.{ReadWriteGuard, Address, ListenerManagement}
+import java.net.InetSocketAddress

 /**
  * Base trait for ActorRegistry events, allows listen to when an actor is added and removed from the ActorRegistry.

@@ -37,11 +39,18 @@ case class ActorUnregistered(actor: ActorRef) extends ActorRegistryEvent
 object ActorRegistry extends ListenerManagement {
   private val actorsByUUID = new ConcurrentHashMap[Uuid, ActorRef]
   private val actorsById = new Index[String,ActorRef]
+  private val remoteActorSets = Map[Address, RemoteActorSet]()
+  private val guard = new ReadWriteGuard

   /**
    * Returns all actors in the system.
    */
   def actors: Array[ActorRef] = filter(_ => true)

+  /**
+   * Returns the number of actors in the system.
+   */
+  def size: Int = actorsByUUID.size
+
   /**
    * Invokes a function for all actors.

@@ -109,11 +118,122 @@ object ActorRegistry extends ListenerManagement {
    */
   def actorsFor(id: String): Array[ActorRef] = actorsById values id

   /**
    * Finds the actor that has a specific UUID.
    */
   def actorFor(uuid: Uuid): Option[ActorRef] = Option(actorsByUUID get uuid)

+  /**
+   * Returns all typed actors in the system.
+   */
+  def typedActors: Array[AnyRef] = filterTypedActors(_ => true)
+
+  /**
+   * Invokes a function for all typed actors.
+   */
+  def foreachTypedActor(f: (AnyRef) => Unit) = {
+    TypedActorModule.ensureTypedActorEnabled
+    val elements = actorsByUUID.elements
+    while (elements.hasMoreElements) {
+      val proxy = typedActorFor(elements.nextElement)
+      if (proxy.isDefined) {
+        f(proxy.get)
+      }
+    }
+  }
+
+  /**
+   * Invokes the function on all known typed actors until it returns Some
+   * Returns None if the function never returns Some
+   */
+  def findTypedActor[T](f: PartialFunction[AnyRef,T]) : Option[T] = {
+    TypedActorModule.ensureTypedActorEnabled
+    val elements = actorsByUUID.elements
+    while (elements.hasMoreElements) {
+      val proxy = typedActorFor(elements.nextElement)
+      if (proxy.isDefined && (f isDefinedAt proxy))
+        return Some(f(proxy))
+    }
+    None
+  }
+
+  /**
+   * Finds all typed actors that satisfy a predicate.
+   */
+  def filterTypedActors(p: AnyRef => Boolean): Array[AnyRef] = {
+    TypedActorModule.ensureTypedActorEnabled
+    val all = new ListBuffer[AnyRef]
+    val elements = actorsByUUID.elements
+    while (elements.hasMoreElements) {
+      val proxy = typedActorFor(elements.nextElement)
+      if (proxy.isDefined && p(proxy.get)) {
+        all += proxy.get
+      }
+    }
+    all.toArray
+  }
+
+  /**
+   * Finds all typed actors that are subtypes of the class passed in as the Manifest argument.
+   */
+  def typedActorsFor[T <: AnyRef](implicit manifest: Manifest[T]): Array[AnyRef] = {
+    TypedActorModule.ensureTypedActorEnabled
+    typedActorsFor[T](manifest.erasure.asInstanceOf[Class[T]])
+  }
+
+  /**
+   * Finds any typed actor that matches T.
+   */
+  def typedActorFor[T <: AnyRef](implicit manifest: Manifest[T]): Option[AnyRef] = {
+    TypedActorModule.ensureTypedActorEnabled
+    def predicate(proxy: AnyRef) : Boolean = {
+      val actorRef = TypedActorModule.typedActorObjectInstance.get.actorFor(proxy)
+      actorRef.isDefined && manifest.erasure.isAssignableFrom(actorRef.get.actor.getClass)
+    }
+    findTypedActor({ case a:AnyRef if predicate(a) => a })
+  }
+
+  /**
+   * Finds all typed actors of type or sub-type specified by the class passed in as the Class argument.
+   */
+  def typedActorsFor[T <: AnyRef](clazz: Class[T]): Array[AnyRef] = {
+    TypedActorModule.ensureTypedActorEnabled
+    def predicate(proxy: AnyRef) : Boolean = {
+      val actorRef = TypedActorModule.typedActorObjectInstance.get.actorFor(proxy)
+      actorRef.isDefined && clazz.isAssignableFrom(actorRef.get.actor.getClass)
+    }
+    filterTypedActors(predicate)
+  }
+
+  /**
+   * Finds all typed actors that have a specific id.
+   */
+  def typedActorsFor(id: String): Array[AnyRef] = {
+    TypedActorModule.ensureTypedActorEnabled
+    val actorRefs = actorsById values id
+    actorRefs.flatMap(typedActorFor(_))
+  }
+
+  /**
+   * Finds the typed actor that has a specific UUID.
+   */
+  def typedActorFor(uuid: Uuid): Option[AnyRef] = {
+    TypedActorModule.ensureTypedActorEnabled
+    val actorRef = actorsByUUID get uuid
+    if (actorRef eq null)
+      None
+    else
+      typedActorFor(actorRef)
+  }
+
+  /**
+   * Get the typed actor proxy for a given typed actor ref.
+   */
+  private def typedActorFor(actorRef: ActorRef): Option[AnyRef] = {
+    TypedActorModule.typedActorObjectInstance.get.proxyFor(actorRef)
+  }
+
   /**
    * Registers an actor in the ActorRegistry.
    */
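Note: the added block gives ActorRegistry typed-actor lookups mirroring the existing ActorRef ones. A sketch of the lookup surface (Logger and the "logger" id are hypothetical names; someUuid is assumed to be in scope):

import se.scalablesolutions.akka.actor.ActorRegistry

trait Logger { def log(msg: String): Unit } // hypothetical typed actor interface

val byId   = ActorRegistry.actorsFor("logger")      // untyped actors, by registered id
val byUuid = ActorRegistry.actorFor(someUuid)       // Option[ActorRef]

val allTyped = ActorRegistry.typedActors            // every typed actor proxy
val loggers  = ActorRegistry.typedActorsFor[Logger] // by type, via the implicit Manifest
val first    = ActorRegistry.typedActorFor[Logger]  // Option[AnyRef], first match wins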
@@ -145,67 +265,130 @@ object ActorRegistry extends ListenerManagement {
   */
  def shutdownAll() {
    log.info("Shutting down all actors in the system...")
-   foreach(_.stop)
+   if (TypedActorModule.isTypedActorEnabled) {
+     val elements = actorsByUUID.elements
+     while (elements.hasMoreElements) {
+       val actorRef = elements.nextElement
+       val proxy = typedActorFor(actorRef)
+       if (proxy.isDefined) {
+         TypedActorModule.typedActorObjectInstance.get.stop(proxy.get)
+       } else {
+         actorRef.stop
+       }
+     }
+   } else {
+     foreach(_.stop)
+   }
    actorsByUUID.clear
    actorsById.clear
    log.info("All actors have been shut down and unregistered from ActorRegistry")
  }

+ /**
+  * Get the remote actors for the given server address. For internal use only.
+  */
+ private[akka] def actorsFor(remoteServerAddress: Address): RemoteActorSet = guard.withWriteGuard {
+   remoteActorSets.getOrElseUpdate(remoteServerAddress, new RemoteActorSet)
+ }
+
+ private[akka] def registerActorByUuid(address: InetSocketAddress, uuid: String, actor: ActorRef) {
+   actorsByUuid(Address(address.getHostName, address.getPort)).putIfAbsent(uuid, actor)
+ }
+
+ private[akka] def registerTypedActorByUuid(address: InetSocketAddress, uuid: String, typedActor: AnyRef) {
+   typedActorsByUuid(Address(address.getHostName, address.getPort)).putIfAbsent(uuid, typedActor)
+ }
+
+ private[akka] def actors(address: Address) = actorsFor(address).actors
+ private[akka] def actorsByUuid(address: Address) = actorsFor(address).actorsByUuid
+ private[akka] def typedActors(address: Address) = actorsFor(address).typedActors
+ private[akka] def typedActorsByUuid(address: Address) = actorsFor(address).typedActorsByUuid
+
+ private[akka] class RemoteActorSet {
+   private[ActorRegistry] val actors = new ConcurrentHashMap[String, ActorRef]
+   private[ActorRegistry] val actorsByUuid = new ConcurrentHashMap[String, ActorRef]
+   private[ActorRegistry] val typedActors = new ConcurrentHashMap[String, AnyRef]
+   private[ActorRegistry] val typedActorsByUuid = new ConcurrentHashMap[String, AnyRef]
+ }
 }

 /**
  * An implementation of a ConcurrentMultiMap
  * Adds/remove is serialized over the specified key
  * Reads are fully concurrent <-- el-cheapo
  *
  * @author Viktor Klang
  */
 class Index[K <: AnyRef,V <: AnyRef : Manifest] {
   import scala.collection.JavaConversions._

   private val Naught = Array[V]() //Nil for Arrays
   private val container = new ConcurrentHashMap[K, JSet[V]]
   private val emptySet = new ConcurrentSkipListSet[V]

-  def put(key: K, value: V) {
-
-    //Returns whether it needs to be retried or not
-    def tryPut(set: JSet[V], v: V): Boolean = {
-      set.synchronized {
-        if (set.isEmpty) true //IF the set is empty then it has been removed, so signal retry
-        else { //Else add the value to the set and signal that retry is not needed
-          set add v
-          false
-        }
-      }
-    }
-
-    @tailrec def syncPut(k: K, v: V): Boolean = {
+  /**
+   * Associates the value of type V with the key of type K
+   * @returns true if the value didn't exist for the key previously, and false otherwise
+   */
+  def put(key: K, value: V): Boolean = {
+    //Tailrecursive spin-locking put
+    @tailrec def spinPut(k: K, v: V): Boolean = {
       var retry = false
+      var added = false
       val set = container get k
-      if (set ne null) retry = tryPut(set,v)
+      if (set ne null) {
+        set.synchronized {
+          if (set.isEmpty) {
+            retry = true //IF the set is empty then it has been removed, so signal retry
+          }
+          else { //Else add the value to the set and signal that retry is not needed
+            added = set add v
+            retry = false
+          }
+        }
+      }
       else {
         val newSet = new ConcurrentSkipListSet[V]
         newSet add v

         // Parry for two simultaneous putIfAbsent(id,newSet)
         val oldSet = container.putIfAbsent(k,newSet)
-        if (oldSet ne null)
-          retry = tryPut(oldSet,v)
+        if (oldSet ne null) {
+          oldSet.synchronized {
+            if (oldSet.isEmpty) {
+              retry = true //IF the set is empty then it has been removed, so signal retry
+            }
+            else { //Else try to add the value to the set and signal that retry is not needed
+              added = oldSet add v
+              retry = false
+            }
+          }
+        } else {
+          added = true
+        }
       }

-      if (retry) syncPut(k,v)
-      else true
+      if (retry) spinPut(k,v)
+      else added
    }

-    syncPut(key,value)
+    spinPut(key,value)
  }

-  def values(key: K) = {
+  /**
+   * @returns a _new_ array of all existing values for the given key at the time of the call
+   */
+  def values(key: K): Array[V] = {
     val set: JSet[V] = container get key
-    val result = if (set ne null) set toArray Naught else Naught
-    result.asInstanceOf[Array[V]]
+    if (set ne null) set toArray Naught
+    else Naught
+  }
+
+  def foreach(key: K)(fun: (V) => Unit) {
+    val set = container get key
+    if (set ne null)
+      set foreach fun
  }

+  /**
+   * @returns Some(value) for the first matching value where the supplied function returns true for the given key,
+   * if no matches it returns None
+   */
  def findValue(key: K)(f: (V) => Boolean): Option[V] = {
    import scala.collection.JavaConversions._
    val set = container get key
    if (set ne null)
      set.iterator.find(f)

@@ -213,23 +396,43 @@ class Index[K <: AnyRef,V <: AnyRef : Manifest] {
      None
  }

+  /**
+   * Applies the supplied function to all keys and their values
+   */
  def foreach(fun: (K,V) => Unit) {
    import scala.collection.JavaConversions._
    container.entrySet foreach {
      (e) => e.getValue.foreach(fun(e.getKey,_))
    }
  }

-  def remove(key: K, value: V) {
+  /**
+   * Disassociates the value of type V from the key of type K
+   * @returns true if the value was disassociated from the key and false if it wasn't previously associated with the key
+   */
+  def remove(key: K, value: V): Boolean = {
    val set = container get key

    if (set ne null) {
      set.synchronized {
        if (set.remove(value)) { //If we can remove the value
          if (set.isEmpty) //and the set becomes empty
            container.remove(key,emptySet) //We try to remove the key if it's mapped to an empty set

+         true //Remove succeeded
        }
+       else false //Remove failed
      }
-    }
+   } else false //Remove failed
  }

-  def clear = { foreach(remove _) }
+  /**
+   * @returns true if the underlying containers is empty, may report false negatives when the last remove is underway
+   */
+  def isEmpty: Boolean = container.isEmpty
+
+  /**
+   * Removes all keys and all values
+   */
+  def clear = foreach { case (k,v) => remove(k,v) }
 }
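Note: Index is a small concurrent multimap — writes for a key are serialized on that key's set, reads stay lock-free, and put/remove now report whether they changed anything. A usage sketch with the methods shown above (key/value types arbitrary; values must be Comparable for the skip-list set):

val index = new Index[String, String]

index.put("fruit", "apple")     // true: new association
index.put("fruit", "apple")     // false: already present
index.put("fruit", "banana")    // true

index.values("fruit")                        // Array("apple", "banana"), a fresh snapshot
index.findValue("fruit")(_.startsWith("b"))  // Some("banana")

index.remove("fruit", "apple")  // true; a key is dropped once its set empties
index.isEmpty                   // false: "fruit" still holds "banana"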
@@ -6,7 +6,7 @@ package se.scalablesolutions.akka.actor

 import se.scalablesolutions.akka.stm.Ref
 import se.scalablesolutions.akka.AkkaException
-import se.scalablesolutions.akka.util.{ Function => JFunc, Procedure => JProc }
+import se.scalablesolutions.akka.japi.{ Function => JFunc, Procedure => JProc }
 import java.util.concurrent.atomic.AtomicReference
 import java.util.concurrent.CountDownLatch
@@ -29,10 +29,10 @@ class SupervisorException private[akka](message: String) extends AkkaException(m
  *    RestartStrategy(OneForOne, 3, 10, List(classOf[Exception]),
  *      Supervise(
  *        myFirstActor,
- *        LifeCycle(Permanent)) ::
+ *        Permanent) ::
  *      Supervise(
  *        mySecondActor,
- *        LifeCycle(Permanent)) ::
+ *        Permanent) ::
  *      Nil))
  * </pre>
  *

@@ -60,10 +60,10 @@ object Supervisor {
  *    RestartStrategy(OneForOne, 3, 10, List(classOf[Exception]),
  *      Supervise(
  *        myFirstActor,
- *        LifeCycle(Permanent)) ::
+ *        Permanent) ::
  *      Supervise(
  *        mySecondActor,
- *        LifeCycle(Permanent)) ::
+ *        Permanent) ::
  *      Nil))
  * </pre>
  *

@@ -79,14 +79,14 @@ object Supervisor {
 object SupervisorFactory {
   def apply(config: SupervisorConfig) = new SupervisorFactory(config)

-  private[akka] def retrieveFaultHandlerAndTrapExitsFrom(config: SupervisorConfig):
-    Tuple2[FaultHandlingStrategy, List[Class[_ <: Throwable]]] = config match {
-      case SupervisorConfig(RestartStrategy(scheme, maxNrOfRetries, timeRange, trapExceptions), _) =>
-        scheme match {
-          case AllForOne => (AllForOneStrategy(maxNrOfRetries, timeRange), trapExceptions)
-          case OneForOne => (OneForOneStrategy(maxNrOfRetries, timeRange), trapExceptions)
-        }
-    }
+  private[akka] def retrieveFaultHandlerAndTrapExitsFrom(config: SupervisorConfig): FaultHandlingStrategy =
+    config match {
+      case SupervisorConfig(RestartStrategy(scheme, maxNrOfRetries, timeRange, trapExceptions), _) =>
+        scheme match {
+          case AllForOne => AllForOneStrategy(trapExceptions,maxNrOfRetries, timeRange)
+          case OneForOne => OneForOneStrategy(trapExceptions,maxNrOfRetries, timeRange)
+        }
+    }
 }

 /**

@@ -99,9 +99,8 @@ class SupervisorFactory private[akka] (val config: SupervisorConfig) extends Log
   def newInstance: Supervisor = newInstanceFor(config)

   def newInstanceFor(config: SupervisorConfig): Supervisor = {
-    val (handler, trapExits) = SupervisorFactory.retrieveFaultHandlerAndTrapExitsFrom(config)
-    val supervisor = new Supervisor(handler, trapExits)
+    val supervisor = new Supervisor(SupervisorFactory.retrieveFaultHandlerAndTrapExitsFrom(config))
     supervisor.configure(config)
     supervisor.start
     supervisor

@@ -121,13 +120,13 @@ class SupervisorFactory private[akka] (val config: SupervisorConfig) extends Log
  * @author <a href="http://jonasboner.com">Jonas Bonér</a>
  */
 sealed class Supervisor private[akka] (
-  handler: FaultHandlingStrategy, trapExceptions: List[Class[_ <: Throwable]]) {
+  handler: FaultHandlingStrategy) {
   import Supervisor._

   private val _childActors = new ConcurrentHashMap[String, List[ActorRef]]
   private val _childSupervisors = new CopyOnWriteArrayList[Supervisor]

-  private[akka] val supervisor = actorOf(new SupervisorActor(handler, trapExceptions)).start
+  private[akka] val supervisor = actorOf(new SupervisorActor(handler)).start

   def uuid = supervisor.uuid

@@ -160,7 +159,7 @@ sealed class Supervisor private[akka] (
       else list
     }
     _childActors.put(className, actorRef :: currentActors)
-    actorRef.lifeCycle = Some(lifeCycle)
+    actorRef.lifeCycle = lifeCycle
     supervisor.link(actorRef)
     remoteAddress.foreach { address =>
       RemoteServerModule.registerActor(

@@ -179,13 +178,9 @@ sealed class Supervisor private[akka] (
  *
  * @author <a href="http://jonasboner.com">Jonas Bonér</a>
  */
-final class SupervisorActor private[akka] (
-  handler: FaultHandlingStrategy,
-  trapExceptions: List[Class[_ <: Throwable]]) extends Actor {
+final class SupervisorActor private[akka] (handler: FaultHandlingStrategy) extends Actor {
   import self._

-  trapExit = trapExceptions
-  faultHandler = Some(handler)
+  faultHandler = handler

   override def postStop(): Unit = shutdownLinkedActors
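Note: with trapExit folded into the strategy, a supervisor is wired from a single FaultHandlingStrategy value and bare lifecycle objects. A configuration sketch against the post-merge API (myFirstActor and mySecondActor are placeholders, as in the doc comment above; the SupervisorFactory import is assumed):

import se.scalablesolutions.akka.config.ScalaConfig._

val factory = SupervisorFactory(
  SupervisorConfig(
    RestartStrategy(OneForOne, 3, 10, List(classOf[Exception])),
    Supervise(myFirstActor, Permanent) ::
    Supervise(mySecondActor, Permanent) ::
    Nil))

val supervisor = factory.newInstance // configures, starts and returns the Supervisor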
@@ -32,15 +32,36 @@ object Config {
   System.setProperty("org.multiverse.api.GlobalStmInstance.factorymethod", "org.multiverse.stms.alpha.AlphaStm.createFast")

   val HOME = {
-    val systemHome = System.getenv("AKKA_HOME")
-    if (systemHome == null || systemHome.length == 0 || systemHome == ".") {
-      val optionHome = System.getProperty("akka.home", "")
-      if (optionHome.length != 0) Some(optionHome)
-      else None
-    } else Some(systemHome)
+    val envHome = System.getenv("AKKA_HOME") match {
+      case null | "" | "." => None
+      case value => Some(value)
+    }
+
+    val systemHome = System.getProperty("akka.home") match {
+      case null | "" => None
+      case value => Some(value)
+    }
+
+    envHome orElse systemHome
   }

   val config = {
+    val confName = {
+      val envConf = System.getenv("AKKA_MODE") match {
+        case null | "" => None
+        case value => Some(value)
+      }
+
+      val systemConf = System.getProperty("akka.mode") match {
+        case null | "" => None
+        case value => Some(value)
+      }
+
+      (envConf orElse systemConf).map("akka." + _ + ".conf").getOrElse("akka.conf")
+    }
+
     if (System.getProperty("akka.config", "") != "") {
       val configFile = System.getProperty("akka.config", "")
       try {

@@ -52,19 +73,9 @@ object Config {
           "\n\tdue to: " + e.toString)
       }
       Configgy.config
-    } else if (getClass.getClassLoader.getResource("akka.conf") != null) {
-      try {
-        Configgy.configureFromResource("akka.conf", getClass.getClassLoader)
-        ConfigLogger.log.info("Config loaded from the application classpath.")
-      } catch {
-        case e: ParseException => throw new ConfigurationException(
-          "Can't load 'akka.conf' config file from application classpath," +
-          "\n\tdue to: " + e.toString)
-      }
-      Configgy.config
     } else if (HOME.isDefined) {
       try {
-        val configFile = HOME.getOrElse(throwNoAkkaHomeException) + "/config/akka.conf"
+        val configFile = HOME.getOrElse(throwNoAkkaHomeException) + "/config/" + confName
         Configgy.configure(configFile)
         ConfigLogger.log.info(
           "AKKA_HOME is defined as [%s], config loaded from [%s].",

@@ -73,18 +84,28 @@ object Config {
       } catch {
         case e: ParseException => throw new ConfigurationException(
           "AKKA_HOME is defined as [" + HOME.get + "] " +
-          "\n\tbut the 'akka.conf' config file can not be found at [" + HOME.get + "/config/akka.conf]," +
+          "\n\tbut the 'akka.conf' config file can not be found at [" + HOME.get + "/config/"+ confName + "]," +
           "\n\tdue to: " + e.toString)
       }
       Configgy.config
+    } else if (getClass.getClassLoader.getResource(confName) ne null) {
+      try {
+        Configgy.configureFromResource(confName, getClass.getClassLoader)
+        ConfigLogger.log.info("Config [%s] loaded from the application classpath.",confName)
+      } catch {
+        case e: ParseException => throw new ConfigurationException(
+          "Can't load '" + confName + "' config file from application classpath," +
+          "\n\tdue to: " + e.toString)
+      }
+      Configgy.config
     } else {
       ConfigLogger.log.warning(
-        "\nCan't load 'akka.conf'." +
-        "\nOne of the three ways of locating the 'akka.conf' file needs to be defined:" +
+        "\nCan't load '" + confName + "'." +
+        "\nOne of the three ways of locating the '" + confName + "' file needs to be defined:" +
         "\n\t1. Define the '-Dakka.config=...' system property option." +
-        "\n\t2. Put the 'akka.conf' file on the classpath." +
+        "\n\t2. Put the '" + confName + "' file on the classpath." +
         "\n\t3. Define 'AKKA_HOME' environment variable pointing to the root of the Akka distribution." +
-        "\nI have no way of finding the 'akka.conf' configuration file." +
+        "\nI have no way of finding the '" + confName + "' configuration file." +
         "\nUsing default values everywhere.")
       CConfig.fromString("<akka></akka>") // default empty config
     }

@@ -92,7 +113,7 @@ object Config {

   val CONFIG_VERSION = config.getString("akka.version", VERSION)
   if (VERSION != CONFIG_VERSION) throw new ConfigurationException(
-    "Akka JAR version [" + VERSION + "] is different than the provided config ('akka.conf') version [" + CONFIG_VERSION + "]")
+    "Akka JAR version [" + VERSION + "] is different than the provided config version [" + CONFIG_VERSION + "]")

   val TIME_UNIT = config.getString("akka.time-unit", "seconds")
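Note: config resolution is now mode-aware: AKKA_MODE=test (or -Dakka.mode=test) selects akka.test.conf, falling back to akka.conf. The same env-then-property pattern, reduced to a standalone sketch:

// Standalone illustration of the fallback chain used for AKKA_MODE above.
def opt(raw: String): Option[String] = raw match {
  case null | "" => None
  case value     => Some(value)
}

val mode     = opt(System.getenv("AKKA_MODE")) orElse opt(System.getProperty("akka.mode"))
val confName = mode.map("akka." + _ + ".conf").getOrElse("akka.conf")
// AKKA_MODE=test           => "akka.test.conf"
// neither env nor property => "akka.conf"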
@@ -7,20 +7,45 @@ package se.scalablesolutions.akka.config
 import se.scalablesolutions.akka.actor.{ActorRef}
 import se.scalablesolutions.akka.dispatch.MessageDispatcher

-sealed abstract class FaultHandlingStrategy
-object AllForOneStrategy {
-  def apply(maxNrOfRetries: Int, withinTimeRange: Int): AllForOneStrategy =
-    AllForOneStrategy(if (maxNrOfRetries < 0) None else Some(maxNrOfRetries),
-                      if (withinTimeRange < 0) None else Some(withinTimeRange))
-}
+sealed abstract class FaultHandlingStrategy {
+  def trapExit: List[Class[_ <: Throwable]]
+}

-case class AllForOneStrategy(maxNrOfRetries: Option[Int] = None, withinTimeRange: Option[Int] = None) extends FaultHandlingStrategy
+object AllForOneStrategy {
+  def apply(trapExit: List[Class[_ <: Throwable]], maxNrOfRetries: Int, withinTimeRange: Int) =
+    new AllForOneStrategy(trapExit, if (maxNrOfRetries < 0) None else Some(maxNrOfRetries), if (withinTimeRange < 0) None else Some(withinTimeRange))
+  def apply(trapExit: Array[Class[Throwable]], maxNrOfRetries: Int, withinTimeRange: Int) =
+    new AllForOneStrategy(trapExit.toList,maxNrOfRetries,withinTimeRange)
+}
+
+case class AllForOneStrategy(trapExit: List[Class[_ <: Throwable]],
+                             maxNrOfRetries: Option[Int] = None,
+                             withinTimeRange: Option[Int] = None) extends FaultHandlingStrategy {
+  def this(trapExit: List[Class[_ <: Throwable]],maxNrOfRetries: Int, withinTimeRange: Int) =
+    this(trapExit, if (maxNrOfRetries < 0) None else Some(maxNrOfRetries), if (withinTimeRange < 0) None else Some(withinTimeRange))
+  def this(trapExit: Array[Class[Throwable]],maxNrOfRetries: Int, withinTimeRange: Int) =
+    this(trapExit.toList,maxNrOfRetries,withinTimeRange)
+}

 object OneForOneStrategy {
-  def apply(maxNrOfRetries: Int, withinTimeRange: Int): OneForOneStrategy =
-    this(if (maxNrOfRetries < 0) None else Some(maxNrOfRetries),
-         if (withinTimeRange < 0) None else Some(withinTimeRange))
+  def apply(trapExit: List[Class[_ <: Throwable]], maxNrOfRetries: Int, withinTimeRange: Int) =
+    new OneForOneStrategy(trapExit, if (maxNrOfRetries < 0) None else Some(maxNrOfRetries), if (withinTimeRange < 0) None else Some(withinTimeRange))
+  def apply(trapExit: Array[Class[Throwable]], maxNrOfRetries: Int, withinTimeRange: Int) =
+    new OneForOneStrategy(trapExit.toList,maxNrOfRetries,withinTimeRange)
 }

-case class OneForOneStrategy(maxNrOfRetries: Option[Int] = None, withinTimeRange: Option[Int] = None) extends FaultHandlingStrategy
+case class OneForOneStrategy(trapExit: List[Class[_ <: Throwable]],
+                             maxNrOfRetries: Option[Int] = None,
+                             withinTimeRange: Option[Int] = None) extends FaultHandlingStrategy {
+  def this(trapExit: List[Class[_ <: Throwable]],maxNrOfRetries: Int, withinTimeRange: Int) =
+    this(trapExit, if (maxNrOfRetries < 0) None else Some(maxNrOfRetries), if (withinTimeRange < 0) None else Some(withinTimeRange))
+  def this(trapExit: Array[Class[Throwable]],maxNrOfRetries: Int, withinTimeRange: Int) =
+    this(trapExit.toList,maxNrOfRetries,withinTimeRange)
+}
+
+case object NoFaultHandlingStrategy extends FaultHandlingStrategy {
+  def trapExit: List[Class[_ <: Throwable]] = Nil
+}

 /**
  * Configuration classes - not to be used as messages.

@@ -32,12 +57,13 @@ object ScalaConfig {

   abstract class Server extends ConfigElement
   abstract class FailOverScheme extends ConfigElement
-  abstract class Scope extends ConfigElement
+  abstract class LifeCycle extends ConfigElement

   case class SupervisorConfig(restartStrategy: RestartStrategy, worker: List[Server]) extends Server
   class Supervise(val actorRef: ActorRef, val lifeCycle: LifeCycle, _remoteAddress: RemoteAddress) extends Server {
     val remoteAddress: Option[RemoteAddress] = if (_remoteAddress eq null) None else Some(_remoteAddress)
   }

   object Supervise {
     def apply(actorRef: ActorRef, lifeCycle: LifeCycle, remoteAddress: RemoteAddress) = new Supervise(actorRef, lifeCycle, remoteAddress)
     def apply(actorRef: ActorRef, lifeCycle: LifeCycle) = new Supervise(actorRef, lifeCycle, null)

@@ -53,9 +79,9 @@ object ScalaConfig {
   case object AllForOne extends FailOverScheme
   case object OneForOne extends FailOverScheme

-  case class LifeCycle(scope: Scope) extends ConfigElement
-  case object Permanent extends Scope
-  case object Temporary extends Scope
+  case object Permanent extends LifeCycle
+  case object Temporary extends LifeCycle
+  case object UndefinedLifeCycle extends LifeCycle

   case class RemoteAddress(val hostname: String, val port: Int) extends ConfigElement

@@ -139,22 +165,22 @@ object JavaConfig {
       scheme.transform, maxNrOfRetries, withinTimeRange, trapExceptions.toList)
   }

-  class LifeCycle(@BeanProperty val scope: Scope) extends ConfigElement {
-    def transform = {
-      se.scalablesolutions.akka.config.ScalaConfig.LifeCycle(scope.transform)
-    }
-  }
-
-  abstract class Scope extends ConfigElement {
-    def transform: se.scalablesolutions.akka.config.ScalaConfig.Scope
-  }
-  class Permanent extends Scope {
+  abstract class LifeCycle extends ConfigElement {
+    def transform: se.scalablesolutions.akka.config.ScalaConfig.LifeCycle
+  }
+
+  class Permanent extends LifeCycle {
     override def transform = se.scalablesolutions.akka.config.ScalaConfig.Permanent
   }
-  class Temporary extends Scope {
+
+  class Temporary extends LifeCycle {
     override def transform = se.scalablesolutions.akka.config.ScalaConfig.Temporary
   }

+  class UndefinedLifeCycle extends LifeCycle {
+    override def transform = se.scalablesolutions.akka.config.ScalaConfig.UndefinedLifeCycle
+  }
+
   abstract class FailOverScheme extends ConfigElement {
     def transform: se.scalablesolutions.akka.config.ScalaConfig.FailOverScheme
   }
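Note: a FaultHandlingStrategy now carries its own trap-exit list, and negative retry/time-range arguments still mean "unbounded" (they become None). A construction sketch using the factories added above:

import se.scalablesolutions.akka.config.{AllForOneStrategy, OneForOneStrategy}

// Restart all siblings when one fails: trap Exception, max 3 restarts within time range 10.
val allForOne = AllForOneStrategy(List(classOf[Exception]), 3, 10)

// Restart only the failing child; -1 maps to None, i.e. no limit.
val oneForOne = OneForOneStrategy(List(classOf[Exception]), -1, -1)

oneForOne.trapExit       // List(classOf[Exception])
oneForOne.maxNrOfRetries // None, because -1 was passed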
@@ -11,7 +11,7 @@ import se.scalablesolutions.akka.actor.{Actor, ActorRef}
 import se.scalablesolutions.akka.actor.Actor._
 import se.scalablesolutions.akka.dispatch.CompletableFuture
 import se.scalablesolutions.akka.AkkaException
-import se.scalablesolutions.akka.util.{ Function, SideEffect }
+import se.scalablesolutions.akka.japi.{ Function, SideEffect }

 /**
  * Implements Oz-style dataflow (single assignment) variables.
@@ -5,13 +5,15 @@
 package se.scalablesolutions.akka.dispatch

 import se.scalablesolutions.akka.actor.{Actor, ActorRef}
-import se.scalablesolutions.akka.config.Config.config
-import net.lag.configgy.ConfigMap
-import java.util.concurrent.ThreadPoolExecutor.{AbortPolicy, CallerRunsPolicy, DiscardOldestPolicy, DiscardPolicy}
-import java.util.concurrent.TimeUnit
+import se.scalablesolutions.akka.config.Config._
+import se.scalablesolutions.akka.util.{Duration, Logging}
+import se.scalablesolutions.akka.actor.newUuid
+
+import net.lag.configgy.ConfigMap
+
+import java.util.concurrent.ThreadPoolExecutor.{AbortPolicy, CallerRunsPolicy, DiscardOldestPolicy, DiscardPolicy}
+import java.util.concurrent.TimeUnit

 /**
  * Scala API. Dispatcher factory.
  * <p/>

@@ -45,14 +47,12 @@ import se.scalablesolutions.akka.actor.newUuid
  * @author <a href="http://jonasboner.com">Jonas Bonér</a>
  */
 object Dispatchers extends Logging {
-  val THROUGHPUT = config.getInt("akka.actor.throughput", 5)
-  val THROUGHPUT_DEADLINE_MS = config.getInt("akka.actor.throughput-deadline-ms",-1)
-  val MAILBOX_CAPACITY = config.getInt("akka.actor.default-dispatcher.mailbox-capacity", -1)
-  val MAILBOX_CONFIG = MailboxConfig(
-    capacity = Dispatchers.MAILBOX_CAPACITY,
-    pushTimeOut = config.getInt("akka.actor.default-dispatcher.mailbox-push-timeout-ms").map(Duration(_,TimeUnit.MILLISECONDS)),
-    blockingDequeue = false
-  )
+  val THROUGHPUT = config.getInt("akka.actor.throughput", 5)
+  val MAILBOX_CAPACITY = config.getInt("akka.actor.default-dispatcher.mailbox-capacity", -1)
+  val MAILBOX_PUSH_TIME_OUT = Duration(config.getInt("akka.actor.default-dispatcher.mailbox-push-timeout-time", 10), TIME_UNIT)
+  val THROUGHPUT_DEADLINE_TIME = Duration(config.getInt("akka.actor.throughput-deadline-time",-1), TIME_UNIT)
+  val THROUGHPUT_DEADLINE_TIME_MILLIS = THROUGHPUT_DEADLINE_TIME.toMillis.toInt
+  val MAILBOX_TYPE = if (MAILBOX_CAPACITY < 0) UnboundedMailbox() else BoundedMailbox()

   lazy val defaultGlobalDispatcher = {
     config.getConfigMap("akka.actor.default-dispatcher").flatMap(from).getOrElse(globalExecutorBasedEventDrivenDispatcher)

@@ -60,7 +60,8 @@ object Dispatchers extends Logging {

   object globalHawtDispatcher extends HawtDispatcher

-  object globalExecutorBasedEventDrivenDispatcher extends ExecutorBasedEventDrivenDispatcher("global",THROUGHPUT,THROUGHPUT_DEADLINE_MS,MAILBOX_CONFIG) {
+  object globalExecutorBasedEventDrivenDispatcher extends ExecutorBasedEventDrivenDispatcher(
+    "global", THROUGHPUT, THROUGHPUT_DEADLINE_TIME_MILLIS, MAILBOX_TYPE) {
     override def register(actor: ActorRef) = {
       if (isShutdown) init
       super.register(actor)

@@ -82,7 +83,7 @@ object Dispatchers extends Logging {
    * <p/>
    * E.g. each actor consumes its own thread.
    */
-  def newThreadBasedDispatcher(actor: ActorRef) = new ThreadBasedDispatcher(actor)
+  def newThreadBasedDispatcher(actor: ActorRef) = new ThreadBasedDispatcher(actor, BoundedMailbox(true))

   /**
    * Creates an thread based dispatcher serving a single actor through the same single thread.

@@ -97,36 +98,32 @@ object Dispatchers extends Logging {
    * <p/>
    * E.g. each actor consumes its own thread.
    */
-  def newThreadBasedDispatcher(actor: ActorRef, mailboxCapacity: Int, pushTimeOut: Duration) = new ThreadBasedDispatcher(actor, MailboxConfig(mailboxCapacity,Option(pushTimeOut),true))
+  def newThreadBasedDispatcher(actor: ActorRef, mailboxCapacity: Int, pushTimeOut: Duration) =
+    new ThreadBasedDispatcher(actor, mailboxCapacity, pushTimeOut)

   /**
    * Creates a executor-based event-driven dispatcher serving multiple (millions) of actors through a thread pool.
    * <p/>
    * Has a fluent builder interface for configuring its semantics.
    */
-  def newExecutorBasedEventDrivenDispatcher(name: String) = new ExecutorBasedEventDrivenDispatcher(name, THROUGHPUT)
+  def newExecutorBasedEventDrivenDispatcher(name: String) = new ExecutorBasedEventDrivenDispatcher(name)

   /**
    * Creates a executor-based event-driven dispatcher serving multiple (millions) of actors through a thread pool.
    * <p/>
    * Has a fluent builder interface for configuring its semantics.
    */
-  def newExecutorBasedEventDrivenDispatcher(name: String, throughput: Int) = new ExecutorBasedEventDrivenDispatcher(name, throughput)
+  def newExecutorBasedEventDrivenDispatcher(name: String, throughput: Int, mailboxType: MailboxType) =
+    new ExecutorBasedEventDrivenDispatcher(name, throughput, mailboxType)

   /**
    * Creates a executor-based event-driven dispatcher serving multiple (millions) of actors through a thread pool.
    * <p/>
    * Has a fluent builder interface for configuring its semantics.
    */
-  def newExecutorBasedEventDrivenDispatcher(name: String, throughput: Int, throughputDeadlineMs: Int, mailboxCapacity: Int) = new ExecutorBasedEventDrivenDispatcher(name, throughput, throughputDeadlineMs, mailboxCapacity)
-
-  /**
-   * Creates a executor-based event-driven dispatcher serving multiple (millions) of actors through a thread pool.
-   * <p/>
-   * Has a fluent builder interface for configuring its semantics.
-   */
-  def newExecutorBasedEventDrivenDispatcher(name: String, throughput: Int, throughputDeadlineMs: Int, mailboxCapacity: Int, pushTimeOut: Duration) = new ExecutorBasedEventDrivenDispatcher(name, throughput, throughputDeadlineMs, MailboxConfig(mailboxCapacity,Some(pushTimeOut),false))
+  def newExecutorBasedEventDrivenDispatcher(name: String, throughput: Int, throughputDeadlineMs: Int, mailboxType: MailboxType) =
+    new ExecutorBasedEventDrivenDispatcher(name, throughput, throughputDeadlineMs, mailboxType)

   /**
    * Creates a executor-based event-driven dispatcher with work stealing (TODO: better doc) serving multiple (millions) of actors through a thread pool.

@@ -140,7 +137,8 @@ object Dispatchers extends Logging {
    * <p/>
    * Has a fluent builder interface for configuring its semantics.
    */
-  def newExecutorBasedEventDrivenWorkStealingDispatcher(name: String, mailboxCapacity: Int) = new ExecutorBasedEventDrivenWorkStealingDispatcher(name, mailboxCapacity)
+  def newExecutorBasedEventDrivenWorkStealingDispatcher(name: String, mailboxType: MailboxType) =
+    new ExecutorBasedEventDrivenWorkStealingDispatcher(name, mailboxType = mailboxType)

   /**
    * Utility function that tries to load the specified dispatcher config from the akka.conf

@@ -156,7 +154,7 @@ object Dispatchers extends Logging {
    *   type = "GlobalExecutorBasedEventDriven" # Must be one of the following, all "Global*" are non-configurable
    *                                           # (ExecutorBasedEventDrivenWorkStealing), ExecutorBasedEventDriven,
    *                                           # Hawt, GlobalExecutorBasedEventDriven, GlobalHawt
-   *   keep-alive-ms = 60000                   # Keep alive time for threads
+   *   keep-alive-time = 60                    # Keep alive time for threads
    *   core-pool-size-factor = 1.0             # No of core threads ... ceil(available processors * factor)
    *   max-pool-size-factor = 4.0              # Max no of threads ... ceil(available processors * factor)
    *   executor-bounds = -1                    # Makes the Executor bounded, -1 is unbounded

@@ -176,7 +174,7 @@ object Dispatchers extends Logging {

     def threadPoolConfig(b: ThreadPoolBuilder) {
       b.configureIfPossible( builder => {
-        cfg.getInt("keep-alive-ms").foreach(builder.setKeepAliveTimeInMillis(_))
+        cfg.getInt("keep-alive-time").foreach(time => builder.setKeepAliveTimeInMillis(Duration(time, TIME_UNIT).toMillis.toInt))
         cfg.getDouble("core-pool-size-factor").foreach(builder.setCorePoolSizeFromFactor(_))
         cfg.getDouble("max-pool-size-factor").foreach(builder.setMaxPoolSizeFromFactor(_))
         cfg.getInt("executor-bounds").foreach(builder.setExecutorBounds(_))

@@ -193,37 +191,27 @@ object Dispatchers extends Logging {
       })
     }

-    lazy val mailboxBounds: MailboxConfig = {
-      val capacity = cfg.getInt("mailbox-capacity",Dispatchers.MAILBOX_CAPACITY)
-      val timeout = cfg.getInt("mailbox-push-timeout-ms").map(Duration(_,TimeUnit.MILLISECONDS))
-      MailboxConfig(capacity,timeout,false)
+    lazy val mailboxType: MailboxType = {
+      val capacity = cfg.getInt("mailbox-capacity", MAILBOX_CAPACITY)
+      // FIXME how do we read in isBlocking for mailbox? Now set to 'false'.
+      if (capacity < 0) UnboundedMailbox()
+      else BoundedMailbox(false, capacity, Duration(cfg.getInt("mailbox-push-timeout", MAILBOX_PUSH_TIME_OUT.toMillis.toInt), TIME_UNIT))
     }

-    val dispatcher: Option[MessageDispatcher] = cfg.getString("type") map {
-      case "ExecutorBasedEventDrivenWorkStealing" =>
-        new ExecutorBasedEventDrivenWorkStealingDispatcher(name,MAILBOX_CAPACITY,threadPoolConfig)
-
+    cfg.getString("type") map {
       case "ExecutorBasedEventDriven" =>
         new ExecutorBasedEventDrivenDispatcher(
           name,
-          cfg.getInt("throughput",THROUGHPUT),
-          cfg.getInt("throughput-deadline-ms",THROUGHPUT_DEADLINE_MS),
-          mailboxBounds,
+          cfg.getInt("throughput", THROUGHPUT),
+          cfg.getInt("throughput-deadline", THROUGHPUT_DEADLINE_TIME_MILLIS),
+          mailboxType,
           threadPoolConfig)

-      case "Hawt" =>
-        new HawtDispatcher(cfg.getBool("aggregate").getOrElse(true))
-
-      case "GlobalExecutorBasedEventDriven" =>
-        globalExecutorBasedEventDrivenDispatcher
-
-      case "GlobalHawt" =>
-        globalHawtDispatcher
-
-      case unknown =>
-        throw new IllegalArgumentException("Unknown dispatcher type [%s]" format unknown)
+      case "ExecutorBasedEventDrivenWorkStealing" => new ExecutorBasedEventDrivenWorkStealingDispatcher(name, mailboxType, threadPoolConfig)
+      case "Hawt" => new HawtDispatcher(cfg.getBool("aggregate").getOrElse(true))
+      case "GlobalExecutorBasedEventDriven" => globalExecutorBasedEventDrivenDispatcher
+      case "GlobalHawt" => globalHawtDispatcher
+      case unknown => throw new IllegalArgumentException("Unknown dispatcher type [%s]" format unknown)
     }
-
-    dispatcher
   }
 }
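Note: in Dispatchers a MailboxType (UnboundedMailbox or BoundedMailbox) now replaces the old MailboxConfig/capacity parameters, and time settings are read in the configured TIME_UNIT rather than raw milliseconds. A construction sketch against the new factory signatures (values illustrative):

import se.scalablesolutions.akka.dispatch.{Dispatchers, BoundedMailbox, UnboundedMailbox}
import se.scalablesolutions.akka.util.Duration
import java.util.concurrent.TimeUnit

// Unbounded mailbox: enqueue never blocks or times out.
val unbounded = Dispatchers.newExecutorBasedEventDrivenDispatcher("worker", 5, UnboundedMailbox())

// Bounded mailbox: capacity 1000, non-blocking, 10-second push timeout.
val bounded = Dispatchers.newExecutorBasedEventDrivenDispatcher(
  "bounded-worker", 5, BoundedMailbox(false, 1000, Duration(10, TimeUnit.SECONDS)))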
@@ -5,6 +5,7 @@
 package se.scalablesolutions.akka.dispatch

 import se.scalablesolutions.akka.actor.{ActorRef, IllegalActorStateException}
+import se.scalablesolutions.akka.util.ReflectiveAccess.EnterpriseModule

 import java.util.Queue
 import java.util.concurrent.{RejectedExecutionException, ConcurrentLinkedQueue, LinkedBlockingQueue}

@@ -65,103 +66,67 @@ import java.util.concurrent.{RejectedExecutionException, ConcurrentLinkedQueue,
 class ExecutorBasedEventDrivenDispatcher(
   _name: String,
   val throughput: Int = Dispatchers.THROUGHPUT,
-  val throughputDeadlineMs: Int = Dispatchers.THROUGHPUT_DEADLINE_MS,
-  mailboxConfig: MailboxConfig = Dispatchers.MAILBOX_CONFIG,
-  config: (ThreadPoolBuilder) => Unit = _ => ()) extends MessageDispatcher with ThreadPoolBuilder {
+  val throughputDeadlineTime: Int = Dispatchers.THROUGHPUT_DEADLINE_TIME_MILLIS,
+  _mailboxType: MailboxType = Dispatchers.MAILBOX_TYPE,
+  config: (ThreadPoolBuilder) => Unit = _ => ())
+  extends MessageDispatcher with ThreadPoolBuilder {

-  def this(_name: String, throughput: Int, throughputDeadlineMs: Int, capacity: Int) = this(_name,throughput,throughputDeadlineMs,MailboxConfig(capacity,None,false))
-  def this(_name: String, throughput: Int) = this(_name, throughput, Dispatchers.THROUGHPUT_DEADLINE_MS, Dispatchers.MAILBOX_CAPACITY) // Needed for Java API usage
-  def this(_name: String) = this(_name,Dispatchers.THROUGHPUT, Dispatchers.THROUGHPUT_DEADLINE_MS,Dispatchers.MAILBOX_CAPACITY) // Needed for Java API usage
+  def this(_name: String, throughput: Int, throughputDeadlineTime: Int, mailboxType: MailboxType) =
+    this(_name, throughput, throughputDeadlineTime, mailboxType, _ => ()) // Needed for Java API usage

-  //FIXME remove this from ThreadPoolBuilder
-  mailboxCapacity = mailboxConfig.capacity
+  def this(_name: String, throughput: Int, mailboxType: MailboxType) =
+    this(_name, throughput, Dispatchers.THROUGHPUT_DEADLINE_TIME_MILLIS, mailboxType) // Needed for Java API usage

-  @volatile private var active: Boolean = false
+  def this(_name: String, throughput: Int) =
+    this(_name, throughput, Dispatchers.THROUGHPUT_DEADLINE_TIME_MILLIS, Dispatchers.MAILBOX_TYPE) // Needed for Java API usage
+
+  def this(_name: String) =
+    this(_name, Dispatchers.THROUGHPUT, Dispatchers.THROUGHPUT_DEADLINE_TIME_MILLIS, Dispatchers.MAILBOX_TYPE) // Needed for Java API usage
+
+  val mailboxType = Some(_mailboxType)
+
+  @volatile private[akka] var active = false

   val name = "akka:event-driven:dispatcher:" + _name
   init

-  /**
-   * This is the behavior of an ExecutorBasedEventDrivenDispatchers mailbox
-   */
-  trait ExecutableMailbox extends Runnable { self: MessageQueue =>
-    final def run = {
-      val reschedule = try {
-        processMailbox()
-      } finally {
-        dispatcherLock.unlock()
-      }
-      if (reschedule || !self.isEmpty)
-        registerForExecution(self)
-    }
-
-    /**
-     * Process the messages in the mailbox
-     *
-     * @return true if the processing finished before the mailbox was empty, due to the throughput constraint
-     */
-    final def processMailbox(): Boolean = {
-      var nextMessage = self.dequeue
-      if (nextMessage ne null) {
-        val throttle = throughput > 0
-        var processedMessages = 0
-        val isDeadlineEnabled = throttle && throughputDeadlineMs > 0
-        val started = if (isDeadlineEnabled) System.currentTimeMillis else 0
-
-        do {
-          nextMessage.invoke
-
-          if(throttle) { //Will be elided when false
-            processedMessages += 1
-            if ((processedMessages >= throughput)
-              || (isDeadlineEnabled && (System.currentTimeMillis - started) >= throughputDeadlineMs)) //If we're throttled, break out
-              return !self.isEmpty
-          }
-          nextMessage = self.dequeue
-        }
-        while (nextMessage ne null)
-      }
-
-      false
-    }
-  }
-
   def dispatch(invocation: MessageInvocation) = {
     val mbox = getMailbox(invocation.receiver)
     mbox enqueue invocation
-    registerForExecution(mbox)
-  }
-
-  protected def registerForExecution(mailbox: MessageQueue with ExecutableMailbox): Unit = if (active) {
-    if (mailbox.dispatcherLock.tryLock()) {
-      try {
-        executor execute mailbox
-      } catch {
-        case e: RejectedExecutionException =>
-          mailbox.dispatcherLock.unlock()
-          throw e
-      }
-    }
-  } else {
-    log.warning("%s is shut down,\n\tignoring the rest of the messages in the mailbox of\n\t%s", toString, mailbox)
+    mbox.registerForExecution
   }

   /**
    * @return the mailbox associated with the actor
    */
-  private def getMailbox(receiver: ActorRef) = receiver.mailbox.asInstanceOf[MessageQueue with ExecutableMailbox]
+  private def getMailbox(receiver: ActorRef) = {
+    val mb = receiver.mailbox.asInstanceOf[MessageQueue with ExecutableMailbox]
+    mb.register(this)
+    mb
+  }

   override def mailboxSize(actorRef: ActorRef) = getMailbox(actorRef).size

-  override def createMailbox(actorRef: ActorRef): AnyRef = {
-    if (mailboxCapacity > 0)
-      new DefaultBoundedMessageQueue(mailboxCapacity,mailboxConfig.pushTimeOut,blockDequeue = false) with ExecutableMailbox
-    else
-      new DefaultUnboundedMessageQueue(blockDequeue = false) with ExecutableMailbox
+  def createTransientMailbox(actorRef: ActorRef, mailboxType: TransientMailboxType): AnyRef = mailboxType match {
+    case UnboundedMailbox(blocking) =>
+      new DefaultUnboundedMessageQueue(blocking) with ExecutableMailbox
+    case BoundedMailbox(blocking, capacity, pushTimeOut) =>
+      val cap = if (mailboxCapacity == -1) capacity else mailboxCapacity
+      new DefaultBoundedMessageQueue(cap, pushTimeOut, blocking) with ExecutableMailbox
+  }
+
+  /**
+   * Creates and returns a durable mailbox for the given actor.
+   */
+  def createDurableMailbox(actorRef: ActorRef, mailboxType: DurableMailboxType): AnyRef = mailboxType match {
+    // FIXME make generic (work for TypedActor as well)
+    case FileBasedDurableMailbox(serializer) => EnterpriseModule.createFileBasedMailbox(actorRef).asInstanceOf[MessageQueue]
+    case ZooKeeperBasedDurableMailbox(serializer) => EnterpriseModule.createZooKeeperBasedMailbox(actorRef).asInstanceOf[MessageQueue]
+    case BeanstalkBasedDurableMailbox(serializer) => EnterpriseModule.createBeanstalkBasedMailbox(actorRef).asInstanceOf[MessageQueue]
+    case RedisBasedDurableMailbox(serializer) => EnterpriseModule.createRedisBasedMailbox(actorRef).asInstanceOf[MessageQueue]
+    case AMQPBasedDurableMailbox(serializer) => throw new UnsupportedOperationException("AMQPBasedDurableMailbox is not yet supported")
+    case JMSBasedDurableMailbox(serializer) => throw new UnsupportedOperationException("JMSBasedDurableMailbox is not yet supported")
+  }

   def start = if (!active) {
     log.debug("Starting up %s\n\twith throughput [%d]", toString, throughput)

@@ -188,4 +153,69 @@ class ExecutorBasedEventDrivenDispatcher(
     config(this)
     buildThreadPool
   }
 }
+
+/**
+ * This is the behavior of an ExecutorBasedEventDrivenDispatchers mailbox.
+ */
+trait ExecutableMailbox extends Runnable { self: MessageQueue =>
+
+  private var _dispatcher: Option[ExecutorBasedEventDrivenDispatcher] = None
+
+  def register(md: ExecutorBasedEventDrivenDispatcher) = _dispatcher = Some(md)
+  def dispatcher: ExecutorBasedEventDrivenDispatcher = _dispatcher.getOrElse(
+    throw new IllegalActorStateException("mailbox.register(dispatcher) has not been invoked"))
+
+  final def run = {
+    val reschedule = try {
+      processMailbox()
+    } finally {
+      dispatcherLock.unlock()
+    }
+    if (reschedule || !self.isEmpty) registerForExecution
+  }
+
+  /**
+   * Process the messages in the mailbox
+   *
+   * @return true if the processing finished before the mailbox was empty, due to the throughput constraint
+   */
+  final def processMailbox(): Boolean = {
+    var nextMessage = self.dequeue
+    if (nextMessage ne null) {
+      val throttle = dispatcher.throughput > 0
+      var processedMessages = 0
+      val isDeadlineEnabled = throttle && dispatcher.throughputDeadlineTime > 0
+      val started = if (isDeadlineEnabled) System.currentTimeMillis else 0
+      do {
+        nextMessage.invoke
+
+        if (nextMessage.receiver.isBeingRestarted)
+          return !self.isEmpty
+
+        if (throttle) { // Will be elided when false
+          processedMessages += 1
+          if ((processedMessages >= dispatcher.throughput) ||
+              (isDeadlineEnabled && (System.currentTimeMillis - started) >= dispatcher.throughputDeadlineTime)) // If we're throttled, break out
+            return !self.isEmpty
+        }
+        nextMessage = self.dequeue
+      } while (nextMessage ne null)
+    }
+    false
+  }
+
+  def registerForExecution: Unit = if (dispatcher.active) {
+    if (dispatcherLock.tryLock()) {
+      try {
+        dispatcher.execute(this)
+      } catch {
+        case e: RejectedExecutionException =>
+          dispatcherLock.unlock()
+          throw e
+      }
+    }
+  } else dispatcher.log.warning("%s is shut down,\n\tignoring the rest of the messages in the mailbox of\n\t%s", toString, this)
+}
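Note: the relocated ExecutableMailbox reads throughput and throughputDeadlineTime from its registered dispatcher: a run processes at most `throughput` messages, stops early when the deadline passes or the receiver is being restarted, and reschedules itself so one busy actor cannot monopolize a pool thread. That throttling loop, reduced to a standalone sketch:

// Process up to `throughput` items, or until `deadlineMillis` elapses;
// the Boolean result says whether a reschedule is needed.
def processBatch[T](dequeue: () => Option[T], invoke: T => Unit,
                    throughput: Int, deadlineMillis: Long): Boolean = {
  val started   = System.currentTimeMillis
  var processed = 0
  var next      = dequeue()
  while (next.isDefined) {
    invoke(next.get)
    processed += 1
    val deadlineHit = deadlineMillis > 0 && (System.currentTimeMillis - started) >= deadlineMillis
    if ((throughput > 0 && processed >= throughput) || deadlineHit)
      return true // throttled: hand the thread back, reschedule the rest
    next = dequeue()
  }
  false // mailbox drained
}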
@@ -31,13 +31,15 @@ import se.scalablesolutions.akka.actor.{Actor, ActorRef, IllegalActorStateExcept
  */
 class ExecutorBasedEventDrivenWorkStealingDispatcher(
   _name: String,
-  capacity: Int = Dispatchers.MAILBOX_CAPACITY,
+  _mailboxType: MailboxType = Dispatchers.MAILBOX_TYPE,
   config: (ThreadPoolBuilder) => Unit = _ => ()) extends MessageDispatcher with ThreadPoolBuilder {

-  def this(_name: String, capacity: Int) = this(_name,capacity, _ => ())
-
-  mailboxCapacity = capacity
+  def this(_name: String, mailboxType: MailboxType) = this(_name, mailboxType, _ => ())
+
+  def this(_name: String) = this(_name, Dispatchers.MAILBOX_TYPE, _ => ())
+
+  val mailboxType = Some(_mailboxType)

   @volatile private var active: Boolean = false

   implicit def actorRef2actor(actorRef: ActorRef): Actor = actorRef.actor

@@ -73,33 +75,36 @@ class ExecutorBasedEventDrivenWorkStealingDispatcher(
    * @return true if the mailbox was processed, false otherwise
    */
   private def tryProcessMailbox(mailbox: MessageQueue): Boolean = {
-    var lockAcquiredOnce = false
+    var mailboxWasProcessed = false

     // this do-wile loop is required to prevent missing new messages between the end of processing
     // the mailbox and releasing the lock
     do {
       if (mailbox.dispatcherLock.tryLock) {
-        lockAcquiredOnce = true
         try {
-          processMailbox(mailbox)
+          mailboxWasProcessed = processMailbox(mailbox)
         } finally {
           mailbox.dispatcherLock.unlock
         }
       }
-    } while ((lockAcquiredOnce && !mailbox.isEmpty))
+    } while ((mailboxWasProcessed && !mailbox.isEmpty))

-    lockAcquiredOnce
+    mailboxWasProcessed
   }

   /**
    * Process the messages in the mailbox of the given actor.
    * @return
    */
-  private def processMailbox(mailbox: MessageQueue) = {
+  private def processMailbox(mailbox: MessageQueue): Boolean = {
     var messageInvocation = mailbox.dequeue
     while (messageInvocation ne null) {
       messageInvocation.invoke
+      if (messageInvocation.receiver.isBeingRestarted)
+        return false
       messageInvocation = mailbox.dequeue
     }
+    true
   }

   private def findThief(receiver: ActorRef): Option[ActorRef] = {

@@ -182,35 +187,45 @@ class ExecutorBasedEventDrivenWorkStealingDispatcher(
     buildThreadPool
   }

-  protected override def createMailbox(actorRef: ActorRef): AnyRef = {
-    if (mailboxCapacity <= 0) {
+  def createTransientMailbox(actorRef: ActorRef, mailboxType: TransientMailboxType): AnyRef = mailboxType match {
+    case UnboundedMailbox(blocking) => // FIXME make use of 'blocking' in work stealer ConcurrentLinkedDeque
       new ConcurrentLinkedDeque[MessageInvocation] with MessageQueue with Runnable {
         def enqueue(handle: MessageInvocation): Unit = this.add(handle)

         def dequeue: MessageInvocation = this.poll()

-        def run = {
-          if (!tryProcessMailbox(this)) {
-            // we are not able to process our mailbox (another thread is busy with it), so lets donate some of our mailbox
-            // to another actor and then process his mailbox in stead.
-            findThief(actorRef).foreach( tryDonateAndProcessMessages(actorRef,_) )
-          }
+        def run = if (!tryProcessMailbox(this)) {
+          // we are not able to process our mailbox (another thread is busy with it), so lets donate some of our mailbox
+          // to another actor and then process his mailbox in stead.
+          findThief(actorRef).foreach( tryDonateAndProcessMessages(actorRef,_) )
         }
       }
-    }
-    else {
-      new LinkedBlockingDeque[MessageInvocation](mailboxCapacity) with MessageQueue with Runnable {
+
+    case BoundedMailbox(blocking, capacity, pushTimeOut) =>
+      val cap = if (mailboxCapacity == -1) capacity else mailboxCapacity
+      new LinkedBlockingDeque[MessageInvocation](cap) with MessageQueue with Runnable {
         def enqueue(handle: MessageInvocation): Unit = this.add(handle)

         def dequeue: MessageInvocation = this.poll()

-        def run = {
-          if (!tryProcessMailbox(this)) {
-            // we are not able to process our mailbox (another thread is busy with it), so lets donate some of our mailbox
-            // to another actor and then process his mailbox in stead.
-            findThief(actorRef).foreach( tryDonateAndProcessMessages(actorRef,_) )
-          }
+        def run = if (!tryProcessMailbox(this)) {
+          // we are not able to process our mailbox (another thread is busy with it), so lets donate some of our mailbox
+          // to another actor and then process his mailbox in stead.
+          findThief(actorRef).foreach( tryDonateAndProcessMessages(actorRef, _) )
         }
       }
-    }
   }

+  /**
+   * Creates and returns a durable mailbox for the given actor.
+   */
+  protected def createDurableMailbox(actorRef: ActorRef, mailboxType: DurableMailboxType): AnyRef = mailboxType match {
+    // FIXME make generic (work for TypedActor as well)
+    case FileBasedDurableMailbox(serializer) => throw new UnsupportedOperationException("FileBasedDurableMailbox is not yet supported for ExecutorBasedEventDrivenWorkStealingDispatcher")
+    case ZooKeeperBasedDurableMailbox(serializer) => throw new UnsupportedOperationException("ZooKeeperBasedDurableMailbox is not yet supported for ExecutorBasedEventDrivenWorkStealingDispatcher")
+    case BeanstalkBasedDurableMailbox(serializer) => throw new UnsupportedOperationException("BeanstalkBasedDurableMailbox is not yet supported for ExecutorBasedEventDrivenWorkStealingDispatcher")
+    case RedisBasedDurableMailbox(serializer) => throw new UnsupportedOperationException("RedisBasedDurableMailbox is not yet supported for ExecutorBasedEventDrivenWorkStealingDispatcher")
+    case AMQPBasedDurableMailbox(serializer) => throw new UnsupportedOperationException("AMQPBasedDurableMailbox is not yet supported for ExecutorBasedEventDrivenWorkStealingDispatcher")
+    case JMSBasedDurableMailbox(serializer) => throw new UnsupportedOperationException("JMSBasedDurableMailbox is not yet supported for ExecutorBasedEventDrivenWorkStealingDispatcher")
+  }
+
   override def register(actorRef: ActorRef) = {
@@ -23,6 +23,7 @@ object Futures {
   */
  def future[T](timeout: Long)(body: => T): Future[T] = {
    val promise = new DefaultCompletableFuture[T](timeout)

    try {
      promise completeWithResult body
    } catch {
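The by-name body parameter makes the factory convenient to call with a plain block; the future completes with the block's result, or with its exception. A usage sketch (await and its timeout behaviour are assumed from this version of the Future API):

    import se.scalablesolutions.akka.dispatch.Futures

    val f = Futures.future(1000) { 21 * 2 }  // runs the block, completes the future with 42
    f.await                                  // blocks up to the 1000 ms timeout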
@@ -15,49 +15,41 @@ import java.util.concurrent.atomic.{AtomicInteger, AtomicBoolean}
import java.util.concurrent.CountDownLatch

/**
 * Holds helper methods for working with actors that are using
 * a HawtDispatcher as its dispatcher.
 * Holds helper methods for working with actors that are using a HawtDispatcher as its dispatcher.
 */
object HawtDispatcher {

  private val retained = new AtomicInteger()

  @volatile private var shutdownLatch: CountDownLatch = _

  private def retainNonDaemon = {
    if( retained.getAndIncrement == 0 ) {
      shutdownLatch = new CountDownLatch(1)
      new Thread("HawtDispatch Non-Daemon") {
        override def run = {
          try {
            shutdownLatch.await
          } catch {
            case _ =>
          }
        }
      }.start()
    }
  }
  private def retainNonDaemon = if (retained.getAndIncrement == 0) {
    shutdownLatch = new CountDownLatch(1)
    new Thread("HawtDispatch Non-Daemon") {
      override def run = {
        try {
          shutdownLatch.await
        } catch {
          case _ =>
        }
      }
    }.start()
  }

  private def releaseNonDaemon = {
    if( retained.decrementAndGet == 0 ) {
      shutdownLatch.countDown
      shutdownLatch = null
    }
  }
  private def releaseNonDaemon = if (retained.decrementAndGet == 0) {
    shutdownLatch.countDown
    shutdownLatch = null
  }

  /**
   * @return the mailbox associated with the actor
   */
  private def mailbox(actorRef: ActorRef) = {
    actorRef.mailbox.asInstanceOf[HawtDispatcherMailbox]
  }
  private def mailbox(actorRef: ActorRef) = actorRef.mailbox.asInstanceOf[HawtDispatcherMailbox]

  /**
   * @return the dispatch queue associated with the actor
   */
  def queue(actorRef: ActorRef) = {
    mailbox(actorRef).queue
  }
  def queue(actorRef: ActorRef) = mailbox(actorRef).queue
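retainNonDaemon and releaseNonDaemon implement a common keep-alive idiom: HawtDispatch's worker threads are daemons, so one reference-counted non-daemon thread parked on a latch is what keeps the JVM alive while at least one dispatcher is started. The idiom in isolation (all names here are illustrative, not part of any API):

    import java.util.concurrent.CountDownLatch
    import java.util.concurrent.atomic.AtomicInteger

    object KeepAlive {
      private val refs = new AtomicInteger
      @volatile private var latch: CountDownLatch = _

      def retain(): Unit = if (refs.getAndIncrement == 0) {
        latch = new CountDownLatch(1)
        val t = new Thread("keep-alive") {
          override def run = latch.await() // parks; being non-daemon it keeps the JVM up
        }
        t.setDaemon(false)
        t.start()
      }

      def release(): Unit = if (refs.decrementAndGet == 0) latch.countDown()
    }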

  /**
   * <p>
@@ -71,13 +63,11 @@ object HawtDispatcher {
   *
   * @return true if the actor was pinned
   */
  def pin(actorRef: ActorRef) = {
    actorRef.mailbox match {
      case x:HawtDispatcherMailbox=>
        x.queue.setTargetQueue( getRandomThreadQueue )
        true
      case _ => false
    }
  def pin(actorRef: ActorRef) = actorRef.mailbox match {
    case x: HawtDispatcherMailbox =>
      x.queue.setTargetQueue( getRandomThreadQueue )
      true
    case _ => false
  }

  /**
@@ -91,19 +81,14 @@ object HawtDispatcher {
   * </p>
   * @return true if the actor was unpinned
   */
  def unpin(actorRef: ActorRef) = {
    target(actorRef, globalQueue)
  }
  def unpin(actorRef: ActorRef) = target(actorRef, globalQueue)

  /**
   * @return true if the actor was pinned to a thread.
   */
  def pinned(actorRef: ActorRef):Boolean = {
    actorRef.mailbox match {
      case x:HawtDispatcherMailbox=>
        x.queue.getTargetQueue.getQueueType == QueueType.THREAD_QUEUE
      case _ => false
    }
  def pinned(actorRef: ActorRef):Boolean = actorRef.mailbox match {
    case x: HawtDispatcherMailbox => x.queue.getTargetQueue.getQueueType == QueueType.THREAD_QUEUE
    case _ => false
  }

  /**
@@ -117,15 +102,12 @@ object HawtDispatcher {
   * </p>
   * @return true if the actor was unpinned
   */
  def target(actorRef: ActorRef, parent:DispatchQueue) = {
    actorRef.mailbox match {
      case x:HawtDispatcherMailbox=>
        x.queue.setTargetQueue( parent )
        true
      case _ => false
    }
  def target(actorRef: ActorRef, parent: DispatchQueue) = actorRef.mailbox match {
    case x: HawtDispatcherMailbox =>
      x.queue.setTargetQueue(parent)
      true
    case _ => false
  }

}
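Putting the three helpers together (myActor is assumed to be an ActorRef whose dispatcher is a HawtDispatcher):

    if (HawtDispatcher.pin(myActor))           // bind the actor's queue to one dispatcher thread
      assert(HawtDispatcher.pinned(myActor))

    HawtDispatcher.unpin(myActor)              // re-target the global (work-sharing) queue
    assert(!HawtDispatcher.pinned(myActor))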

/**
@@ -156,25 +138,22 @@ object HawtDispatcher {
 *
 * @author <a href="http://hiramchirino.com">Hiram Chirino</a>
 */
class HawtDispatcher(val aggregate:Boolean=true, val parent:DispatchQueue=globalQueue) extends MessageDispatcher {
class HawtDispatcher(val aggregate: Boolean = true, val parent: DispatchQueue = globalQueue) extends MessageDispatcher {
  import HawtDispatcher._

  private val active = new AtomicBoolean(false)

  def start = {
    if( active.compareAndSet(false, true) ) {
      retainNonDaemon
    }
  }
  val mailboxType: Option[MailboxType] = None

  def start = if (active.compareAndSet(false, true)) retainNonDaemon

  def shutdown = {
    if( active.compareAndSet(true, false) ) {
      releaseNonDaemon
    }
  }
  def execute(task: Runnable) {}

  def shutdown = if (active.compareAndSet(true, false)) releaseNonDaemon

  def isShutdown = !active.get

  def dispatch(invocation: MessageInvocation) = if(active.get()) {
  def dispatch(invocation: MessageInvocation) = if (active.get()) {
    mailbox(invocation.receiver).dispatch(invocation)
  } else {
    log.warning("%s is shut down,\n\tignoring the messages sent to\n\t%s", toString, invocation.receiver)
@@ -191,11 +170,18 @@ class HawtDispatcher(val aggregate:Boolean=true, val parent:DispatchQueue=global
    else new HawtDispatcherMailbox(queue)
  }

  override def toString = "HawtDispatchEventDrivenDispatcher"
  def createTransientMailbox(actorRef: ActorRef, mailboxType: TransientMailboxType): AnyRef = null.asInstanceOf[AnyRef]

  /**
   * Creates and returns a durable mailbox for the given actor.
   */
  protected def createDurableMailbox(actorRef: ActorRef, mailboxType: DurableMailboxType): AnyRef = null.asInstanceOf[AnyRef]

  override def toString = "HawtDispatcher"
}

class HawtDispatcherMailbox(val queue:DispatchQueue) {
  def dispatch(invocation: MessageInvocation):Unit = {
class HawtDispatcherMailbox(val queue: DispatchQueue) {
  def dispatch(invocation: MessageInvocation) {
    queue {
      invocation.invoke
    }
@@ -207,14 +193,10 @@ class AggregatingHawtDispatcherMailbox(queue:DispatchQueue) extends HawtDispatch
  source.setEventHandler (^{drain_source} )
  source.resume

  private def drain_source = {
    source.getData.foreach { invocation =>
      invocation.invoke
    }
  }
  private def drain_source = source.getData.foreach(_.invoke)

  override def dispatch(invocation: MessageInvocation):Unit = {
    if ( getCurrentQueue == null ) {
  override def dispatch(invocation: MessageInvocation) {
    if (getCurrentQueue eq null) {
      // we are being called from a non-hawtdispatch thread, can't aggregate
      // its events
      super.dispatch(invocation)
akka-actor/src/main/scala/dispatch/MailboxHandling.scala (new file, 107 lines)

@@ -0,0 +1,107 @@
/**
 * Copyright (C) 2009-2010 Scalable Solutions AB <http://scalablesolutions.se>
 */

package se.scalablesolutions.akka.dispatch

import se.scalablesolutions.akka.actor.{Actor, ActorType, ActorRef, ActorInitializationException}
import se.scalablesolutions.akka.util.{SimpleLock, Duration, HashCode, Logging}
import se.scalablesolutions.akka.util.ReflectiveAccess.EnterpriseModule
import se.scalablesolutions.akka.AkkaException

import java.util.{Queue, List}
import java.util.concurrent._
import concurrent.forkjoin.LinkedTransferQueue

class MessageQueueAppendFailedException(message: String) extends AkkaException(message)

/**
 * @author <a href="http://jonasboner.com">Jonas Bonér</a>
 */
trait MessageQueue {
  val dispatcherLock = new SimpleLock
  def enqueue(handle: MessageInvocation)
  def dequeue(): MessageInvocation
  def size: Int
  def isEmpty: Boolean
}

/**
 * Mailbox configuration.
 */
sealed trait MailboxType

abstract class TransientMailboxType(val blocking: Boolean = false) extends MailboxType
case class UnboundedMailbox(block: Boolean = false) extends TransientMailboxType(block)
case class BoundedMailbox(
  block: Boolean = false,
  val capacity: Int = { if (Dispatchers.MAILBOX_CAPACITY < 0) Int.MaxValue else Dispatchers.MAILBOX_CAPACITY },
  val pushTimeOut: Duration = Dispatchers.MAILBOX_PUSH_TIME_OUT) extends TransientMailboxType(block) {
  if (capacity < 0) throw new IllegalArgumentException("The capacity for BoundedMailbox can not be negative")
  if (pushTimeOut eq null) throw new IllegalArgumentException("The push time-out for BoundedMailbox can not be null")
}

abstract class DurableMailboxType(val serializer: EnterpriseModule.Serializer) extends MailboxType {
  if (serializer eq null) throw new IllegalArgumentException("The serializer for DurableMailboxType can not be null")
}
case class FileBasedDurableMailbox(ser: EnterpriseModule.Serializer) extends DurableMailboxType(ser)
case class RedisBasedDurableMailbox(ser: EnterpriseModule.Serializer) extends DurableMailboxType(ser)
case class BeanstalkBasedDurableMailbox(ser: EnterpriseModule.Serializer) extends DurableMailboxType(ser)
case class ZooKeeperBasedDurableMailbox(ser: EnterpriseModule.Serializer) extends DurableMailboxType(ser)
case class AMQPBasedDurableMailbox(ser: EnterpriseModule.Serializer) extends DurableMailboxType(ser)
case class JMSBasedDurableMailbox(ser: EnterpriseModule.Serializer) extends DurableMailboxType(ser)

class DefaultUnboundedMessageQueue(blockDequeue: Boolean)
  extends LinkedBlockingQueue[MessageInvocation] with MessageQueue {

  final def enqueue(handle: MessageInvocation) {
    this add handle
  }

  final def dequeue(): MessageInvocation = {
    if (blockDequeue) this.take()
    else this.poll()
  }
}

class DefaultBoundedMessageQueue(capacity: Int, pushTimeOut: Duration, blockDequeue: Boolean)
  extends LinkedBlockingQueue[MessageInvocation](capacity) with MessageQueue {

  final def enqueue(handle: MessageInvocation) {
    if (pushTimeOut.toMillis > 0) {
      if (!this.offer(handle, pushTimeOut.length, pushTimeOut.unit))
        throw new MessageQueueAppendFailedException("Couldn't enqueue message " + handle + " to " + toString)
    } else this put handle
  }

  final def dequeue(): MessageInvocation =
    if (blockDequeue) this.take()
    else this.poll()
}

/**
 * @author <a href="http://jonasboner.com">Jonas Bonér</a>
 */
trait MailboxFactory {

  val mailboxType: Option[MailboxType]

  /**
   * Creates a MessageQueue (Mailbox) with the specified properties.
   */
  protected def createMailbox(actorRef: ActorRef): AnyRef =
    mailboxType.getOrElse(throw new IllegalStateException("No mailbox type defined")) match {
      case mb: TransientMailboxType => createTransientMailbox(actorRef, mb)
      case mb: DurableMailboxType   => createDurableMailbox(actorRef, mb)
    }

  /**
   * Creates and returns a transient mailbox for the given actor.
   */
  protected def createTransientMailbox(actorRef: ActorRef, mailboxType: TransientMailboxType): AnyRef

  /**
   * Creates and returns a durable mailbox for the given actor.
   */
  protected def createDurableMailbox(actorRef: ActorRef, mailboxType: DurableMailboxType): AnyRef
}
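These mailbox types are plain configuration values; a MailboxFactory-based dispatcher turns them into concrete queues. A small construction sketch (positional arguments, matching the case classes above):

    import se.scalablesolutions.akka.util.Duration
    import java.util.concurrent.TimeUnit

    val unbounded = UnboundedMailbox()  // non-blocking dequeue by default

    // at most 1000 queued messages; senders wait up to 100 ms, after which
    // enqueue fails with MessageQueueAppendFailedException
    val bounded = BoundedMailbox(true, 1000, Duration(100, TimeUnit.MILLISECONDS))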
@@ -4,14 +4,15 @@

package se.scalablesolutions.akka.dispatch

import se.scalablesolutions.akka.actor.{Actor, ActorRef, ActorInitializationException}
import se.scalablesolutions.akka.actor.{Actor, ActorRef, Uuid, ActorInitializationException}
import se.scalablesolutions.akka.util.{SimpleLock, Duration, HashCode, Logging}
import se.scalablesolutions.akka.util.ReflectiveAccess.EnterpriseModule
import se.scalablesolutions.akka.AkkaException

import org.multiverse.commitbarriers.CountDownCommitBarrier
import se.scalablesolutions.akka.AkkaException

import java.util.{Queue, List}
import java.util.concurrent._
import se.scalablesolutions.akka.actor.Uuid
import se.scalablesolutions.akka.util.{SimpleLock, Duration, HashCode, Logging}

/**
 * @author <a href="http://jonasboner.com">Jonas Bonér</a>
@@ -21,30 +22,29 @@ final class MessageInvocation(val receiver: ActorRef,
                              val sender: Option[ActorRef],
                              val senderFuture: Option[CompletableFuture[Any]],
                              val transactionSet: Option[CountDownCommitBarrier]) {
  if (receiver eq null) throw new IllegalArgumentException("receiver is null")
  if (receiver eq null) throw new IllegalArgumentException("Receiver can't be null")

  def invoke = try {
    receiver.invoke(this)
  } catch {
    case e: NullPointerException => throw new ActorInitializationException(
      "Don't call 'self ! message' in the Actor's constructor (e.g. body of the class).")
      "Don't call 'self ! message' in the Actor's constructor (in Scala this means in the body of the class).")
  }

  override def hashCode(): Int = synchronized {
  override def hashCode(): Int = {
    var result = HashCode.SEED
    result = HashCode.hash(result, receiver.actor)
    result = HashCode.hash(result, message.asInstanceOf[AnyRef])
    result
  }

  override def equals(that: Any): Boolean = synchronized {
    that != null &&
  override def equals(that: Any): Boolean = {
    that.isInstanceOf[MessageInvocation] &&
    that.asInstanceOf[MessageInvocation].receiver.actor == receiver.actor &&
    that.asInstanceOf[MessageInvocation].message == message
  }

  override def toString = synchronized {
  override def toString = {
    "MessageInvocation[" +
      "\n\tmessage = " + message +
      "\n\treceiver = " + receiver +
@@ -55,83 +55,26 @@ final class MessageInvocation(val receiver: ActorRef,
  }
}

class MessageQueueAppendFailedException(message: String) extends AkkaException(message)

/**
 * @author <a href="http://jonasboner.com">Jonas Bonér</a>
 */
trait MessageQueue {
  val dispatcherLock = new SimpleLock
  def enqueue(handle: MessageInvocation)
  def dequeue(): MessageInvocation
  def size: Int
  def isEmpty: Boolean
}

/* Tells the dispatcher that it should create a bounded mailbox with the specified push timeout
 * (If capacity > 0)
 */
case class MailboxConfig(capacity: Int, pushTimeOut: Option[Duration], blockingDequeue: Boolean) {

  /**
   * Creates a MessageQueue (Mailbox) with the specified properties
   * bounds = whether the mailbox should be bounded (< 0 means unbounded)
   * pushTime = only used if bounded, indicates if and how long an enqueue should block
   * blockDequeue = whether dequeues should block or not
   *
   * The bounds + pushTime generates a MessageQueueAppendFailedException if enqueue times out
   */
  def newMailbox(bounds: Int = capacity,
                 pushTime: Option[Duration] = pushTimeOut,
                 blockDequeue: Boolean = blockingDequeue) : MessageQueue =
    if (capacity > 0) new DefaultBoundedMessageQueue(bounds,pushTime,blockDequeue)
    else new DefaultUnboundedMessageQueue(blockDequeue)
}

class DefaultUnboundedMessageQueue(blockDequeue: Boolean) extends LinkedBlockingQueue[MessageInvocation] with MessageQueue {
  final def enqueue(handle: MessageInvocation) {
    this add handle
  }

  final def dequeue(): MessageInvocation =
    if (blockDequeue) this.take()
    else this.poll()
}

class DefaultBoundedMessageQueue(capacity: Int, pushTimeOut: Option[Duration], blockDequeue: Boolean) extends LinkedBlockingQueue[MessageInvocation](capacity) with MessageQueue {
  final def enqueue(handle: MessageInvocation) {
    if (pushTimeOut.isDefined) {
      if(!this.offer(handle,pushTimeOut.get.length,pushTimeOut.get.unit))
        throw new MessageQueueAppendFailedException("Couldn't enqueue message " + handle + " to " + toString)
    }
    else {
      this put handle
    }
  }

  final def dequeue(): MessageInvocation =
    if (blockDequeue) this.take()
    else this.poll()

}

/**
 * @author <a href="http://jonasboner.com">Jonas Bonér</a>
 */
trait MessageDispatcher extends Logging {
trait MessageDispatcher extends MailboxFactory with Logging {

  protected val uuids = new ConcurrentSkipListSet[Uuid]

  def dispatch(invocation: MessageInvocation): Unit

  def dispatch(invocation: MessageInvocation)
  def execute(task: Runnable): Unit

  def start
  def start: Unit

  def shutdown
  def shutdown: Unit

  def register(actorRef: ActorRef) {
    if(actorRef.mailbox eq null)
      actorRef.mailbox = createMailbox(actorRef)
    if (actorRef.mailbox eq null) actorRef.mailbox = createMailbox(actorRef)
    uuids add actorRef.uuid
  }

  def unregister(actorRef: ActorRef) = {
    uuids remove actorRef.uuid
    actorRef.mailbox = null
@@ -145,10 +88,5 @@ trait MessageDispatcher extends Logging {
  /**
   * Returns the size of the mailbox for the specified actor
   */
  def mailboxSize(actorRef: ActorRef):Int

  /**
   * Creates and returns a mailbox for the given actor
   */
  protected def createMailbox(actorRef: ActorRef): AnyRef = null
  def mailboxSize(actorRef: ActorRef): Int
}
@@ -4,13 +4,40 @@

package se.scalablesolutions.akka.dispatch

import java.util.Queue

import se.scalablesolutions.akka.actor.{Actor, ActorRef}
import se.scalablesolutions.akka.config.Config.config
import concurrent.forkjoin.{TransferQueue, LinkedTransferQueue}
import se.scalablesolutions.akka.util.Duration

import java.util.Queue
import java.util.concurrent.{ConcurrentLinkedQueue, BlockingQueue, TimeUnit, LinkedBlockingQueue}

/**
 * Dedicates a unique thread for each actor passed in as reference. Served through its messageQueue.
 *
 * @author <a href="http://jonasboner.com">Jonas Bonér</a>
 */
class ThreadBasedDispatcher(private val actor: ActorRef, _mailboxType: MailboxType)
  extends ExecutorBasedEventDrivenDispatcher(
    actor.getClass.getName + ":" + actor.uuid,
    Dispatchers.THROUGHPUT,
    -1,
    _mailboxType,
    ThreadBasedDispatcher.oneThread) {

  def this(actor: ActorRef) = this(actor, BoundedMailbox(true)) // For Java API

  def this(actor: ActorRef, capacity: Int) = this(actor, BoundedMailbox(true, capacity))

  def this(actor: ActorRef, capacity: Int, pushTimeOut: Duration) = this(actor, BoundedMailbox(true, capacity, pushTimeOut))

  override def register(actorRef: ActorRef) = {
    if (actorRef != actor) throw new IllegalArgumentException("Cannot register to anyone but " + actor)
    super.register(actorRef)
  }

  override def toString = "ThreadBasedDispatcher[" + name + "]"
}
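In application code the dispatcher is normally obtained through the Dispatchers factory rather than constructed directly:

    import se.scalablesolutions.akka.actor.Actor
    import se.scalablesolutions.akka.dispatch.Dispatchers

    class PinnedActor extends Actor {
      // a dedicated thread plus a blocking, bounded mailbox for this one actor
      self.dispatcher = Dispatchers.newThreadBasedDispatcher(self)
      def receive = { case msg => println("got: " + msg) }
    }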

object ThreadBasedDispatcher {
  def oneThread(b: ThreadPoolBuilder) {
    b setCorePoolSize 1
@@ -19,28 +46,3 @@ object ThreadBasedDispatcher {
  }
}

/**
 * Dedicates a unique thread for each actor passed in as reference. Served through its messageQueue.
 *
 * @author <a href="http://jonasboner.com">Jonas Bonér</a>
 */
class ThreadBasedDispatcher(private val actor: ActorRef,
                            val mailboxConfig: MailboxConfig
  ) extends ExecutorBasedEventDrivenDispatcher(
    actor.getClass.getName + ":" + actor.uuid,
    Dispatchers.THROUGHPUT,
    -1,
    mailboxConfig,
    ThreadBasedDispatcher.oneThread) {
  def this(actor: ActorRef, capacity: Int) = this(actor,MailboxConfig(capacity,None,true))
  def this(actor: ActorRef) = this(actor, Dispatchers.MAILBOX_CAPACITY)// For Java

  override def register(actorRef: ActorRef) = {
    if(actorRef != actor)
      throw new IllegalArgumentException("Cannot register to anyone but " + actor)

    super.register(actorRef)
  }

  override def toString = "ThreadBasedDispatcher[" + name + "]"
}
@@ -30,6 +30,8 @@ trait ThreadPoolBuilder extends Logging {

  protected var executor: ExecutorService = _

  def execute(task: Runnable) = executor execute task

  def isShutdown = executor.isShutdown

  def buildThreadPool(): Unit = synchronized {
akka-actor/src/main/scala/japi/JavaAPI.scala (new file, 78 lines)

@@ -0,0 +1,78 @@
package se.scalablesolutions.akka.japi

/**
 * A Function interface. Used to create first-class functions in Java (sort of).
 */
trait Function[T,R] {
  def apply(param: T): R
}

/** A Procedure is like a Function, but it doesn't produce a return value.
 */
trait Procedure[T] {
  def apply(param: T): Unit
}

/**
 * An executable piece of code that takes no parameters and doesn't return any value.
 */
trait SideEffect {
  def apply: Unit
}

/**
 * This class represents optional values. Instances of <code>Option</code>
 * are either instances of case class <code>Some</code> or the case
 * object <code>None</code>.
 * <p>
 * Java API
 */
sealed abstract class Option[A] extends java.lang.Iterable[A] {
  import scala.collection.JavaConversions._

  def get: A
  def isEmpty: Boolean
  def isDefined = !isEmpty
  def asScala: scala.Option[A]
  def iterator = if (isEmpty) Iterator.empty else Iterator.single(get)
}

object Option {
  /**
   * <code>Option</code> factory that creates <code>Some</code>
   */
  def some[A](v: A): Option[A] = Some(v)

  /**
   * <code>Option</code> factory that creates <code>None</code>
   */
  def none[A] = None.asInstanceOf[Option[A]]

  /**
   * <code>Option</code> factory that creates <code>None</code> if
   * <code>v</code> is <code>null</code>, <code>Some(v)</code> otherwise.
   */
  def option[A](v: A): Option[A] = if (v == null) none else some(v)

  /**
   * Class <code>Some[A]</code> represents existing values of type
   * <code>A</code>.
   */
  final case class Some[A](v: A) extends Option[A] {
    def get = v
    def isEmpty = false
    def asScala = scala.Some(v)
  }

  /**
   * This case object represents non-existent values.
   */
  private case object None extends Option[Nothing] {
    def get = throw new NoSuchElementException("None.get")
    def isEmpty = true
    def asScala = scala.None
  }

  implicit def java2ScalaOption[A](o: Option[A]): scala.Option[A] = o.asScala
  implicit def scala2JavaOption[A](o: scala.Option[A]): Option[A] = if (o.isDefined) some(o.get) else none[A]
}
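A sketch of the bridge in use from Scala (Java callers use the factories directly, as JavaAPITestBase further down shows):

    import se.scalablesolutions.akka.japi.Option

    val jSome = Option.some("abc")   // japi Some
    val jNone = Option.none[String]  // shared None singleton
    assert(jSome.isDefined && jNone.isEmpty)
    assert(jSome.asScala == scala.Some("abc"))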
@@ -165,7 +165,6 @@ object Transaction {
  }
  */
  override def equals(that: Any): Boolean = synchronized {
    that != null &&
    that.isInstanceOf[Transaction] &&
    that.asInstanceOf[Transaction].id == this.id
  }
akka-actor/src/main/scala/util/Address.scala (new file, 23 lines)

@@ -0,0 +1,23 @@
/**
 * Copyright (C) 2009-2010 Scalable Solutions AB <http://scalablesolutions.se>
 */
package se.scalablesolutions.akka.util

object Address {
  def apply(hostname: String, port: Int) = new Address(hostname, port)
}

class Address(val hostname: String, val port: Int) {
  override def hashCode: Int = {
    var result = HashCode.SEED
    result = HashCode.hash(result, hostname)
    result = HashCode.hash(result, port)
    result
  }

  override def equals(that: Any): Boolean = {
    that.isInstanceOf[Address] &&
    that.asInstanceOf[Address].hostname == hostname &&
    that.asInstanceOf[Address].port == port
  }
}
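Because equality and hashing are structural, two Address instances built from the same host and port behave as one key, which is what makes Address usable for hash-based lookups:

    val a1 = Address("localhost", 9999)
    val a2 = Address("localhost", 9999)
    assert(a1 == a2 && a1.hashCode == a2.hashCode)

    val nodes = scala.collection.mutable.Map(a1 -> "node-a")
    assert(nodes.contains(a2))  // a2 finds the entry stored under a1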
@@ -11,7 +11,7 @@ import java.security.MessageDigest
 */
object Helpers extends Logging {

  implicit def null2Option[T](t: T): Option[T] = if (t != null) Some(t) else None
  implicit def null2Option[T](t: T): Option[T] = Option(t)

  def intToBytes(value: Int): Array[Byte] = {
    val bytes = new Array[Byte](4)

@@ -41,7 +41,7 @@ object Helpers extends Logging {
   * if the actual type is not assignable from the given one.
   */
  def narrow[T](o: Option[Any]): Option[T] = {
    require(o != null, "Option to be narrowed must not be null!")
    require((o ne null), "Option to be narrowed must not be null!")
    o.asInstanceOf[Option[T]]
  }
@@ -1,23 +0,0 @@
package se.scalablesolutions.akka.util

/** A Function interface
 * Used to create first-class-functions is Java (sort of)
 * Java API
 */
trait Function[T,R] {
  def apply(param: T): R
}

/** A Procedure is like a Function, but it doesn't produce a return value
 * Java API
 */
trait Procedure[T] {
  def apply(param: T): Unit
}

/**
 * An executable piece of code that takes no parameters and doesn't return any value
 */
trait SideEffect {
  def apply: Unit
}
@@ -45,6 +45,11 @@ trait ListenerManagement extends Logging {
   */
  def hasListeners: Boolean = !listeners.isEmpty

  /**
   * Checks if a specific listener is registered.
   */
  def hasListener(listener: ActorRef): Boolean = listeners.contains(listener)

  protected def notifyListeners(message: => Any) {
    if (hasListeners) {
      val msg = message
@@ -111,4 +111,62 @@ class SimpleLock {
  def unlock() {
    acquired.set(false)
  }
}

/**
 * An atomic switch that can be either on or off
 */
class Switch(startAsOn: Boolean = false) {
  private val switch = new AtomicBoolean(startAsOn)

  protected def transcend(from: Boolean, action: => Unit): Boolean = {
    if (switch.compareAndSet(from, !from)) {
      try {
        action
      } catch {
        case t =>
          switch.compareAndSet(!from, from) // revert status
          throw t
      }
      true
    } else false
  }

  def switchOff(action: => Unit): Boolean = transcend(from = true, action)
  def switchOn(action: => Unit): Boolean = transcend(from = false, action)

  def ifOnYield[T](action: => T): Option[T] = {
    if (switch.get)
      Some(action)
    else
      None
  }

  def ifOffYield[T](action: => T): Option[T] = {
    if (!switch.get)
      Some(action)
    else
      None
  }

  def ifOn(action: => Unit): Boolean = {
    if (switch.get) {
      action
      true
    }
    else
      false
  }

  def ifOff(action: => Unit): Boolean = {
    if (!switch.get) {
      action
      true
    }
    else
      false
  }

  def isOn = switch.get
  def isOff = !isOn
}
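Usage sketch for Switch: the block passed to switchOn/switchOff runs only when the transition actually happens, and a throwing block reverts the state:

    val running = new Switch(false)

    running.switchOn { println("starting service") }  // returns false and skips the block if already on
    running.ifOnYield("status: up") foreach println   // Some("status: up") while on

    running.switchOff { println("stopping service") }
    assert(running.isOff)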
@@ -4,30 +4,32 @@

package se.scalablesolutions.akka.util

import se.scalablesolutions.akka.actor.{ActorRef, IllegalActorStateException, ActorType}
import se.scalablesolutions.akka.dispatch.{Future, CompletableFuture}
import se.scalablesolutions.akka.actor.{ActorRef, IllegalActorStateException, ActorType, Uuid}
import se.scalablesolutions.akka.dispatch.{Future, CompletableFuture, MessageInvocation}
import se.scalablesolutions.akka.config.{Config, ModuleNotAvailableException}
import se.scalablesolutions.akka.actor.Uuid
import java.net.InetSocketAddress
import se.scalablesolutions.akka.stm.Transaction
import se.scalablesolutions.akka.AkkaException

import java.net.InetSocketAddress

/**
 * Helper class for reflective access to different modules in order to allow optional loading of modules.
 *
 * @author <a href="http://jonasboner.com">Jonas Bonér</a>
 */
object ReflectiveAccess {
object ReflectiveAccess extends Logging {

  val loader = getClass.getClassLoader

  lazy val isRemotingEnabled = RemoteClientModule.isRemotingEnabled
  lazy val isTypedActorEnabled = TypedActorModule.isTypedActorEnabled
  lazy val isJtaEnabled = JtaModule.isJtaEnabled
  lazy val isEnterpriseEnabled = EnterpriseModule.isEnterpriseEnabled

  def ensureRemotingEnabled = RemoteClientModule.ensureRemotingEnabled
  def ensureTypedActorEnabled = TypedActorModule.ensureTypedActorEnabled
  def ensureJtaEnabled = JtaModule.ensureJtaEnabled
  def ensureEnterpriseEnabled = EnterpriseModule.ensureEnterpriseEnabled

  /**
   * Reflective access to the RemoteClient module.
@@ -63,7 +65,7 @@ object ReflectiveAccess {
    "Can't load the remoting module, make sure that akka-remote.jar is on the classpath")

  val remoteClientObjectInstance: Option[RemoteClientObject] =
    getObject("se.scalablesolutions.akka.remote.RemoteClient$")
    getObjectFor("se.scalablesolutions.akka.remote.RemoteClient$")

  def register(address: InetSocketAddress, uuid: Uuid) = {
    ensureRemotingEnabled

@@ -121,10 +123,10 @@ object ReflectiveAccess {
  }

  val remoteServerObjectInstance: Option[RemoteServerObject] =
    getObject("se.scalablesolutions.akka.remote.RemoteServer$")
    getObjectFor("se.scalablesolutions.akka.remote.RemoteServer$")

  val remoteNodeObjectInstance: Option[RemoteNodeObject] =
    getObject("se.scalablesolutions.akka.remote.RemoteNode$")
    getObjectFor("se.scalablesolutions.akka.remote.RemoteNode$")

  def registerActor(address: InetSocketAddress, uuid: Uuid, actorRef: ActorRef) = {
    ensureRemotingEnabled

@@ -152,6 +154,9 @@ object ReflectiveAccess {
  type TypedActorObject = {
    def isJoinPoint(message: Any): Boolean
    def isJoinPointAndOneWay(message: Any): Boolean
    def actorFor(proxy: AnyRef): Option[ActorRef]
    def proxyFor(actorRef: ActorRef): Option[AnyRef]
    def stop(anyRef: AnyRef) : Unit
  }

  lazy val isTypedActorEnabled = typedActorObjectInstance.isDefined

@@ -160,7 +165,7 @@ object ReflectiveAccess {
    "Can't load the typed actor module, make sure that akka-typed-actor.jar is on the classpath")

  val typedActorObjectInstance: Option[TypedActorObject] =
    getObject("se.scalablesolutions.akka.actor.TypedActor$")
    getObjectFor("se.scalablesolutions.akka.actor.TypedActor$")

  def resolveFutureIfMessageIsJoinPoint(message: Any, future: Future[_]): Boolean = {
    ensureTypedActorEnabled

@@ -189,7 +194,7 @@ object ReflectiveAccess {
    "Can't load the typed actor module, make sure that akka-jta.jar is on the classpath")

  val transactionContainerObjectInstance: Option[TransactionContainerObject] =
    getObject("se.scalablesolutions.akka.actor.TransactionContainer$")
    getObjectFor("se.scalablesolutions.akka.actor.TransactionContainer$")

  def createTransactionContainer: TransactionContainer = {
    ensureJtaEnabled
@@ -197,36 +202,99 @@ object ReflectiveAccess {
    }
  }

  object EnterpriseModule {

    type Mailbox = {
      def enqueue(message: MessageInvocation)
      def dequeue: MessageInvocation
    }

    type Serializer = {
      def toBinary(obj: AnyRef): Array[Byte]
      def fromBinary(bytes: Array[Byte], clazz: Option[Class[_]]): AnyRef
    }

    lazy val isEnterpriseEnabled = clusterObjectInstance.isDefined

    val clusterObjectInstance: Option[AnyRef] =
      getObjectFor("se.scalablesolutions.akka.cluster.Cluster$")

    val serializerClass: Option[Class[_]] =
      getClassFor("se.scalablesolutions.akka.serialization.Serializer")

    def ensureEnterpriseEnabled = if (!isEnterpriseEnabled) throw new ModuleNotAvailableException(
      "Feature is only available in Akka Enterprise edition")

    def createFileBasedMailbox(actorRef: ActorRef): Mailbox = createMailbox("se.scalablesolutions.akka.actor.mailbox.FileBasedMailbox", actorRef)

    def createZooKeeperBasedMailbox(actorRef: ActorRef): Mailbox = createMailbox("se.scalablesolutions.akka.actor.mailbox.ZooKeeperBasedMailbox", actorRef)

    def createBeanstalkBasedMailbox(actorRef: ActorRef): Mailbox = createMailbox("se.scalablesolutions.akka.actor.mailbox.BeanstalkBasedMailbox", actorRef)

    def createRedisBasedMailbox(actorRef: ActorRef): Mailbox = createMailbox("se.scalablesolutions.akka.actor.mailbox.RedisBasedMailbox", actorRef)

    private def createMailbox(mailboxClassname: String, actorRef: ActorRef): Mailbox = {
      ensureEnterpriseEnabled
      createInstance(
        mailboxClassname,
        Array(classOf[ActorRef]),
        Array(actorRef).asInstanceOf[Array[AnyRef]],
        loader)
        .getOrElse(throw new IllegalActorStateException("Could not create durable mailbox [" + mailboxClassname + "] for actor [" + actorRef + "]"))
        .asInstanceOf[Mailbox]
    }
  }

  val noParams = Array[Class[_]]()
  val noArgs = Array[AnyRef]()

  def createInstance[T](clazz: Class[_],
                        params: Array[Class[_]],
                        args: Array[AnyRef]): Option[T] = try {
    assert(clazz ne null)
    assert(params ne null)
    assert(args ne null)
    val ctor = clazz.getDeclaredConstructor(params: _*)
    ctor.setAccessible(true)
    Some(ctor.newInstance(args: _*).asInstanceOf[T])
  } catch {
    case e: Exception => None
    case e: Exception =>
      log.debug(e, "Could not instantiate class [%s] due to [%s]", clazz.getName, e.getMessage)
      None
  }

  def createInstance[T](fqn: String,
                        params: Array[Class[_]],
                        args: Array[AnyRef],
                        classloader: ClassLoader = loader): Option[T] = try {
    assert(fqn ne null)
    assert(params ne null)
    assert(args ne null)
    val clazz = classloader.loadClass(fqn)
    val ctor = clazz.getDeclaredConstructor(params: _*)
    ctor.setAccessible(true)
    Some(ctor.newInstance(args: _*).asInstanceOf[T])
  } catch {
    case e: Exception => None
    case e: Exception =>
      log.debug(e, "Could not instantiate class [%s] due to [%s]", fqn, e.getMessage)
      None
  }

  def getObject[T](fqn: String, classloader: ClassLoader = loader): Option[T] = try { // obtains a reference to $MODULE$
  def getObjectFor[T](fqn: String, classloader: ClassLoader = loader): Option[T] = try { // obtains a reference to $MODULE$
    assert(fqn ne null)
    val clazz = classloader.loadClass(fqn)
    val instance = clazz.getDeclaredField("MODULE$")
    instance.setAccessible(true)
    Option(instance.get(null).asInstanceOf[T])
  } catch {
    case e: Exception =>
      log.debug(e, "Could not get object [%s] due to [%s]", fqn, e.getMessage)
      None
  }

  def getClassFor[T](fqn: String, classloader: ClassLoader = loader): Option[Class[T]] = try {
    assert(fqn ne null)
    Some(classloader.loadClass(fqn).asInstanceOf[Class[T]])
  } catch {
    case e: Exception => None
  }
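getObjectFor resolves a Scala singleton through the compiler-generated MODULE$ static field, so the FQN must be the object's class name with a trailing $. A sketch with a hypothetical top-level object (package and object name are illustrative):

    // given, somewhere on the classpath:
    //   package com.example
    //   object MyModule { val greeting = "hello" }

    val m: Option[AnyRef] = ReflectiveAccess.getObjectFor[AnyRef]("com.example.MyModule$")
    // Some(MyModule) if the class is visible to the loader, None otherwise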
@@ -0,0 +1,42 @@
package se.scalablesolutions.akka.japi;

import org.junit.Test;

import static org.junit.Assert.*;

public class JavaAPITestBase {

    @Test public void shouldCreateSomeString() {
        Option<String> o = Option.some("abc");
        assertFalse(o.isEmpty());
        assertTrue(o.isDefined());
        assertEquals("abc", o.get());
    }

    @Test public void shouldCreateNone() {
        Option<String> o1 = Option.none();
        assertTrue(o1.isEmpty());
        assertFalse(o1.isDefined());

        Option<Float> o2 = Option.none();
        assertTrue(o2.isEmpty());
        assertFalse(o2.isDefined());
    }

    @Test public void shouldEnterForLoop() {
        for(String s : Option.some("abc")) {
            return;
        }
        fail("for-loop not entered");
    }

    @Test public void shouldNotEnterForLoop() {
        for(Object o : Option.none()) {
            fail("for-loop entered");
        }
    }

    @Test public void shouldBeSingleton() {
        assertSame(Option.none(), Option.none());
    }
}
@@ -10,7 +10,6 @@ import Actor._

object ActorFireForgetRequestReplySpec {
  class ReplyActor extends Actor {
    self.dispatcher = Dispatchers.newThreadBasedDispatcher(self)

    def receive = {
      case "Send" =>

@@ -21,7 +20,7 @@ object ActorFireForgetRequestReplySpec {
  }

  class CrashingTemporaryActor extends Actor {
    self.lifeCycle = Some(LifeCycle(Temporary))
    self.lifeCycle = Temporary

    def receive = {
      case "Die" =>

@@ -31,10 +30,10 @@ object ActorFireForgetRequestReplySpec {
  }

  class SenderActor(replyActor: ActorRef) extends Actor {
    self.dispatcher = Dispatchers.newThreadBasedDispatcher(self)

    def receive = {
      case "Init" => replyActor ! "Send"
      case "Init" =>
        replyActor ! "Send"
      case "Reply" => {
        state.s = "Reply"
        state.finished.await

@@ -84,7 +83,7 @@ class ActorFireForgetRequestReplySpec extends JUnitSuite {
    val actor = actorOf[CrashingTemporaryActor].start
    assert(actor.isRunning)
    actor ! "Die"
    try { state.finished.await(1L, TimeUnit.SECONDS) }
    try { state.finished.await(10L, TimeUnit.SECONDS) }
    catch { case e: TimeoutException => fail("Never got the message") }
    Thread.sleep(100)
    assert(actor.isShutdown)
akka-actor/src/test/scala/actor/actor/ActorRefSpec.scala (new file, 101 lines)

@@ -0,0 +1,101 @@
/**
 * Copyright (C) 2009-2010 Scalable Solutions AB <http://scalablesolutions.se>
 */

package se.scalablesolutions.akka.actor

import org.scalatest.Spec
import org.scalatest.matchers.ShouldMatchers
import org.scalatest.BeforeAndAfterAll
import org.scalatest.junit.JUnitRunner
import org.junit.runner.RunWith

import se.scalablesolutions.akka.actor._
import java.util.concurrent.{CountDownLatch, TimeUnit}

object ActorRefSpec {

  var latch = new CountDownLatch(4)

  class ReplyActor extends Actor {
    var replyTo: Channel[Any] = null

    def receive = {
      case "complexRequest" => {
        replyTo = self.channel
        val worker = Actor.actorOf[WorkerActor].start
        worker ! "work"
      }
      case "complexRequest2" =>
        val worker = Actor.actorOf[WorkerActor].start
        worker ! self.channel
      case "workDone" => replyTo ! "complexReply"
      case "simpleRequest" => self.reply("simpleReply")
    }
  }

  class WorkerActor() extends Actor {
    def receive = {
      case "work" => {
        work
        self.reply("workDone")
        self.stop
      }
      case replyTo: Channel[Any] => {
        work
        replyTo ! "complexReply"
      }
    }

    private def work {
      Thread.sleep(1000)
    }
  }

  class SenderActor(replyActor: ActorRef) extends Actor {

    def receive = {
      case "complex" => replyActor ! "complexRequest"
      case "complex2" => replyActor ! "complexRequest2"
      case "simple" => replyActor ! "simpleRequest"
      case "complexReply" => {
        println("got complex reply")
        latch.countDown
      }
      case "simpleReply" => {
        println("got simple reply")
        latch.countDown
      }
    }
  }
}

@RunWith(classOf[JUnitRunner])
class ActorRefSpec extends
  Spec with
  ShouldMatchers with
  BeforeAndAfterAll {

  import ActorRefSpec._

  describe("ActorRef") {
    it("should support to reply via channel") {
      val serverRef = Actor.actorOf[ReplyActor].start
      val clientRef = Actor.actorOf(new SenderActor(serverRef)).start

      clientRef ! "complex"
      clientRef ! "simple"
      clientRef ! "simple"
      clientRef ! "simple"
      assert(latch.await(4L, TimeUnit.SECONDS))
      latch = new CountDownLatch(4)
      clientRef ! "complex2"
      clientRef ! "simple"
      clientRef ! "simple"
      clientRef ! "simple"
      assert(latch.await(4L, TimeUnit.SECONDS))
      clientRef.stop
      serverRef.stop
    }
  }
}
@@ -6,6 +6,7 @@ import org.junit.Test
import java.util.concurrent.TimeUnit
import org.multiverse.api.latches.StandardLatch
import Actor._
import java.util.concurrent.atomic.AtomicInteger

class ReceiveTimeoutSpec extends JUnitSuite {

@@ -22,6 +23,7 @@ class ReceiveTimeoutSpec extends JUnitSuite {
    }).start

    assert(timeoutLatch.tryAwait(3, TimeUnit.SECONDS))
    timeoutActor.stop
  }

  @Test def swappedReceiveShouldAlsoGetTimout = {

@@ -44,9 +46,10 @@ class ReceiveTimeoutSpec extends JUnitSuite {
    })

    assert(swappedLatch.tryAwait(3, TimeUnit.SECONDS))
    timeoutActor.stop
  }

  @Test def timeoutShouldBeCancelledAfterRegularReceive = {
  @Test def timeoutShouldBeRescheduledAfterRegularReceive = {

    val timeoutLatch = new StandardLatch
    case object Tick

@@ -60,7 +63,30 @@ class ReceiveTimeoutSpec extends JUnitSuite {
    }).start
    timeoutActor ! Tick

    assert(timeoutLatch.tryAwait(2, TimeUnit.SECONDS) == false)
    assert(timeoutLatch.tryAwait(2, TimeUnit.SECONDS) == true)
    timeoutActor.stop
  }

  @Test def timeoutShouldBeTurnedOffIfDesired = {
    val count = new AtomicInteger(0)
    val timeoutLatch = new StandardLatch
    case object Tick
    val timeoutActor = actorOf(new Actor {
      self.receiveTimeout = Some(500L)

      protected def receive = {
        case Tick => ()
        case ReceiveTimeout =>
          timeoutLatch.open
          count.incrementAndGet
          self.receiveTimeout = None
      }
    }).start
    timeoutActor ! Tick

    assert(timeoutLatch.tryAwait(2, TimeUnit.SECONDS) == true)
    assert(count.get === 1)
    timeoutActor.stop
  }

  @Test def timeoutShouldNotBeSentWhenNotSpecified = {

@@ -73,5 +99,6 @@ class ReceiveTimeoutSpec extends JUnitSuite {
    }).start

    assert(timeoutLatch.tryAwait(1, TimeUnit.SECONDS) == false)
    timeoutActor.stop
  }
}
@@ -22,8 +22,7 @@ class RestartStrategySpec extends JUnitSuite {
  def slaveShouldStayDeadAfterMaxRestarts = {

    val boss = actorOf(new Actor{
      self.trapExit = List(classOf[Throwable])
      self.faultHandler = Some(OneForOneStrategy(1, 1000))
      self.faultHandler = OneForOneStrategy(List(classOf[Throwable]), 1, 1000)
      protected def receive = { case _ => () }
    }).start

@@ -75,8 +74,7 @@ class RestartStrategySpec extends JUnitSuite {
  def slaveShouldBeImmortalWithoutMaxRestarts = {

    val boss = actorOf(new Actor{
      self.trapExit = List(classOf[Throwable])
      self.faultHandler = Some(OneForOneStrategy(None, None))
      self.faultHandler = OneForOneStrategy(List(classOf[Throwable]), None, None)
      protected def receive = { case _ => () }
    }).start
@@ -37,8 +37,7 @@ class SupervisorHierarchySpec extends JUnitSuite {
    val workerThree = actorOf(new CountDownActor(countDown))

    val boss = actorOf(new Actor{
      self.trapExit = List(classOf[Throwable])
      self.faultHandler = Some(OneForOneStrategy(5, 1000))
      self.faultHandler = OneForOneStrategy(List(classOf[Throwable]), 5, 1000)

      protected def receive = { case _ => () }
    }).start

@@ -63,8 +62,7 @@ class SupervisorHierarchySpec extends JUnitSuite {
    val countDown = new CountDownLatch(2)
    val crasher = actorOf(new CountDownActor(countDown))
    val boss = actorOf(new Actor{
      self.trapExit = List(classOf[Throwable])
      self.faultHandler = Some(OneForOneStrategy(1, 5000))
      self.faultHandler = OneForOneStrategy(List(classOf[Throwable]), 1, 5000)
      protected def receive = {
        case MaximumNumberOfRestartsWithinTimeRangeReached(_, _, _, _) =>
          countDown.countDown
@@ -58,10 +58,10 @@ class SupervisorMiscSpec extends WordSpec with MustMatchers {
      val sup = Supervisor(
        SupervisorConfig(
          RestartStrategy(OneForOne, 3, 5000, List(classOf[Exception])),
          Supervise(actor1, LifeCycle(Permanent)) ::
          Supervise(actor2, LifeCycle(Permanent)) ::
          Supervise(actor3, LifeCycle(Permanent)) ::
          Supervise(actor4, LifeCycle(Permanent)) ::
          Supervise(actor1, Permanent) ::
          Supervise(actor2, Permanent) ::
          Supervise(actor3, Permanent) ::
          Supervise(actor4, Permanent) ::
          Nil))

      actor1 ! "kill"
@@ -78,7 +78,7 @@ object SupervisorSpec {

  class TemporaryActor extends Actor {
    import self._
    lifeCycle = Some(LifeCycle(Temporary))
    lifeCycle = Temporary
    def receive = {
      case Ping =>
        messageLog.put("ping")

@@ -95,8 +95,7 @@ object SupervisorSpec {
  }

  class Master extends Actor {
    self.trapExit = classOf[Exception] :: Nil
    self.faultHandler = Some(OneForOneStrategy(5, 1000))
    self.faultHandler = OneForOneStrategy(List(classOf[Exception]), 5, 1000)
    val temp = self.spawnLink[TemporaryActor]
    override def receive = {
      case Die => temp !! (Die, 5000)

@@ -506,7 +505,7 @@ class SupervisorSpec extends JUnitSuite {
        RestartStrategy(AllForOne, 3, 5000, List(classOf[Exception])),
        Supervise(
          temporaryActor,
          LifeCycle(Temporary))
          Temporary)
        :: Nil))
  }

@@ -518,7 +517,7 @@ class SupervisorSpec extends JUnitSuite {
        RestartStrategy(AllForOne, 3, 5000, List(classOf[Exception])),
        Supervise(
          pingpong1,
          LifeCycle(Permanent))
          Permanent)
        :: Nil))
  }

@@ -530,7 +529,7 @@ class SupervisorSpec extends JUnitSuite {
        RestartStrategy(OneForOne, 3, 5000, List(classOf[Exception])),
        Supervise(
          pingpong1,
          LifeCycle(Permanent))
          Permanent)
        :: Nil))
  }

@@ -544,15 +543,15 @@ class SupervisorSpec extends JUnitSuite {
        RestartStrategy(AllForOne, 3, 5000, List(classOf[Exception])),
        Supervise(
          pingpong1,
          LifeCycle(Permanent))
          Permanent)
        ::
        Supervise(
          pingpong2,
          LifeCycle(Permanent))
          Permanent)
        ::
        Supervise(
          pingpong3,
          LifeCycle(Permanent))
          Permanent)
        :: Nil))
  }

@@ -566,15 +565,15 @@ class SupervisorSpec extends JUnitSuite {
        RestartStrategy(OneForOne, 3, 5000, List(classOf[Exception])),
        Supervise(
          pingpong1,
          LifeCycle(Permanent))
          Permanent)
        ::
        Supervise(
          pingpong2,
          LifeCycle(Permanent))
          Permanent)
        ::
        Supervise(
          pingpong3,
          LifeCycle(Permanent))
          Permanent)
        :: Nil))
  }

@@ -588,17 +587,17 @@ class SupervisorSpec extends JUnitSuite {
        RestartStrategy(AllForOne, 3, 5000, List(classOf[Exception])),
        Supervise(
          pingpong1,
          LifeCycle(Permanent))
          Permanent)
        ::
        SupervisorConfig(
          RestartStrategy(AllForOne, 3, 5000, Nil),
          Supervise(
            pingpong2,
            LifeCycle(Permanent))
            Permanent)
          ::
          Supervise(
            pingpong3,
            LifeCycle(Permanent))
            Permanent)
          :: Nil)
        :: Nil))
  }
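The recurring change in these specs is one API migration: the trapped exception classes move from self.trapExit into the fault handler itself, the Option wrapper around the handler goes away, and LifeCycle(Permanent) / LifeCycle(Temporary) collapse to the bare Permanent / Temporary values. In the new style a supervising actor reads:

    import se.scalablesolutions.akka.actor.Actor
    import se.scalablesolutions.akka.config.OneForOneStrategy
    import se.scalablesolutions.akka.config.ScalaConfig.Permanent

    class Boss extends Actor {
      // trap Exception; allow at most 5 restarts within 1000 ms
      self.faultHandler = OneForOneStrategy(List(classOf[Exception]), 5, 1000)
      self.lifeCycle = Permanent
      def receive = { case _ => () }
    }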
@@ -15,7 +15,7 @@ object DispatchersSpec {
  import Dispatchers._
  //
  val tipe = "type"
  val keepalivems = "keep-alive-ms"
  val keepalivems = "keep-alive-time"
  val corepoolsizefactor = "core-pool-size-factor"
  val maxpoolsizefactor = "max-pool-size-factor"
  val executorbounds = "executor-bounds"
@@ -68,7 +68,7 @@ class ExecutorBasedEventDrivenDispatcherActorSpec extends JUnitSuite {
  }

  @Test def shouldRespectThroughput {
    val throughputDispatcher = new ExecutorBasedEventDrivenDispatcher("THROUGHPUT",101,0,Dispatchers.MAILBOX_CONFIG, (e) => {
    val throughputDispatcher = new ExecutorBasedEventDrivenDispatcher("THROUGHPUT",101,0,Dispatchers.MAILBOX_TYPE, (e) => {
      e.setCorePoolSize(1)
    })

@@ -103,7 +103,7 @@ class ExecutorBasedEventDrivenDispatcherActorSpec extends JUnitSuite {

  @Test def shouldRespectThroughputDeadline {
    val deadlineMs = 100
    val throughputDispatcher = new ExecutorBasedEventDrivenDispatcher("THROUGHPUT",2,deadlineMs,Dispatchers.MAILBOX_CONFIG, (e) => {
    val throughputDispatcher = new ExecutorBasedEventDrivenDispatcher("THROUGHPUT",2,deadlineMs,Dispatchers.MAILBOX_TYPE, (e) => {
      e.setCorePoolSize(1)
    })
@@ -1,44 +1,44 @@
package se.scalablesolutions.akka.actor.dispatch

import org.scalatest.junit.JUnitSuite

import org.junit.Test

import se.scalablesolutions.akka.actor.Actor
import Actor._
import java.util.concurrent.{BlockingQueue, CountDownLatch, TimeUnit}
import se.scalablesolutions.akka.util.Duration
import se.scalablesolutions.akka.dispatch.{MessageQueueAppendFailedException, MessageInvocation, MailboxConfig, Dispatchers}
import java.util.concurrent.atomic.{AtomicReference}
import se.scalablesolutions.akka.dispatch._
import Actor._

object MailboxConfigSpec {
import java.util.concurrent.{BlockingQueue, CountDownLatch, TimeUnit}
import java.util.concurrent.atomic.AtomicReference

}

class MailboxConfigSpec extends JUnitSuite {
  import MailboxConfigSpec._
class MailboxTypeSpec extends JUnitSuite {
  @Test def shouldDoNothing = assert(true)

  /*
  private val unit = TimeUnit.MILLISECONDS

  @Test def shouldCreateUnboundedQueue = {
    val m = MailboxConfig(-1,None,false)
    assert(m.newMailbox().asInstanceOf[BlockingQueue[MessageInvocation]].remainingCapacity === Integer.MAX_VALUE)
    val m = UnboundedMailbox(false)
    assert(m.newMailbox("uuid").asInstanceOf[BlockingQueue[MessageInvocation]].remainingCapacity === Integer.MAX_VALUE)
  }

  @Test def shouldCreateBoundedQueue = {
    val m = MailboxConfig(1,None,false)
    assert(m.newMailbox().asInstanceOf[BlockingQueue[MessageInvocation]].remainingCapacity === 1)
    val m = BoundedMailbox(blocking = false, capacity = 1)
    assert(m.newMailbox("uuid").asInstanceOf[BlockingQueue[MessageInvocation]].remainingCapacity === 1)
  }

  @Test(expected = classOf[MessageQueueAppendFailedException]) def shouldThrowMessageQueueAppendFailedExceptionWhenTimeOutEnqueue = {
    val m = MailboxConfig(1,Some(Duration(1,unit)),false)
    val m = BoundedMailbox(false, 1, Duration(1, unit))
    val testActor = actorOf( new Actor { def receive = { case _ => }} )
    val mbox = m.newMailbox()
    (1 to 10000) foreach { i => mbox.enqueue(new MessageInvocation(testActor,i,None,None,None)) }
    val mbox = m.newMailbox("uuid")
    (1 to 10000) foreach { i => mbox.enqueue(new MessageInvocation(testActor, i, None, None, None)) }
  }

  @Test def shouldBeAbleToDequeueUnblocking = {
    val m = MailboxConfig(1,Some(Duration(1,unit)),false)
    val mbox = m.newMailbox()
    val m = BoundedMailbox(false, 1, Duration(1, unit))
    val mbox = m.newMailbox("uuid")
    val latch = new CountDownLatch(1)
    val t = new Thread { override def run = {
      mbox.dequeue

@@ -50,4 +50,5 @@ class MailboxConfigSpec extends JUnitSuite {
    t.interrupt
    assert(result === true)
  }
  */
}
akka-actor/src/test/scala/japi/JavaAPITest.scala (new file, 5 lines)

@@ -0,0 +1,5 @@
package se.scalablesolutions.akka.japi

import org.scalatest.junit.JUnitSuite

class JavaAPITest extends JavaAPITestBase with JUnitSuite
@@ -98,7 +98,7 @@ class SchedulerSpec extends JUnitSuite {
    val pingLatch = new CountDownLatch(6)

    val actor = actorOf(new Actor {
      self.lifeCycle = Some(LifeCycle(Permanent))
      self.lifeCycle = Permanent

      def receive = {
        case Ping => pingLatch.countDown

@@ -113,7 +113,7 @@ class SchedulerSpec extends JUnitSuite {
          List(classOf[Exception])),
        Supervise(
          actor,
          LifeCycle(Permanent))
          Permanent)
        :: Nil)).start

    Scheduler.schedule(actor, Ping, 500, 500, TimeUnit.MILLISECONDS)
@@ -12,7 +12,7 @@ import ConnectionFactory._
import com.rabbitmq.client.AMQP.BasicProperties
import java.lang.{String, IllegalArgumentException}
import reflect.Manifest
import se.scalablesolutions.akka.util.Procedure
import se.scalablesolutions.akka.japi.Procedure

/**
 * AMQP Actor API. Implements Connection, Producer and Consumer materialized as Actors.

@@ -451,8 +451,7 @@ object AMQP {
  class AMQPSupervisorActor extends Actor {
    import self._

    faultHandler = Some(OneForOneStrategy(None, None)) // never die
    trapExit = List(classOf[Throwable])
    faultHandler = OneForOneStrategy(List(classOf[Throwable]))

    def receive = {
      case _ => {} // ignore all messages
|
|
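The supervisor hunk folds the separate `trapExit` list into the strategy itself. Side by side, taken directly from the diff above:

// Before this commit: strategy wrapped in Option, trapped exceptions listed separately.
trapExit = List(classOf[Throwable])
faultHandler = Some(OneForOneStrategy(None, None)) // never die

// After this commit: one assignment, trapped exceptions passed to the strategy directly.
faultHandler = OneForOneStrategy(List(classOf[Throwable]))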
@@ -8,15 +8,16 @@ import java.util.{TimerTask, Timer}
import java.io.IOException
import com.rabbitmq.client._
import se.scalablesolutions.akka.amqp.AMQP.ConnectionParameters
import se.scalablesolutions.akka.actor.{Exit, Actor}
import se.scalablesolutions.akka.config.ScalaConfig.{Permanent, LifeCycle}
import se.scalablesolutions.akka.config.ScalaConfig.{Permanent}
import se.scalablesolutions.akka.config.OneForOneStrategy
import se.scalablesolutions.akka.actor.{Exit, Actor}

private[amqp] class FaultTolerantConnectionActor(connectionParameters: ConnectionParameters) extends Actor {
import connectionParameters._

self.id = "amqp-connection-%s".format(host)
self.lifeCycle = Some(LifeCycle(Permanent))
self.lifeCycle = Permanent
self.faultHandler = OneForOneStrategy(List(classOf[Throwable]))

self.trapExit = List(classOf[Throwable])
self.faultHandler = Some(OneForOneStrategy(None, None)) // never die

@@ -70,8 +71,9 @@ private[amqp] class FaultTolerantConnectionActor(connectionParameters: Connectio
}
})
log.info("Successfully (re)connected to AMQP Server %s:%s [%s]", host, port, self.id)
log.debug("Sending new channel to %d already linked actors", self.linkedActorsAsList.size)
self.linkedActorsAsList.foreach(_ ! conn.createChannel)
log.debug("Sending new channel to %d already linked actors", self.linkedActors.size)
import scala.collection.JavaConversions._
self.linkedActors.values.iterator.foreach(_ ! conn.createChannel)
notifyCallback(Connected)
}
} catch {
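Note the second hunk: `linkedActorsAsList` is gone, and `linkedActors` is apparently a Java map keyed by uuid, so traversal goes through `scala.collection.JavaConversions`. A short sketch of the new traversal, with the map type assumed from the `.values` call above:

// linkedActors is assumed here to be a java.util.Map[Uuid, ActorRef].
import scala.collection.JavaConversions._

self.linkedActors.values.iterator.foreach { child =>
  child ! conn.createChannel // hand each supervised consumer/producer a fresh channel
}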
@@ -1,5 +1,5 @@
/**
 * Copyright (C) 2009-2010 Scalable Solutions AB <http://scalablesolutions.se>
 */

package se.scalablesolutions.akka.camel

@@ -10,10 +10,11 @@ import org.apache.camel.{ProducerTemplate, CamelContext}
import org.apache.camel.impl.DefaultCamelContext

import se.scalablesolutions.akka.camel.component.TypedActorComponent
import se.scalablesolutions.akka.japi.{Option => JOption}
import se.scalablesolutions.akka.util.Logging

/**
 * Defines the lifecycle of a CamelContext. Allowed state transitions are
 * Manages the lifecycle of a CamelContext. Allowed transitions are
 * init -> start -> stop -> init -> ... etc.
 *
 * @author Martin Krasser

@@ -22,8 +23,8 @@ trait CamelContextLifecycle extends Logging {
// TODO: enforce correct state transitions
// valid: init -> start -> stop -> init ...

private var _context: CamelContext = _
private var _template: ProducerTemplate = _
private var _context: Option[CamelContext] = None
private var _template: Option[ProducerTemplate] = None

private var _initialized = false
private var _started = false
@@ -35,52 +36,102 @@ trait CamelContextLifecycle extends Logging {

/**
 * Registry in which typed actors are TEMPORARILY registered during
 * creation of Camel routes to typed actors.
 * creation of Camel routes to these actors.
 */
private[camel] var typedActorRegistry: Map[String, AnyRef] = _

/**
 * Returns the managed CamelContext.
 * Returns <code>Some(CamelContext)</code> (containing the current CamelContext)
 * if <code>CamelContextLifecycle</code> has been initialized, otherwise <code>None</code>.
 */
protected def context: CamelContext = _context
def context: Option[CamelContext] = _context

/**
 * Returns the managed ProducerTemplate.
 * Returns <code>Some(ProducerTemplate)</code> (containing the current ProducerTemplate)
 * if <code>CamelContextLifecycle</code> has been initialized, otherwise <code>None</code>.
 */
protected def template: ProducerTemplate = _template
def template: Option[ProducerTemplate] = _template

/**
 * Sets the managed CamelContext.
 * Returns <code>Some(CamelContext)</code> (containing the current CamelContext)
 * if <code>CamelContextLifecycle</code> has been initialized, otherwise <code>None</code>.
 * <p>
 * Java API.
 */
protected def context_= (context: CamelContext) { _context = context }
def getContext: JOption[CamelContext] = context

/**
 * Sets the managed ProducerTemplate.
 * Returns <code>Some(ProducerTemplate)</code> (containing the current ProducerTemplate)
 * if <code>CamelContextLifecycle</code> has been initialized, otherwise <code>None</code>.
 * <p>
 * Java API.
 */
protected def template_= (template: ProducerTemplate) { _template = template }
def getTemplate: JOption[ProducerTemplate] = template

/**
 * Returns the current <code>CamelContext</code> if this <code>CamelContextLifecycle</code>
 * has been initialized, otherwise throws an <code>IllegalStateException</code>.
 */
def mandatoryContext =
if (context.isDefined) context.get
else throw new IllegalStateException("no current CamelContext")

/**
 * Returns the current <code>ProducerTemplate</code> if this <code>CamelContextLifecycle</code>
 * has been initialized, otherwise throws an <code>IllegalStateException</code>.
 */
def mandatoryTemplate =
if (template.isDefined) template.get
else throw new IllegalStateException("no current ProducerTemplate")

/**
 * Returns the current <code>CamelContext</code> if this <code>CamelContextLifecycle</code>
 * has been initialized, otherwise throws an <code>IllegalStateException</code>.
 * <p>
 * Java API.
 */
def getMandatoryContext = mandatoryContext

/**
 * Returns the current <code>ProducerTemplate</code> if this <code>CamelContextLifecycle</code>
 * has been initialized, otherwise throws an <code>IllegalStateException</code>.
 * <p>
 * Java API.
 */
def getMandatoryTemplate = mandatoryTemplate

def initialized = _initialized
def started = _started

/**
 * Starts the CamelContext and ProducerTemplate.
 * Starts the CamelContext and an associated ProducerTemplate.
 */
def start = {
context.start
template.start
_started = true
log.info("Camel context started")
for {
c <- context
t <- template
} {
c.start
t.start
_started = true
log.info("Camel context started")
}
}

/**
 * Stops the CamelContext and ProducerTemplate.
 * Stops the CamelContext and the associated ProducerTemplate.
 */
def stop = {
template.stop
context.stop
_initialized = false
_started = false
log.info("Camel context stopped")
for {
t <- template
c <- context
} {
t.stop
c.stop
_started = false
_initialized = false
log.info("Camel context stopped")
}
}

/**
@@ -90,29 +141,62 @@ trait CamelContextLifecycle extends Logging {

/**
 * Initializes this lifecycle object with the given CamelContext. For the passed
 * CamelContext stream-caching is enabled. If applications want to disable stream-
 * CamelContext, stream-caching is enabled. If applications want to disable stream-
 * caching they can do so after this method returned and prior to calling start.
 * This method also registers a new
 * {@link se.scalablesolutions.akka.camel.component.TypedActorComponent} at
 * <code>context</code> under a name defined by TypedActorComponent.InternalSchema.
 * This method also registers a new TypedActorComponent at the passed CamelContext
 * under a name defined by TypedActorComponent.InternalSchema.
 */
def init(context: CamelContext) {
this.typedActorComponent = new TypedActorComponent
this.typedActorRegistry = typedActorComponent.typedActorRegistry
this.context = context
this.context.setStreamCaching(true)
this.context.addComponent(TypedActorComponent.InternalSchema, typedActorComponent)
this.template = context.createProducerTemplate

context.setStreamCaching(true)
context.addComponent(TypedActorComponent.InternalSchema, typedActorComponent)

this._context = Some(context)
this._template = Some(context.createProducerTemplate)

_initialized = true
log.info("Camel context initialized")
}
}

/**
 * Makes a global CamelContext and ProducerTemplate accessible to applications. The lifecycle
 * of these objects is managed by se.scalablesolutions.akka.camel.CamelService.
 * Manages a global CamelContext and an associated ProducerTemplate.
 */
object CamelContextManager extends CamelContextLifecycle {
override def context: CamelContext = super.context
override def template: ProducerTemplate = super.template

// -----------------------------------------------------
// The inherited getters aren't statically accessible
// from Java. Therefore, they are redefined here.
// TODO: investigate if this is a Scala bug.
// -----------------------------------------------------

/**
 * see CamelContextLifecycle.getContext
 * <p>
 * Java API.
 */
override def getContext: JOption[CamelContext] = super.getContext

/**
 * see CamelContextLifecycle.getTemplate
 * <p>
 * Java API.
 */
override def getTemplate: JOption[ProducerTemplate] = super.getTemplate

/**
 * see CamelContextLifecycle.getMandatoryContext
 * <p>
 * Java API.
 */
override def getMandatoryContext = super.getMandatoryContext

/**
 * see CamelContextLifecycle.getMandatoryTemplate
 * <p>
 * Java API.
 */
override def getMandatoryTemplate = super.getMandatoryTemplate
}
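Net effect of the CamelContextLifecycle rework: `context` and `template` are now `Option`-valued, `mandatoryContext`/`mandatoryTemplate` throw when uninitialized, and the `getX` twins expose the same to Java. A hedged usage sketch; the no-arg `init` is assumed to fall back to a `DefaultCamelContext`, as the import above suggests:

import se.scalablesolutions.akka.camel.CamelContextManager

CamelContextManager.init   // no-arg variant, as used by CamelService.start below
CamelContextManager.start

// Safe access: does nothing when the manager was never initialized.
for (ctx <- CamelContextManager.context) ctx.setStreamCaching(false)

// Fail-fast access: throws IllegalStateException when uninitialized.
CamelContextManager.mandatoryTemplate.sendBody("direct:some-endpoint", "hello") // URI illustrative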
@@ -9,12 +9,15 @@ import org.apache.camel.CamelContext

import se.scalablesolutions.akka.actor.Actor._
import se.scalablesolutions.akka.actor.{AspectInitRegistry, ActorRegistry}
import se.scalablesolutions.akka.util.{Bootable, Logging}
import se.scalablesolutions.akka.config.Config._
import se.scalablesolutions.akka.japi.{Option => JOption}
import se.scalablesolutions.akka.util.{Logging, Bootable}

/**
 * Used by applications (and the Kernel) to publish consumer actors and typed actors via
 * Camel endpoints and to manage the life cycle of a a global CamelContext which can be
 * accessed via <code>se.scalablesolutions.akka.camel.CamelContextManager.context</code>.
 * Publishes (untyped) consumer actors and typed consumer actors via Camel endpoints. Actors
 * are published (asynchronously) when they are started and unpublished (asynchronously) when
 * they are stopped. The CamelService is notified about actor start- and stop-events by
 * registering listeners at ActorRegistry and AspectInitRegistry.
 *
 * @author Martin Krasser
 */

@@ -29,16 +32,36 @@ trait CamelService extends Bootable with Logging {
AspectInitRegistry.addListener(publishRequestor)

/**
 * Starts the CamelService. Any started actor that is a consumer actor will be (asynchronously)
 * published as Camel endpoint. Consumer actors that are started after this method returned will
 * be published as well. Actor publishing is done asynchronously. A started (loaded) CamelService
 * also publishes <code>@consume</code> annotated methods of typed actors that have been created
 * with <code>TypedActor.newInstance(..)</code> (and <code>TypedActor.newInstance(..)</code>
 * on a remote node).
 * Starts this CamelService unless <code>akka.camel.service</code> is set to <code>false</code>.
 */
abstract override def onLoad = {
super.onLoad
if (config.getBool("akka.camel.service", true)) start
}

/**
 * Stops this CamelService unless <code>akka.camel.service</code> is set to <code>false</code>.
 */
abstract override def onUnload = {
if (config.getBool("akka.camel.service", true)) stop
super.onUnload
}

@deprecated("use start() instead")
def load = start

@deprecated("use stop() instead")
def unload = stop

/**
 * Starts this CamelService. Any started actor that is a consumer actor will be (asynchronously)
 * published as Camel endpoint. Consumer actors that are started after this method returned will
 * be published as well. Actor publishing is done asynchronously. A started (loaded) CamelService
 * also publishes <code>@consume</code> annotated methods of typed actors that have been created
 * with <code>TypedActor.newInstance(..)</code> (and <code>TypedActor.newRemoteInstance(..)</code>
 * on a remote node).
 */
def start: CamelService = {
// Only init and start if not already done by application
if (!CamelContextManager.initialized) CamelContextManager.init
if (!CamelContextManager.started) CamelContextManager.start
@@ -49,14 +72,16 @@ trait CamelService extends Bootable with Logging {
// init publishRequestor so that buffered and future events are delivered to consumerPublisher
publishRequestor ! PublishRequestorInit(consumerPublisher)

// Register this instance as current CamelService
// Register this instance as current CamelService and return it
CamelServiceManager.register(this)
CamelServiceManager.mandatoryService
}

/**
 * Stops the CamelService.
 * Stops this CamelService. All published consumer actors and typed consumer actor methods will be
 * unpublished asynchronously.
 */
abstract override def onUnload = {
def stop = {
// Unregister this instance as current CamelService
CamelServiceManager.unregister(this)

@@ -67,55 +92,27 @@ trait CamelService extends Bootable with Logging {
// Stop related services
consumerPublisher.stop
CamelContextManager.stop

super.onUnload
}

@deprecated("use start() instead")
def load: CamelService = {
onLoad
this
}

@deprecated("use stop() instead")
def unload = onUnload

/**
 * Starts the CamelService.
 *
 * @see onLoad
 */
def start: CamelService = {
onLoad
this
}

/**
 * Stops the CamelService.
 *
 * @see onUnload
 */
def stop = onUnload

/**
 * Sets an expectation of the number of upcoming endpoint activations and returns
 * a {@link CountDownLatch} that can be used to wait for the activations to occur.
 * Endpoint activations that occurred in the past are not considered.
 * Sets an expectation on the number of upcoming endpoint activations and returns
 * a CountDownLatch that can be used to wait for the activations to occur. Endpoint
 * activations that occurred in the past are not considered.
 */
def expectEndpointActivationCount(count: Int): CountDownLatch =
(consumerPublisher !! SetExpectedRegistrationCount(count)).as[CountDownLatch].get

/**
 * Sets an expectation of the number of upcoming endpoint de-activations and returns
 * a {@link CountDownLatch} that can be used to wait for the de-activations to occur.
 * Endpoint de-activations that occurred in the past are not considered.
 * Sets an expectation on the number of upcoming endpoint de-activations and returns
 * a CountDownLatch that can be used to wait for the de-activations to occur. Endpoint
 * de-activations that occurred in the past are not considered.
 */
def expectEndpointDeactivationCount(count: Int): CountDownLatch =
(consumerPublisher !! SetExpectedUnregistrationCount(count)).as[CountDownLatch].get
}

/**
 * ...
 * Manages a global CamelService (the 'current' CamelService).
 *
 * @author Martin Krasser
 */
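Because publishing happens asynchronously, the two `expect*Count` methods above return a latch that callers await; this is exactly how the ConsumerTest further down synchronizes. A condensed sketch (`SomeConsumer` stands for any Consumer implementation):

import java.util.concurrent.TimeUnit
import se.scalablesolutions.akka.actor.Actor.actorOf
import se.scalablesolutions.akka.camel.CamelServiceManager

val service = CamelServiceManager.startCamelService
val latch = service.expectEndpointActivationCount(1)
actorOf(new SomeConsumer).start                  // SomeConsumer: hypothetical Consumer actor
assert(latch.await(5000, TimeUnit.MILLISECONDS)) // endpoint is now published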
@@ -128,22 +125,49 @@ object CamelServiceManager {

/**
 * Starts a new CamelService and makes it the current CamelService.
 *
 * @see CamelService#start
 * @see CamelService#onLoad
 */
def startCamelService = CamelServiceFactory.createCamelService.start

/**
 * Stops the current CamelService.
 *
 * @see CamelService#stop
 * @see CamelService#onUnload
 */
def stopCamelService = service.stop
def stopCamelService = for (s <- service) s.stop

/**
 * Returns the current CamelService.
 *
 * @throws IllegalStateException if there's no current CamelService.
 * Returns <code>Some(CamelService)</code> (containing the current CamelService)
 * if a <code>CamelService</code> has been started, <code>None</code> otherwise.
 */
def service =
def service = _current

/**
 * Returns <code>Some(CamelService)</code> (containing the current CamelService)
 * if a <code>CamelService</code> has been started, <code>None</code> otherwise.
 * <p>
 * Java API
 */
def getService: JOption[CamelService] = CamelServiceManager.service

/**
 * Returns the current <code>CamelService</code> if a <code>CamelService</code>
 * has been started, otherwise throws an <code>IllegalStateException</code>.
 */
def mandatoryService =
if (_current.isDefined) _current.get
else throw new IllegalStateException("no current CamelService")

/**
 * Returns the current <code>CamelService</code> if a <code>CamelService</code>
 * has been started, otherwise throws an <code>IllegalStateException</code>.
 * <p>
 * Java API
 */
def getMandatoryService = mandatoryService

private[camel] def register(service: CamelService) =
if (_current.isDefined) throw new IllegalStateException("current CamelService already registered")
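With `service` returning an `Option`, callers pattern-match or comprehend over it instead of catching exceptions; `mandatoryService` keeps the old fail-fast contract. For example:

import se.scalablesolutions.akka.camel.CamelServiceManager

CamelServiceManager.service match {
  case Some(s) => s.stop   // a CamelService is currently registered
  case None    => ()       // nothing started, nothing to stop
}

val s = CamelServiceManager.mandatoryService // throws IllegalStateException when unregistered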
@@ -159,12 +183,12 @@ object CamelServiceManager {
 */
object CamelServiceFactory {
/**
 * Creates a new CamelService instance
 * Creates a new CamelService instance.
 */
def createCamelService: CamelService = new CamelService { }

/**
 * Creates a new CamelService instance
 * Creates a new CamelService instance and initializes it with the given CamelContext.
 */
def createCamelService(camelContext: CamelContext): CamelService = {
CamelContextManager.init(camelContext)
@@ -20,30 +20,24 @@ trait Consumer { self: Actor =>
def endpointUri: String

/**
 * Determines whether two-way communications with this consumer actor should
 * be done in blocking or non-blocking mode (default is non-blocking). One-way
 * communications never block.
 * Determines whether two-way communications between an endpoint and this consumer actor
 * should be done in blocking or non-blocking mode (default is non-blocking). This method
 * doesn't have any effect on one-way communications (they'll never block).
 */
def blocking = false
}

/**
 * Java-friendly {@link Consumer} inherited by
 * Java-friendly Consumer.
 *
 * <ul>
 * <li>{@link UntypedConsumerActor}</li>
 * <li>{@link RemoteUntypedConsumerActor}</li>
 * <li>{@link UntypedConsumerTransactor}</li>
 * </ul>
 *
 * implementations.
 * @see UntypedConsumerActor
 * @see RemoteUntypedConsumerActor
 * @see UntypedConsumerTransactor
 *
 * @author Martin Krasser
 */
trait UntypedConsumer extends Consumer { self: UntypedActor =>
final override def endpointUri = getEndpointUri
final override def blocking = isBlocking

/**

@@ -52,9 +46,9 @@ trait UntypedConsumer extends Consumer { self: UntypedActor =>
def getEndpointUri(): String

/**
 * Determines whether two-way communications with this consumer actor should
 * be done in blocking or non-blocking mode (default is non-blocking). One-way
 * communications never block.
 * Determines whether two-way communications between an endpoint and this consumer actor
 * should be done in blocking or non-blocking mode (default is non-blocking). This method
 * doesn't have any effect on one-way communications (they'll never block).
 */
def isBlocking() = super.blocking
}

@@ -89,7 +83,7 @@ private[camel] object Consumer {
 * reference with a target actor that implements the <code>Consumer</code> trait. The
 * target <code>Consumer</code> object is passed as argument to <code>f</code>. This
 * method returns <code>None</code> if <code>actorRef</code> is not a valid reference
 * to a consumer actor, <code>Some</code> result otherwise.
 * to a consumer actor, <code>Some</code> consumer actor otherwise.
 */
def forConsumer[T](actorRef: ActorRef)(f: Consumer => T): Option[T] = {
if (!actorRef.actor.isInstanceOf[Consumer]) None
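For reference, a minimal consumer against the trait documented above; the endpoint URI and reply text are illustrative:

import se.scalablesolutions.akka.actor.Actor
import se.scalablesolutions.akka.camel.{Consumer, Message}

class EchoConsumer extends Actor with Consumer {
  def endpointUri = "direct:echo"   // illustrative URI
  override def blocking = false     // the default: in-out exchanges don't block the route thread

  def receive = {
    case msg: Message => self.reply("received %s" format msg.body)
  }
}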
@@ -23,15 +23,15 @@ private[camel] object ConsumerPublisher extends Logging {
 * Creates a route to the registered consumer actor.
 */
def handleConsumerRegistered(event: ConsumerRegistered) {
CamelContextManager.context.addRoutes(new ConsumerActorRoute(event.uri, event.uuid, event.blocking))
CamelContextManager.mandatoryContext.addRoutes(new ConsumerActorRoute(event.uri, event.uuid, event.blocking))
log.info("published actor %s at endpoint %s" format (event.actorRef, event.uri))
}

/**
 * Stops route to the already un-registered consumer actor.
 * Stops the route to the already un-registered consumer actor.
 */
def handleConsumerUnregistered(event: ConsumerUnregistered) {
CamelContextManager.context.stopRoute(event.uuid.toString)
CamelContextManager.mandatoryContext.stopRoute(event.uuid.toString)
log.info("unpublished actor %s from endpoint %s" format (event.actorRef, event.uri))
}

@@ -43,29 +43,29 @@ private[camel] object ConsumerPublisher extends Logging {
val objectId = "%s_%s" format (event.init.actorRef.uuid, targetMethod)

CamelContextManager.typedActorRegistry.put(objectId, event.typedActor)
CamelContextManager.context.addRoutes(new ConsumerMethodRoute(event.uri, objectId, targetMethod))
CamelContextManager.mandatoryContext.addRoutes(new ConsumerMethodRoute(event.uri, objectId, targetMethod))
log.info("published method %s of %s at endpoint %s" format (targetMethod, event.typedActor, event.uri))
}

/**
 * Stops route to the already un-registered consumer actor method.
 * Stops the route to the already un-registered consumer actor method.
 */
def handleConsumerMethodUnregistered(event: ConsumerMethodUnregistered) {
val targetMethod = event.method.getName
val objectId = "%s_%s" format (event.init.actorRef.uuid, targetMethod)

CamelContextManager.typedActorRegistry.remove(objectId)
CamelContextManager.context.stopRoute(objectId)
CamelContextManager.mandatoryContext.stopRoute(objectId)
log.info("unpublished method %s of %s from endpoint %s" format (targetMethod, event.typedActor, event.uri))
}
}

/**
 * Actor that publishes consumer actors and typed actor methods at Camel endpoints.
 * The Camel context used for publishing is CamelContextManager.context. This actor
 * accepts messages of type
 * The Camel context used for publishing is obtained via CamelContextManager.context.
 * This actor accepts messages of type
 * se.scalablesolutions.akka.camel.ConsumerRegistered,
 * se.scalablesolutions.akka.camel.ConsumerUnregistered.
 * se.scalablesolutions.akka.camel.ConsumerUnregistered,
 * se.scalablesolutions.akka.camel.ConsumerMethodRegistered and
 * se.scalablesolutions.akka.camel.ConsumerMethodUnregistered.
 *
@@ -110,7 +110,7 @@ private[camel] case class SetExpectedRegistrationCount(num: Int)
private[camel] case class SetExpectedUnregistrationCount(num: Int)

/**
 * Defines an abstract route to a target which is either an actor or an typed actor method..
 * Abstract route to a target which is either an actor or a typed actor method.
 *
 * @param endpointUri endpoint URI of the consumer actor or typed actor method.
 * @param id actor identifier or typed actor identifier (registry key).

@@ -135,9 +135,9 @@ private[camel] abstract class ConsumerRoute(endpointUri: String, id: String) ext
}

/**
 * Defines the route to a consumer actor.
 * Defines the route to a (untyped) consumer actor.
 *
 * @param endpointUri endpoint URI of the consumer actor
 * @param endpointUri endpoint URI of the (untyped) consumer actor
 * @param uuid actor uuid
 * @param blocking true for blocking in-out exchanges, false otherwise
 *

@@ -148,7 +148,7 @@ private[camel] class ConsumerActorRoute(endpointUri: String, uuid: Uuid, blockin
}

/**
 * Defines the route to an typed actor method..
 * Defines the route to a typed actor method.
 *
 * @param endpointUri endpoint URI of the consumer actor method
 * @param id typed actor identifier

@@ -162,10 +162,10 @@ private[camel] class ConsumerMethodRoute(val endpointUri: String, id: String, me

/**
 * A registration listener that triggers publication of consumer actors and typed actor
 * methods as well as un-publication of consumer actors. This actor needs to be initialized
 * with a <code>PublishRequestorInit</code> command message for obtaining a reference to
 * a <code>publisher</code> actor. Before initialization it buffers all outbound messages
 * and delivers them to the <code>publisher</code> when receiving a
 * methods as well as un-publication of consumer actors and typed actor methods. This actor
 * needs to be initialized with a <code>PublishRequestorInit</code> command message for
 * obtaining a reference to a <code>publisher</code> actor. Before initialization it buffers
 * all outbound messages and delivers them to the <code>publisher</code> when receiving a
 * <code>PublishRequestorInit</code> message. After initialization, outbound messages are
 * delivered directly without buffering.
 *

@@ -273,7 +273,7 @@ private[camel] case class ConsumerMethodUnregistered(typedActor: AnyRef, init: A
 */
private[camel] object ConsumerRegistered {
/**
 * Optionally creates an ConsumerRegistered event message for a consumer actor or None if
 * Creates a ConsumerRegistered event message for a consumer actor or None if
 * <code>actorRef</code> is not a consumer actor.
 */
def forConsumer(actorRef: ActorRef): Option[ConsumerRegistered] = {

@@ -288,7 +288,7 @@ private[camel] object ConsumerRegistered {
 */
private[camel] object ConsumerUnregistered {
/**
 * Optionally creates an ConsumerUnregistered event message for a consumer actor or None if
 * Creates a ConsumerUnregistered event message for a consumer actor or None if
 * <code>actorRef</code> is not a consumer actor.
 */
def forConsumer(actorRef: ActorRef): Option[ConsumerUnregistered] = {

@@ -327,8 +327,8 @@ private[camel] object ConsumerMethod {
 */
private[camel] object ConsumerMethodRegistered {
/**
 * Creates a list of ConsumerMethodRegistered event messages for an typed actor or an empty
 * list if the typed actor is a proxy for an remote typed actor or the typed actor doesn't
 * Creates a list of ConsumerMethodRegistered event messages for a typed actor or an empty
 * list if the typed actor is a proxy for a remote typed actor or the typed actor doesn't
 * have any <code>@consume</code> annotated methods.
 */
def forConsumer(typedActor: AnyRef, init: AspectInit): List[ConsumerMethodRegistered] = {

@@ -343,8 +343,8 @@ private[camel] object ConsumerMethodRegistered {
 */
private[camel] object ConsumerMethodUnregistered {
/**
 * Creates a list of ConsumerMethodUnregistered event messages for an typed actor or an empty
 * list if the typed actor is a proxy for an remote typed actor or the typed actor doesn't
 * Creates a list of ConsumerMethodUnregistered event messages for a typed actor or an empty
 * list if the typed actor is a proxy for a remote typed actor or the typed actor doesn't
 * have any <code>@consume</code> annotated methods.
 */
def forConsumer(typedActor: AnyRef, init: AspectInit): List[ConsumerMethodUnregistered] = {
@@ -10,7 +10,7 @@ import org.apache.camel.util.ExchangeHelper
/**
 * An immutable representation of a Camel message. Actor classes that mix in
 * se.scalablesolutions.akka.camel.Producer or
 * se.scalablesolutions.akka.camel.Consumer use this message type for communication.
 * se.scalablesolutions.akka.camel.Consumer usually use this message type for communication.
 *
 * @author Martin Krasser
 */

@@ -24,7 +24,7 @@ case class Message(val body: Any, val headers: Map[String, Any] = Map.empty) {
 * @see CamelContextManager.
 */
def bodyAs[T](clazz: Class[T]): T =
CamelContextManager.context.getTypeConverter.mandatoryConvertTo[T](clazz, body)
CamelContextManager.mandatoryContext.getTypeConverter.mandatoryConvertTo[T](clazz, body)

/**
 * Returns the body of the message converted to the type <code>T</code>. Conversion is done

@@ -35,7 +35,7 @@ case class Message(val body: Any, val headers: Map[String, Any] = Map.empty) {
 * @see CamelContextManager.
 */
def bodyAs[T](implicit m: Manifest[T]): T =
CamelContextManager.context.getTypeConverter.mandatoryConvertTo[T](m.erasure.asInstanceOf[Class[T]], body)
CamelContextManager.mandatoryContext.getTypeConverter.mandatoryConvertTo[T](m.erasure.asInstanceOf[Class[T]], body)

/**
 * Returns those headers from this message whose name is contained in <code>names</code>.

@@ -53,14 +53,14 @@ case class Message(val body: Any, val headers: Map[String, Any] = Map.empty) {
 * <code>NoSuchElementException</code> if the header doesn't exist.
 */
def headerAs[T](name: String)(implicit m: Manifest[T]): T =
CamelContextManager.context.getTypeConverter.mandatoryConvertTo[T](m.erasure.asInstanceOf[Class[T]], header(name))
CamelContextManager.mandatoryContext.getTypeConverter.mandatoryConvertTo[T](m.erasure.asInstanceOf[Class[T]], header(name))

/**
 * Returns the header with given <code>name</code> converted to type given by the <code>clazz</code>
 * argument. Throws <code>NoSuchElementException</code> if the header doesn't exist.
 */
def headerAs[T](name: String, clazz: Class[T]): T =
CamelContextManager.context.getTypeConverter.mandatoryConvertTo[T](clazz, header(name))
CamelContextManager.mandatoryContext.getTypeConverter.mandatoryConvertTo[T](clazz, header(name))

/**
 * Creates a Message with a new <code>body</code> using a <code>transformer</code> function.

@@ -264,8 +264,8 @@ class CamelMessageAdapter(val cm: CamelMessage) {

/**
 * Defines conversion methods to CamelExchangeAdapter and CamelMessageAdapter.
 * Imported by applications
 * that implicitly want to use conversion methods of CamelExchangeAdapter and CamelMessageAdapter.
 * Imported by applications that implicitly want to use conversion methods of
 * CamelExchangeAdapter and CamelMessageAdapter.
 */
object CamelMessageConversion {
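Message conversion now fails fast via `mandatoryContext` when the manager was never initialized. A short sketch of the converter-backed accessors, assuming an initialized CamelContextManager; header name and values are illustrative:

import se.scalablesolutions.akka.camel.Message

val msg = Message(1.4, Map("sample" -> "123"))

val body: String = msg.bodyAs[String]            // Manifest-based variant
val header: Int  = msg.headerAs[Int]("sample")   // NoSuchElementException if the header is absent
val boxed        = msg.bodyAs(classOf[java.lang.Double]) // Class-based variant for Java callers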
@@ -24,10 +24,10 @@ trait ProducerSupport { this: Actor =>
private val headersToCopyDefault = Set(Message.MessageExchangeId)

/**
 * <code>Endpoint</code> object resolved from current CamelContext with
 * <code>Endpoint</code> object resolved from the current CamelContext with
 * <code>endpointUri</code>.
 */
private lazy val endpoint = CamelContextManager.context.getEndpoint(endpointUri)
private lazy val endpoint = CamelContextManager.mandatoryContext.getEndpoint(endpointUri)

/**
 * <code>SendProcessor</code> for producing messages to <code>endpoint</code>.

@@ -36,8 +36,8 @@ trait ProducerSupport { this: Actor =>

/**
 * If set to false (default), this producer expects a response message from the Camel endpoint.
 * If set to true, this producer communicates with the Camel endpoint with an in-only message
 * exchange pattern (fire and forget).
 * If set to true, this producer initiates an in-only message exchange with the Camel endpoint
 * (fire and forget).
 */
def oneway: Boolean = false

@@ -62,13 +62,17 @@ trait ProducerSupport { this: Actor =>
}

/**
 * Produces <code>msg</code> as exchange of given <code>pattern</code> to the endpoint specified by
 * <code>endpointUri</code>. After producing to the endpoint the processing result is passed as argument
 * to <code>receiveAfterProduce</code>. If the result was returned synchronously by the endpoint then
 * <code>receiveAfterProduce</code> is called synchronously as well. If the result was returned asynchronously,
 * the <code>receiveAfterProduce</code> is called asynchronously as well. This is done by wrapping the result,
 * adding it to this producers mailbox, unwrapping it once it is received and calling
 * <code>receiveAfterProduce</code>. The original sender and senderFuture are thereby preserved.
 * Initiates a message exchange of given <code>pattern</code> with the endpoint specified by
 * <code>endpointUri</code>. The in-message of the initiated exchange is the canonical form
 * of <code>msg</code>. After sending the in-message, the processing result (response) is passed
 * as argument to <code>receiveAfterProduce</code>. If the response is received synchronously from
 * the endpoint then <code>receiveAfterProduce</code> is called synchronously as well. If the
 * response is received asynchronously, the <code>receiveAfterProduce</code> is called
 * asynchronously. This is done by wrapping the response, adding it to this producer's
 * mailbox, unwrapping it and calling <code>receiveAfterProduce</code>. The original
 * sender and senderFuture are thereby preserved.
 *
 * @see Message#canonicalize(Any)
 *
 * @param msg message to produce
 * @param pattern exchange pattern

@@ -106,8 +110,8 @@ trait ProducerSupport { this: Actor =>

/**
 * Produces <code>msg</code> to the endpoint specified by <code>endpointUri</code>. Before the message is
 * actually produced it is pre-processed by calling <code>receiveBeforeProduce</code>. If <code>oneway</code>
 * is true an in-only message exchange is initiated, otherwise an in-out message exchange.
 * actually sent it is pre-processed by calling <code>receiveBeforeProduce</code>. If <code>oneway</code>
 * is <code>true</code>, an in-only message exchange is initiated, otherwise an in-out message exchange.
 *
 * @see Producer#produce(Any, ExchangePattern)
 */

@@ -132,17 +136,18 @@ trait ProducerSupport { this: Actor =>
}

/**
 * Called after the a result was received from the endpoint specified by <code>endpointUri</code>. The
 * result is passed as argument. By default, this method replies the result back to the original sender
 * if <code>oneway</code> is false. If <code>oneway</code> is true then nothing is done. This method may
 * be overridden by subtraits or subclasses.
 * Called after a response was received from the endpoint specified by <code>endpointUri</code>. The
 * response is passed as argument. By default, this method sends the response back to the original sender
 * if <code>oneway</code> is <code>false</code>. If <code>oneway</code> is <code>true</code>, nothing is
 * done. This method may be overridden by subtraits or subclasses (e.g. to forward responses to another
 * actor).
 */
protected def receiveAfterProduce: Receive = {
case msg => if (!oneway) self.reply(msg)
}

/**
 * Creates a new Exchange with given <code>pattern</code> from the endpoint specified by
 * Creates a new Exchange of given <code>pattern</code> from the endpoint specified by
 * <code>endpointUri</code>.
 */
private def createExchange(pattern: ExchangePattern): Exchange = endpoint.createExchange(pattern)

@@ -158,25 +163,26 @@ trait ProducerSupport { this: Actor =>
}

/**
 * Mixed in by Actor implementations that produce messages to Camel endpoints.
 * Mixed in by Actor implementations to produce messages to Camel endpoints.
 */
trait Producer extends ProducerSupport { this: Actor =>

/**
 * Default implementation of Actor.receive
 * Default implementation of Actor.receive. Any message received by this actor
 * will be produced to the endpoint specified by <code>endpointUri</code>.
 */
protected def receive = produce
}

/**
 * Java-friendly {@link ProducerSupport} inherited by {@link UntypedProducerActor} implementations.
 * Java-friendly ProducerSupport.
 *
 * @see UntypedProducerActor
 *
 * @author Martin Krasser
 */
trait UntypedProducer extends ProducerSupport { this: UntypedActor =>
final override def endpointUri = getEndpointUri
final override def oneway = isOneway

final override def receiveBeforeProduce = {

@@ -213,10 +219,10 @@ trait UntypedProducer extends ProducerSupport { this: UntypedActor =>
def onReceiveBeforeProduce(message: Any): Any = super.receiveBeforeProduce(message)

/**
 * Called after the a result was received from the endpoint specified by <code>getEndpointUri</code>. The
 * result is passed as argument. By default, this method replies the result back to the original sender
 * if <code>isOneway</code> returns false. If <code>isOneway</code> returns true then nothing is done. This
 * method may be overridden by subclasses.
 * Called after a response was received from the endpoint specified by <code>endpointUri</code>. The
 * response is passed as argument. By default, this method sends the response back to the original sender
 * if <code>oneway</code> is <code>false</code>. If <code>oneway</code> is <code>true</code>, nothing is
 * done. This method may be overridden by subclasses (e.g. to forward responses to another actor).
 */
@throws(classOf[Exception])
def onReceiveAfterProduce(message: Any): Unit = super.receiveAfterProduce(message)
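A minimal producer against the trait above; with `oneway` left at its default of `false`, every response from the endpoint is replied to the original sender. The URI is illustrative:

import se.scalablesolutions.akka.actor.Actor
import se.scalablesolutions.akka.camel.Producer

class SampleProducer extends Actor with Producer {
  def endpointUri = "jetty:http://localhost:8877/camel/test" // illustrative URI
  // receive defaults to produce: every incoming message becomes an in-out exchange
}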
@@ -14,16 +14,13 @@ import jsr166x.Deque
import org.apache.camel._
import org.apache.camel.impl.{DefaultProducer, DefaultEndpoint, DefaultComponent}

import se.scalablesolutions.akka.camel.{Failure, CamelMessageConversion, Message}
import CamelMessageConversion.toExchangeAdapter
import se.scalablesolutions.akka.actor._
import se.scalablesolutions.akka.camel.{Failure, Message}
import se.scalablesolutions.akka.camel.CamelMessageConversion.toExchangeAdapter
import se.scalablesolutions.akka.dispatch.{CompletableFuture, MessageInvocation, MessageDispatcher}
import se.scalablesolutions.akka.stm.TransactionConfig
import se.scalablesolutions.akka.actor.{ScalaActorRef, ActorRegistry, Actor, ActorRef, Uuid, uuidFrom}

import se.scalablesolutions.akka.AkkaException

import scala.reflect.BeanProperty
import se.scalablesolutions.akka.actor._

/**
 * Camel component for sending messages to and receiving replies from (untyped) actors.

@@ -48,12 +45,13 @@ class ActorComponent extends DefaultComponent {
}

/**
 * Camel endpoint for referencing an (untyped) actor. The actor reference is given by the endpoint URI.
 * An actor can be referenced by its <code>ActorRef.id</code> or its <code>ActorRef.uuid</code>.
 * Supported endpoint URI formats are
 * <code>actor:<actorid></code>,
 * <code>actor:id:<actorid></code> and
 * <code>actor:uuid:<actoruuid></code>.
 * Camel endpoint for sending messages to and receiving replies from (untyped) actors. Actors
 * are referenced using <code>actor</code> endpoint URIs of the following format:
 * <code>actor:<actor-id></code>,
 * <code>actor:id:<actor-id></code> and
 * <code>actor:uuid:<actor-uuid></code>,
 * where <code>actor-id</code> refers to <code>ActorRef.id</code> and <code>actor-uuid</code>
 * refers to the String-representation of <code>ActorRef.uuid</code>.
 *
 * @see se.scalablesolutions.akka.camel.component.ActorComponent
 * @see se.scalablesolutions.akka.camel.component.ActorProducer

@@ -66,8 +64,9 @@ class ActorEndpoint(uri: String,
val uuid: Option[Uuid]) extends DefaultEndpoint(uri, comp) {

/**
 * Blocking of caller thread during two-way message exchanges with consumer actors. This is set
 * via the <code>blocking=true|false</code> endpoint URI parameter. If omitted blocking is false.
 * Whether to block the caller thread during two-way message exchanges with (untyped) actors. This is
 * set via the <code>blocking=true|false</code> endpoint URI parameter. Default value is
 * <code>false</code>.
 */
@BeanProperty var blocking: Boolean = false

@@ -89,9 +88,18 @@ class ActorEndpoint(uri: String,
}

/**
 * Sends the in-message of an exchange to an (untyped) actor. If the exchange pattern is out-capable and
 * <code>blocking</code> is enabled then the producer waits for a reply (using the !! operator),
 * otherwise the ! operator is used for sending the message.
 * Sends the in-message of an exchange to an (untyped) actor.
 * <ul>
 * <li>If the exchange pattern is out-capable and <code>blocking</code> is set to
 * <code>true</code> then the producer waits for a reply, using the !! operator.</li>
 * <li>If the exchange pattern is out-capable and <code>blocking</code> is set to
 * <code>false</code> then the producer sends the message using the ! operator, together
 * with a callback handler. The callback handler is an <code>ActorRef</code> that can be
 * used by the receiving actor to asynchronously reply to the route that is sending the
 * message.</li>
 * <li>If the exchange pattern is in-only then the producer sends the message using the
 * ! operator.</li>
 * </ul>
 *
 * @see se.scalablesolutions.akka.camel.component.ActorComponent
 * @see se.scalablesolutions.akka.camel.component.ActorEndpoint

@@ -186,11 +194,11 @@ private[akka] object AsyncCallbackAdapter {
}

/**
 * Adapts an <code>AsyncCallback</code> to <code>ActorRef.!</code>. Used by other actors to reply
 * asynchronously to Camel with <code>ActorRef.reply</code>.
 * Adapts an <code>ActorRef</code> to a Camel <code>AsyncCallback</code>. Used by receiving actors to reply
 * asynchronously to Camel routes with <code>ActorRef.reply</code>.
 * <p>
 * <em>Please note</em> that this adapter can only be used locally at the moment, which should not
 * be a problem is most situations as Camel endpoints are only activated for local actor references,
 * be a problem in most situations since Camel endpoints are only activated for local actor references,
 * never for remote references.
 *
 * @author Martin Krasser
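To make the URI formats above concrete, a hedged route sketch targeting an actor by uuid; `target` stands for any started ActorRef, and the port and paths are illustrative:

import org.apache.camel.builder.RouteBuilder
import se.scalablesolutions.akka.actor.ActorRef

def actorRoute(target: ActorRef) = new RouteBuilder {
  def configure {
    from("jetty:http://0.0.0.0:8877/camel/test")
      .to("actor:uuid:%s?blocking=true" format target.uuid) // !! semantics, see the list above
  }
}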
@@ -207,8 +215,9 @@ private[akka] class AsyncCallbackAdapter(exchange: Exchange, callback: AsyncCall
}

/**
 * Writes the reply <code>message</code> to <code>exchange</code> and uses <code>callback</code> to
 * generate completion notifications.
 * Populates the initial <code>exchange</code> with the reply <code>message</code> and uses the
 * <code>callback</code> handler to notify Camel about the asynchronous completion of the message
 * exchange.
 *
 * @param message reply message
 * @param sender ignored
@@ -21,7 +21,7 @@ object TypedActorComponent {

/**
 * Camel component for exchanging messages with typed actors. This component
 * tries to obtain the typed actor from the <code>typedActorRegistry</code>
 * tries to obtain the typed actor from its <code>typedActorRegistry</code>
 * first. If it's not there it tries to obtain it from the CamelContext's registry.
 *
 * @see org.apache.camel.component.bean.BeanComponent

@@ -32,9 +32,9 @@ class TypedActorComponent extends BeanComponent {
val typedActorRegistry = new ConcurrentHashMap[String, AnyRef]

/**
 * Creates a {@link org.apache.camel.component.bean.BeanEndpoint} with a custom
 * bean holder that uses <code>typedActorRegistry</code> for getting access to
 * typed actors (beans).
 * Creates an <code>org.apache.camel.component.bean.BeanEndpoint</code> with a custom
 * bean holder that uses <code>typedActorRegistry</code> for getting access to typed
 * actors (beans).
 *
 * @see se.scalablesolutions.akka.camel.component.TypedActorHolder
 */

@@ -51,7 +51,7 @@ class TypedActorComponent extends BeanComponent {
}

/**
 * {@link org.apache.camel.component.bean.BeanHolder} implementation that uses a custom
 * <code>org.apache.camel.component.bean.BeanHolder</code> implementation that uses a custom
 * registry for getting access to typed actors.
 *
 * @author Martin Krasser

@@ -60,13 +60,16 @@ class TypedActorHolder(typedActorRegistry: Map[String, AnyRef], context: CamelCo
extends RegistryBean(context, name) {

/**
 * Returns an {@link se.scalablesolutions.akka.camel.component.TypedActorInfo} instance.
 * Returns an <code>se.scalablesolutions.akka.camel.component.TypedActorInfo</code> instance.
 */
override def getBeanInfo: BeanInfo =
new TypedActorInfo(getContext, getBean.getClass, getParameterMappingStrategy)

/**
 * Obtains an typed actor from <code>typedActorRegistry</code>.
 * Obtains a typed actor from <code>typedActorRegistry</code>. If the typed actor cannot
 * be found then this method tries to obtain the actor from the CamelContext's registry.
 *
 * @return a typed actor or <code>null</code>.
 */
override def getBean: AnyRef = {
val bean = typedActorRegistry.get(getName)

@@ -75,7 +78,7 @@ class TypedActorHolder(typedActorRegistry: Map[String, AnyRef], context: CamelCo
}

/**
 * Provides typed actor meta information.
 * Typed actor meta information.
 *
 * @author Martin Krasser
 */

@@ -101,7 +104,7 @@ class TypedActorInfo(context: CamelContext, clazz: Class[_], strategy: Parameter
}
}
val superclass = clazz.getSuperclass
if (superclass != null && !superclass.equals(classOf[AnyRef])) {
if ((superclass ne null) && !superclass.equals(classOf[AnyRef])) {
introspect(superclass)
}
}
@@ -13,6 +13,6 @@ public class SampleUntypedForwardingProducer extends UntypedProducerActor {
public void onReceiveAfterProduce(Object message) {
Message msg = (Message)message;
String body = msg.bodyAs(String.class);
CamelContextManager.template().sendBody("direct:forward-test-1", body);
CamelContextManager.getMandatoryTemplate().sendBody("direct:forward-test-1", body);
}
}
@@ -6,22 +6,30 @@ import org.scalatest.junit.JUnitSuite

class CamelContextLifecycleTest extends JUnitSuite with CamelContextLifecycle {
@Test def shouldManageCustomCamelContext {
assert(context === null)
assert(template === null)
assert(context === None)
assert(template === None)

intercept[IllegalStateException] { mandatoryContext }
intercept[IllegalStateException] { mandatoryTemplate }

val ctx = new TestCamelContext
assert(ctx.isStreamCaching === false)

init(ctx)
assert(context.isStreamCaching === true)
assert(!context.asInstanceOf[TestCamelContext].isStarted)
// In Camel 2.3 CamelContext.createProducerTemplate starts
// the template before returning it (wasn't started in 2.2)
assert(template.asInstanceOf[DefaultProducerTemplate].isStarted)

assert(mandatoryContext.isStreamCaching === true)
assert(!mandatoryContext.asInstanceOf[TestCamelContext].isStarted)
assert(mandatoryTemplate.asInstanceOf[DefaultProducerTemplate].isStarted)

start
assert(context.asInstanceOf[TestCamelContext].isStarted)
assert(template.asInstanceOf[DefaultProducerTemplate].isStarted)

assert(mandatoryContext.asInstanceOf[TestCamelContext].isStarted)
assert(mandatoryTemplate.asInstanceOf[DefaultProducerTemplate].isStarted)

stop
assert(!context.asInstanceOf[TestCamelContext].isStarted)
assert(!template.asInstanceOf[DefaultProducerTemplate].isStarted)

assert(!mandatoryContext.asInstanceOf[TestCamelContext].isStarted)
assert(!mandatoryTemplate.asInstanceOf[DefaultProducerTemplate].isStarted)
}

class TestCamelContext extends DefaultCamelContext
@@ -8,21 +8,24 @@ import se.scalablesolutions.akka.actor.ActorRegistry
/**
 * @author Martin Krasser
 */
class CamelServiceManagerSpec extends WordSpec with BeforeAndAfterAll with MustMatchers {
class CamelServiceManagerTest extends WordSpec with BeforeAndAfterAll with MustMatchers {

override def afterAll = ActorRegistry.shutdownAll
override def afterAll = {
CamelServiceManager.stopCamelService
ActorRegistry.shutdownAll
}

"A CamelServiceManager" when {
"the startCamelService method has been called" must {
"have registered the started CamelService instance" in {
val service = CamelServiceManager.startCamelService
CamelServiceManager.service must be theSameInstanceAs (service)
CamelServiceManager.mandatoryService must be theSameInstanceAs (service)
}
}
"the stopCamelService method has been called" must {
"have unregistered the current CamelService instance" in {
val service = CamelServiceManager.stopCamelService
intercept[IllegalStateException] { CamelServiceManager.service }
CamelServiceManager.service must be (None)
}
}
}

@@ -32,13 +35,13 @@ class CamelServiceManagerSpec extends WordSpec with BeforeAndAfterAll with MustM
"a CamelService instance has been started externally" must {
"have registered the started CamelService instance" in {
service.start
CamelServiceManager.service must be theSameInstanceAs (service)
CamelServiceManager.mandatoryService must be theSameInstanceAs (service)
}
}
"the current CamelService instance has been stopped externally" must {
"have unregistered the current CamelService instance" in {
service.stop
intercept[IllegalStateException] { CamelServiceManager.service }
CamelServiceManager.service must be (None)
}
}
}

@@ -54,10 +57,6 @@ class CamelServiceManagerSpec extends WordSpec with BeforeAndAfterAll with MustM
"only allow the current CamelService instance to be stopped" in {
intercept[IllegalStateException] { CamelServiceFactory.createCamelService.stop }
}
"ensure that the current CamelService instance has been actually started" in {
CamelServiceManager.stopCamelService
intercept[IllegalStateException] { CamelServiceManager.stopCamelService }
}
}
}
}
@ -13,9 +13,9 @@ import se.scalablesolutions.akka.actor._
/**
* @author Martin Krasser
*/
class ConsumerSpec extends WordSpec with BeforeAndAfterAll with MustMatchers {
import CamelContextManager.template
import ConsumerSpec._
class ConsumerTest extends WordSpec with BeforeAndAfterAll with MustMatchers {
import CamelContextManager.mandatoryTemplate
import ConsumerTest._

var service: CamelService = _

@ -45,12 +45,12 @@ class ConsumerSpec extends WordSpec with BeforeAndAfterAll with MustMatchers {
val consumer = actorOf(new TestConsumer("direct:publish-test-2"))
"started before starting the CamelService" must {
"support an in-out message exchange via its endpoint" in {
template.requestBody("direct:publish-test-1", "msg1") must equal ("received msg1")
mandatoryTemplate.requestBody("direct:publish-test-1", "msg1") must equal ("received msg1")
}
}
"not started" must {
"not have an associated endpoint in the CamelContext" in {
CamelContextManager.context.hasEndpoint("direct:publish-test-2") must be (null)
CamelContextManager.mandatoryContext.hasEndpoint("direct:publish-test-2") must be (null)
}
}
"started" must {

@ -58,10 +58,10 @@ class ConsumerSpec extends WordSpec with BeforeAndAfterAll with MustMatchers {
val latch = service.expectEndpointActivationCount(1)
consumer.start
latch.await(5000, TimeUnit.MILLISECONDS) must be (true)
template.requestBody("direct:publish-test-2", "msg2") must equal ("received msg2")
mandatoryTemplate.requestBody("direct:publish-test-2", "msg2") must equal ("received msg2")
}
"have an associated endpoint in the CamelContext" in {
CamelContextManager.context.hasEndpoint("direct:publish-test-2") must not be (null)
CamelContextManager.mandatoryContext.hasEndpoint("direct:publish-test-2") must not be (null)
}
}
"stopped" must {

@ -70,7 +70,7 @@ class ConsumerSpec extends WordSpec with BeforeAndAfterAll with MustMatchers {
consumer.stop
latch.await(5000, TimeUnit.MILLISECONDS) must be (true)
intercept[CamelExecutionException] {
template.requestBody("direct:publish-test-2", "msg2")
mandatoryTemplate.requestBody("direct:publish-test-2", "msg2")
}
}
}

@ -83,9 +83,9 @@ class ConsumerSpec extends WordSpec with BeforeAndAfterAll with MustMatchers {
val latch = service.expectEndpointActivationCount(3)
actor = TypedActor.newInstance(classOf[SampleTypedConsumer], classOf[SampleTypedConsumerImpl])
latch.await(5000, TimeUnit.MILLISECONDS) must be (true)
template.requestBodyAndHeader("direct:m2", "x", "test", "y") must equal ("m2: x y")
template.requestBodyAndHeader("direct:m3", "x", "test", "y") must equal ("m3: x y")
template.requestBodyAndHeader("direct:m4", "x", "test", "y") must equal ("m4: x y")
mandatoryTemplate.requestBodyAndHeader("direct:m2", "x", "test", "y") must equal ("m2: x y")
mandatoryTemplate.requestBodyAndHeader("direct:m3", "x", "test", "y") must equal ("m3: x y")
mandatoryTemplate.requestBodyAndHeader("direct:m4", "x", "test", "y") must equal ("m4: x y")
}
}
"stopped" must {

@ -94,13 +94,13 @@ class ConsumerSpec extends WordSpec with BeforeAndAfterAll with MustMatchers {
TypedActor.stop(actor)
latch.await(5000, TimeUnit.MILLISECONDS) must be (true)
intercept[CamelExecutionException] {
template.requestBodyAndHeader("direct:m2", "x", "test", "y")
mandatoryTemplate.requestBodyAndHeader("direct:m2", "x", "test", "y")
}
intercept[CamelExecutionException] {
template.requestBodyAndHeader("direct:m3", "x", "test", "y")
mandatoryTemplate.requestBodyAndHeader("direct:m3", "x", "test", "y")
}
intercept[CamelExecutionException] {
template.requestBodyAndHeader("direct:m4", "x", "test", "y")
mandatoryTemplate.requestBodyAndHeader("direct:m4", "x", "test", "y")
}
}
}

@ -113,8 +113,8 @@ class ConsumerSpec extends WordSpec with BeforeAndAfterAll with MustMatchers {
val latch = service.expectEndpointActivationCount(2)
actor = TypedActor.newInstance(classOf[TestTypedConsumer], classOf[TestTypedConsumerImpl])
latch.await(5000, TimeUnit.MILLISECONDS) must be (true)
template.requestBody("direct:publish-test-3", "x") must equal ("foo: x")
template.requestBody("direct:publish-test-4", "x") must equal ("bar: x")
mandatoryTemplate.requestBody("direct:publish-test-3", "x") must equal ("foo: x")
mandatoryTemplate.requestBody("direct:publish-test-4", "x") must equal ("bar: x")
}
}
"stopped" must {

@ -123,10 +123,10 @@ class ConsumerSpec extends WordSpec with BeforeAndAfterAll with MustMatchers {
TypedActor.stop(actor)
latch.await(5000, TimeUnit.MILLISECONDS) must be (true)
intercept[CamelExecutionException] {
template.requestBody("direct:publish-test-3", "x")
mandatoryTemplate.requestBody("direct:publish-test-3", "x")
}
intercept[CamelExecutionException] {
template.requestBody("direct:publish-test-4", "x")
mandatoryTemplate.requestBody("direct:publish-test-4", "x")
}
}
}

@ -139,7 +139,7 @@ class ConsumerSpec extends WordSpec with BeforeAndAfterAll with MustMatchers {
val latch = service.expectEndpointActivationCount(1)
consumer.start
latch.await(5000, TimeUnit.MILLISECONDS) must be (true)
template.requestBodyAndHeader("direct:test-untyped-consumer", "x", "test", "y") must equal ("x y")
mandatoryTemplate.requestBodyAndHeader("direct:test-untyped-consumer", "x", "test", "y") must equal ("x y")
}
}
"stopped" must {

@ -148,7 +148,7 @@ class ConsumerSpec extends WordSpec with BeforeAndAfterAll with MustMatchers {
consumer.stop
latch.await(5000, TimeUnit.MILLISECONDS) must be (true)
intercept[CamelExecutionException] {
template.sendBodyAndHeader("direct:test-untyped-consumer", "blah", "test", "blub")
mandatoryTemplate.sendBodyAndHeader("direct:test-untyped-consumer", "blah", "test", "blub")
}
}
}

@ -162,7 +162,7 @@ class ConsumerSpec extends WordSpec with BeforeAndAfterAll with MustMatchers {
latch.await(5000, TimeUnit.MILLISECONDS) must be (true)

try {
template.requestBody("direct:publish-test-5", "msg3")
mandatoryTemplate.requestBody("direct:publish-test-5", "msg3")
fail("expected TimoutException not thrown")
} catch {
case e => {

@ -174,7 +174,7 @@ class ConsumerSpec extends WordSpec with BeforeAndAfterAll with MustMatchers {
}
}

object ConsumerSpec {
object ConsumerTest {
class TestConsumer(uri: String) extends Actor with Consumer {
def endpointUri = uri
protected def receive = {
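The latch-based synchronization used throughout these specs (service.expectEndpointActivationCount(n), then latch.await) reduces to a CountDownLatch that is re-armed per expectation. A hedged sketch of that mechanism (ActivationTracker and its method names are invented for illustration):

import java.util.concurrent.{CountDownLatch, TimeUnit}

// Hands out a latch that counts down once per endpoint activation.
class ActivationTracker {
  @volatile private var latch = new CountDownLatch(0)

  def expectActivations(n: Int): CountDownLatch = {
    latch = new CountDownLatch(n)
    latch
  }

  // called by the service whenever an endpoint comes up
  def endpointActivated(): Unit = latch.countDown()
}

// Test-side usage mirrors the specs above:
//   val latch = tracker.expectActivations(1)
//   consumer.start
//   latch.await(5000, TimeUnit.MILLISECONDS) // true once the endpoint is active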
@ -14,7 +14,7 @@ class ProducerFeatureTest extends FeatureSpec with BeforeAndAfterAll with Before
override protected def beforeAll = {
ActorRegistry.shutdownAll
CamelContextManager.init
CamelContextManager.context.addRoutes(new TestRoute)
CamelContextManager.mandatoryContext.addRoutes(new TestRoute)
CamelContextManager.start
}

@ -239,7 +239,7 @@ class ProducerFeatureTest extends FeatureSpec with BeforeAndAfterAll with Before
}
}

private def mockEndpoint = CamelContextManager.context.getEndpoint("mock:mock", classOf[MockEndpoint])
private def mockEndpoint = CamelContextManager.mandatoryContext.getEndpoint("mock:mock", classOf[MockEndpoint])
}

object ProducerFeatureTest {

@ -45,12 +45,12 @@ class RemoteConsumerTest extends FeatureSpec with BeforeAndAfterAll with GivenWh
val consumer = actorOf[RemoteConsumer].start

when("remote consumer publication is triggered")
var latch = service.expectEndpointActivationCount(1)
var latch = mandatoryService.expectEndpointActivationCount(1)
consumer !! "init"
assert(latch.await(5000, TimeUnit.MILLISECONDS))

then("the published consumer is accessible via its endpoint URI")
val response = CamelContextManager.template.requestBody("direct:remote-consumer", "test")
val response = CamelContextManager.mandatoryTemplate.requestBody("direct:remote-consumer", "test")
assert(response === "remote actor: test")
}
}

@ -61,12 +61,12 @@ class RemoteConsumerTest extends FeatureSpec with BeforeAndAfterAll with GivenWh
val consumer = TypedActor.newRemoteInstance(classOf[SampleRemoteTypedConsumer], classOf[SampleRemoteTypedConsumerImpl], host, port)

when("remote typed consumer publication is triggered")
var latch = service.expectEndpointActivationCount(1)
var latch = mandatoryService.expectEndpointActivationCount(1)
consumer.foo("init")
assert(latch.await(5000, TimeUnit.MILLISECONDS))

then("the published method is accessible via its endpoint URI")
val response = CamelContextManager.template.requestBody("direct:remote-typed-consumer", "test")
val response = CamelContextManager.mandatoryTemplate.requestBody("direct:remote-typed-consumer", "test")
assert(response === "remote typed actor: test")
}
}

@ -77,12 +77,12 @@ class RemoteConsumerTest extends FeatureSpec with BeforeAndAfterAll with GivenWh
val consumer = UntypedActor.actorOf(classOf[SampleRemoteUntypedConsumer]).start

when("remote untyped consumer publication is triggered")
var latch = service.expectEndpointActivationCount(1)
var latch = mandatoryService.expectEndpointActivationCount(1)
consumer.sendRequestReply(Message("init", Map("test" -> "init")))
assert(latch.await(5000, TimeUnit.MILLISECONDS))

then("the published untyped consumer is accessible via its endpoint URI")
val response = CamelContextManager.template.requestBodyAndHeader("direct:remote-untyped-consumer", "a", "test", "b")
val response = CamelContextManager.mandatoryTemplate.requestBodyAndHeader("direct:remote-untyped-consumer", "a", "test", "b")
assert(response === "a b")
}
}

@ -14,7 +14,7 @@ class UntypedProducerFeatureTest extends FeatureSpec with BeforeAndAfterAll with
override protected def beforeAll = {
ActorRegistry.shutdownAll
CamelContextManager.init
CamelContextManager.context.addRoutes(new TestRoute)
CamelContextManager.mandatoryContext.addRoutes(new TestRoute)
CamelContextManager.start
}

@ -78,7 +78,7 @@ class UntypedProducerFeatureTest extends FeatureSpec with BeforeAndAfterAll with

}

private def mockEndpoint = CamelContextManager.context.getEndpoint("mock:mock", classOf[MockEndpoint])
private def mockEndpoint = CamelContextManager.mandatoryContext.getEndpoint("mock:mock", classOf[MockEndpoint])
}

object UntypedProducerFeatureTest {

@ -18,7 +18,7 @@ class ActorComponentFeatureTest extends FeatureSpec with BeforeAndAfterAll with
override protected def beforeAll = {
ActorRegistry.shutdownAll
CamelContextManager.init
CamelContextManager.context.addRoutes(new TestRoute)
CamelContextManager.mandatoryContext.addRoutes(new TestRoute)
CamelContextManager.start
}

@ -30,12 +30,12 @@ class ActorComponentFeatureTest extends FeatureSpec with BeforeAndAfterAll with
}

feature("Communicate with an actor via an actor:uuid endpoint") {
import CamelContextManager.template
import CamelContextManager.mandatoryTemplate

scenario("one-way communication") {
val actor = actorOf[Tester1].start
val latch = (actor !! SetExpectedMessageCount(1)).as[CountDownLatch].get
template.sendBody("actor:uuid:%s" format actor.uuid, "Martin")
mandatoryTemplate.sendBody("actor:uuid:%s" format actor.uuid, "Martin")
assert(latch.await(5000, TimeUnit.MILLISECONDS))
val reply = (actor !! GetRetainedMessage).get.asInstanceOf[Message]
assert(reply.body === "Martin")

@ -43,36 +43,36 @@ class ActorComponentFeatureTest extends FeatureSpec with BeforeAndAfterAll with

scenario("two-way communication") {
val actor = actorOf[Tester2].start
assert(template.requestBody("actor:uuid:%s" format actor.uuid, "Martin") === "Hello Martin")
assert(mandatoryTemplate.requestBody("actor:uuid:%s" format actor.uuid, "Martin") === "Hello Martin")
}

scenario("two-way communication with timeout") {
val actor = actorOf[Tester3].start
intercept[RuntimeCamelException] {
template.requestBody("actor:uuid:%s?blocking=true" format actor.uuid, "Martin")
mandatoryTemplate.requestBody("actor:uuid:%s?blocking=true" format actor.uuid, "Martin")
}
}

scenario("two-way communication via a custom route with failure response") {
mockEndpoint.expectedBodiesReceived("whatever")
template.requestBody("direct:failure-test-1", "whatever")
mandatoryTemplate.requestBody("direct:failure-test-1", "whatever")
mockEndpoint.assertIsSatisfied
}

scenario("two-way communication via a custom route with exception") {
mockEndpoint.expectedBodiesReceived("whatever")
template.requestBody("direct:failure-test-2", "whatever")
mandatoryTemplate.requestBody("direct:failure-test-2", "whatever")
mockEndpoint.assertIsSatisfied
}
}

feature("Communicate with an actor via an actor:id endpoint") {
import CamelContextManager.template
import CamelContextManager.mandatoryTemplate

scenario("one-way communication") {
val actor = actorOf[Tester1].start
val latch = (actor !! SetExpectedMessageCount(1)).as[CountDownLatch].get
template.sendBody("actor:%s" format actor.id, "Martin")
mandatoryTemplate.sendBody("actor:%s" format actor.id, "Martin")
assert(latch.await(5000, TimeUnit.MILLISECONDS))
val reply = (actor !! GetRetainedMessage).get.asInstanceOf[Message]
assert(reply.body === "Martin")

@ -80,17 +80,17 @@ class ActorComponentFeatureTest extends FeatureSpec with BeforeAndAfterAll with

scenario("two-way communication") {
val actor = actorOf[Tester2].start
assert(template.requestBody("actor:%s" format actor.id, "Martin") === "Hello Martin")
assert(mandatoryTemplate.requestBody("actor:%s" format actor.id, "Martin") === "Hello Martin")
}

scenario("two-way communication via a custom route") {
val actor = actorOf[CustomIdActor].start
assert(template.requestBody("direct:custom-id-test-1", "Martin") === "Received Martin")
assert(template.requestBody("direct:custom-id-test-2", "Martin") === "Received Martin")
assert(mandatoryTemplate.requestBody("direct:custom-id-test-1", "Martin") === "Received Martin")
assert(mandatoryTemplate.requestBody("direct:custom-id-test-2", "Martin") === "Received Martin")
}
}

private def mockEndpoint = CamelContextManager.context.getEndpoint("mock:mock", classOf[MockEndpoint])
private def mockEndpoint = CamelContextManager.mandatoryContext.getEndpoint("mock:mock", classOf[MockEndpoint])
}

object ActorComponentFeatureTest {

@ -4,6 +4,7 @@ import org.apache.camel.{Endpoint, AsyncProcessor}
import org.apache.camel.impl.DefaultCamelContext
import org.junit._
import org.scalatest.junit.JUnitSuite

import se.scalablesolutions.akka.actor.uuidFrom

class ActorComponentTest extends JUnitSuite {

@ -1,20 +1,19 @@
package se.scalablesolutions.akka.camel.component

import org.apache.camel._
import org.apache.camel.builder.RouteBuilder
import org.apache.camel.impl.{DefaultCamelContext, SimpleRegistry}
import org.scalatest.{BeforeAndAfterEach, BeforeAndAfterAll, FeatureSpec}

import org.apache.camel.builder.RouteBuilder
import se.scalablesolutions.akka.actor.Actor._
import se.scalablesolutions.akka.actor.{ActorRegistry, TypedActor}
import se.scalablesolutions.akka.camel._
import org.apache.camel.impl.{DefaultCamelContext, SimpleRegistry}
import org.apache.camel.{ResolveEndpointFailedException, ExchangePattern, Exchange, Processor}

/**
* @author Martin Krasser
*/
class TypedActorComponentFeatureTest extends FeatureSpec with BeforeAndAfterAll with BeforeAndAfterEach {
import TypedActorComponentFeatureTest._
import CamelContextManager.template
import CamelContextManager.mandatoryTemplate

override protected def beforeAll = {
val typedActor = TypedActor.newInstance(classOf[SampleTypedActor], classOf[SampleTypedActorImpl]) // not a consumer

@ -25,7 +24,7 @@ class TypedActorComponentFeatureTest extends FeatureSpec with BeforeAndAfterAll
registry.put("ta", typedActor)

CamelContextManager.init(new DefaultCamelContext(registry))
CamelContextManager.context.addRoutes(new CustomRouteBuilder)
CamelContextManager.mandatoryContext.addRoutes(new CustomRouteBuilder)
CamelContextManager.start

// Internal registration

@ -42,19 +41,19 @@ class TypedActorComponentFeatureTest extends FeatureSpec with BeforeAndAfterAll
import ExchangePattern._

scenario("two-way communication with method returning String") {
val result1 = template.requestBodyAndHeader("%s:tc?method=m2" format InternalSchema, "x", "test", "y")
val result2 = template.requestBodyAndHeader("%s:tc?method=m4" format InternalSchema, "x", "test", "y")
val result1 = mandatoryTemplate.requestBodyAndHeader("%s:tc?method=m2" format InternalSchema, "x", "test", "y")
val result2 = mandatoryTemplate.requestBodyAndHeader("%s:tc?method=m4" format InternalSchema, "x", "test", "y")
assert(result1 === "m2: x y")
assert(result2 === "m4: x y")
}

scenario("two-way communication with method returning void") {
val result = template.requestBodyAndHeader("%s:tc?method=m5" format InternalSchema, "x", "test", "y")
val result = mandatoryTemplate.requestBodyAndHeader("%s:tc?method=m5" format InternalSchema, "x", "test", "y")
assert(result === "x") // returns initial body
}

scenario("one-way communication with method returning String") {
val result = template.send("%s:tc?method=m2" format InternalSchema, InOnly, new Processor {
val result = mandatoryTemplate.send("%s:tc?method=m2" format InternalSchema, InOnly, new Processor {
def process(exchange: Exchange) = {
exchange.getIn.setBody("x")
exchange.getIn.setHeader("test", "y")

@ -66,7 +65,7 @@ class TypedActorComponentFeatureTest extends FeatureSpec with BeforeAndAfterAll
}

scenario("one-way communication with method returning void") {
val result = template.send("%s:tc?method=m5" format InternalSchema, InOnly, new Processor {
val result = mandatoryTemplate.send("%s:tc?method=m5" format InternalSchema, InOnly, new Processor {
def process(exchange: Exchange) = {
exchange.getIn.setBody("x")
exchange.getIn.setHeader("test", "y")

@ -82,19 +81,19 @@ class TypedActorComponentFeatureTest extends FeatureSpec with BeforeAndAfterAll
feature("Communicate with an internally-registered typed actor using typed-actor endpoint URIs") {
scenario("communication not possible") {
intercept[ResolveEndpointFailedException] {
template.requestBodyAndHeader("typed-actor:tc?method=m2", "x", "test", "y")
mandatoryTemplate.requestBodyAndHeader("typed-actor:tc?method=m2", "x", "test", "y")
}
}
}

feature("Communicate with an externally-registered typed actor using typed-actor endpoint URIs") {
scenario("two-way communication with method returning String") {
val result = template.requestBody("typed-actor:ta?method=foo", "test")
val result = mandatoryTemplate.requestBody("typed-actor:ta?method=foo", "test")
assert(result === "foo: test")
}

scenario("two-way communication with method returning String via custom route") {
val result = template.requestBody("direct:test", "test")
val result = mandatoryTemplate.requestBody("direct:test", "test")
assert(result === "foo: test")
}
}

@ -5,23 +5,27 @@
package se.scalablesolutions.akka.comet

import org.atmosphere.cpr.{AtmosphereResourceEvent, AtmosphereResource}

import se.scalablesolutions.akka.actor.Actor._
import se.scalablesolutions.akka.actor.Actor
import se.scalablesolutions.akka.dispatch.Dispatchers
import org.atmosphere.jersey.util.JerseyBroadcasterUtil

object AkkaBroadcaster {
val broadcasterDispatcher = Dispatchers.fromConfig("akka.rest.comet-dispatcher")

type Event = AtmosphereResourceEvent[_,_]
type Resource = AtmosphereResource[_,_]
}

class AkkaBroadcaster extends org.atmosphere.jersey.JerseyBroadcaster {
class AkkaBroadcaster extends org.atmosphere.jersey.util.JerseySimpleBroadcaster {
import AkkaBroadcaster._
name = classOf[AkkaBroadcaster].getName

//FIXME should be supervised
val caster = actorOf(new Actor {
lazy val caster = actorOf(new Actor {
self.dispatcher = broadcasterDispatcher
def receive = {
case f : Function0[_] => f()
case (r: Resource,e: Event) => JerseyBroadcasterUtil.broadcast(r,e)
}
}).start

@ -30,7 +34,7 @@ class AkkaBroadcaster extends org.atmosphere.jersey.JerseyBroadcaster {
caster.stop
}

protected override def broadcast(r : AtmosphereResource[_,_], e : AtmosphereResourceEvent[_,_]) = {
caster ! (() => super.broadcast(r,e))
protected override def broadcast(r: Resource, e : Event) {
caster ! ((r,e))
}
}
}
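The AkkaBroadcaster rewrite above replaces a captured closure, () => super.broadcast(r,e), with a plain (resource, event) message, so the worker actor pattern-matches on data instead of executing arbitrary functions. The shape of that pattern, reduced to a standalone sketch (SerializedBroadcastSketch and its println stand-in are illustrative):

import se.scalablesolutions.akka.actor.Actor
import se.scalablesolutions.akka.actor.Actor._

object SerializedBroadcastSketch {
  case class Broadcast(resource: AnyRef, event: AnyRef) // stands in for (Resource, Event)

  // One actor serializes all broadcasts; the real class would call
  // JerseyBroadcasterUtil.broadcast(r, e) here instead of println.
  val caster = actorOf(new Actor {
    def receive = {
      case Broadcast(r, e) => println("broadcast " + e + " to " + r)
    }
  }).start

  def broadcast(r: AnyRef, e: AnyRef) = caster ! Broadcast(r, e)
}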
@ -42,32 +42,30 @@ class AtmosphereRestServlet extends ServletContainer with AtmosphereServletProce
* <p/>
* Used by the Akka Kernel to bootstrap REST and Comet.
*/
class AkkaServlet extends AtmosphereServlet with Logging {
class AkkaServlet extends AtmosphereServlet {
import se.scalablesolutions.akka.config.Config.{config => c}

/*
* Configure Atmosphere and Jersey (default, fall-back values)
*/
addInitParameter(AtmosphereServlet.DISABLE_ONSTATE_EVENT,"true")
addInitParameter(AtmosphereServlet.BROADCASTER_CLASS,classOf[AkkaBroadcaster].getName)
addInitParameter(AtmosphereServlet.PROPERTY_USE_STREAM,"true")
addInitParameter("com.sun.jersey.config.property.packages",c.getList("akka.rest.resource_packages").mkString(";"))
addInitParameter("com.sun.jersey.spi.container.ResourceFilters",c.getList("akka.rest.filters").mkString(","))

c.getInt("akka.rest.maxInactiveActivity") foreach { value =>
log.info("MAX_INACTIVE:%s",value.toString)
addInitParameter(CometSupport.MAX_INACTIVE,value.toString)
}
c.getInt("akka.rest.maxInactiveActivity") foreach { value => addInitParameter(CometSupport.MAX_INACTIVE,value.toString) }
c.getString("akka.rest.cometSupport") foreach { value => addInitParameter("cometSupport",value) }

c.getString("akka.rest.cometSupport") foreach { value =>
addInitParameter("cometSupport",value)
}

val servlet = new AtmosphereRestServlet {
override def getInitParameter(key : String) = AkkaServlet.this.getInitParameter(key)
override def getInitParameterNames() = AkkaServlet.this.getInitParameterNames()
}

override def getInitParameter(key : String) = Option(super.getInitParameter(key)).getOrElse(initParams.get(key))
/*
* Provide a fallback for default values
*/
override def getInitParameter(key : String) =
Option(super.getInitParameter(key)).getOrElse(initParams get key)

/*
* Provide a fallback for default values
*/
override def getInitParameterNames() = {
import scala.collection.JavaConversions._
initParams.keySet.iterator ++ super.getInitParameterNames
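Both overrides implement the same fallback rule: consult the container-provided init parameter first, then the programmatic defaults. Stated as a tiny self-contained function (treating the defaults as a plain Map is an assumption for illustration):

object InitParamFallbackSketch {
  // Prefer the container-provided value; fall back to programmatic defaults.
  def initParameter(key: String, fromContainer: String => String, defaults: Map[String, String]): Option[String] =
    Option(fromContainer(key)) orElse defaults.get(key)

  def main(args: Array[String]): Unit = {
    val defaults = Map("cometSupport" -> "fallback.Support")
    println(initParameter("cometSupport", _ => null, defaults)) // Some(fallback.Support)
  }
}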
@ -80,24 +78,24 @@ class AkkaServlet extends AtmosphereServlet with Logging {
override def loadConfiguration(sc: ServletConfig) {
config.setSupportSession(false)
isBroadcasterSpecified = true

//The bridge between Atmosphere and Jersey
val servlet = new AtmosphereRestServlet {
//These are needed to make sure that Jersey is reading the config from the outer servlet
override def getInitParameter(key : String) = AkkaServlet.this.getInitParameter(key)
override def getInitParameterNames() = AkkaServlet.this.getInitParameterNames()
}

addAtmosphereHandler("/*", servlet, new AkkaBroadcaster)
}

/**
* This method is overridden because Akka Kernel is bundled with Grizzly, so if we deploy the Kernel in another container,
* we need to handle that.
*/
override def createCometSupportResolver() : CometSupportResolver = {
import scala.collection.JavaConversions._
override lazy val createCometSupportResolver: CometSupportResolver = new DefaultCometSupportResolver(config) {
import scala.collection.JavaConversions._

new DefaultCometSupportResolver(config) {
type CS = CometSupport[_ <: AtmosphereResource[_,_]]
lazy val desiredCometSupport =
Option(AkkaServlet.this.getInitParameter("cometSupport")) filter testClassExists map newCometSupport

override def resolve(useNativeIfPossible : Boolean, useBlockingAsDefault : Boolean) : CS = {
val predef = config.getInitParameter("cometSupport")
if (testClassExists(predef)) newCometSupport(predef)
else super.resolve(useNativeIfPossible, useBlockingAsDefault)
}
}
override def resolve(useNativeIfPossible : Boolean, useBlockingAsDefault : Boolean) : CometSupport[_ <: AtmosphereResource[_,_]] =
desiredCometSupport.getOrElse(super.resolve(useNativeIfPossible, useBlockingAsDefault))
}
}
29  akka-http/src/main/scala/DefaultAkkaLoader.scala  Normal file
@ -0,0 +1,29 @@
/**
* Copyright (C) 2009-2010 Scalable Solutions AB <http://scalablesolutions.se>
*/

package se.scalablesolutions.akka.http

import se.scalablesolutions.akka.config.Config
import se.scalablesolutions.akka.util.{Logging, Bootable}
import se.scalablesolutions.akka.camel.CamelService
import se.scalablesolutions.akka.remote.BootableRemoteActorService
import se.scalablesolutions.akka.actor.BootableActorLoaderService
import se.scalablesolutions.akka.servlet.AkkaLoader

class DefaultAkkaLoader extends AkkaLoader {
def boot(): Unit = boot(true,
new EmbeddedAppServer with BootableActorLoaderService
with BootableRemoteActorService
with CamelService)
}

/**
* Can be used to boot Akka
*
* java -cp ... se.scalablesolutions.akka.http.Main
*/
object Main extends DefaultAkkaLoader {
def main(args: Array[String]) = boot
}

@ -2,7 +2,7 @@
* Copyright (C) 2009-2010 Scalable Solutions AB <http://scalablesolutions.se>
*/

package se.scalablesolutions.akka.kernel
package se.scalablesolutions.akka.http

import javax.ws.rs.core.UriBuilder
import javax.servlet.ServletConfig

@ -207,7 +207,7 @@ trait AuthenticationActor[C <: Credentials] extends Actor {
//Turns the aforementioned header value into an option
def authOption(r: Req): Option[String] = {
val a = auth(r)
if (a != null && a.length > 0) Some(a) else None
if ((a ne null) && a.length > 0) Some(a) else None
}
}
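The (a ne null) form used in these two hunks is AnyRef reference inequality; unlike !=, which expands to a null-safe equals call, ne can never dispatch into a user-defined equals, so it is the cheaper and safer null check. The difference is observable:

object NeVersusBangEquals {
  class Chatty {
    override def equals(other: Any): Boolean = {
      println("equals invoked")
      super.equals(other)
    }
  }

  def main(args: Array[String]): Unit = {
    val c = new Chatty
    println(c != null) // routes through equals: prints "equals invoked", then true
    println(c ne null) // pure reference comparison: just prints true
  }
}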
@ -221,7 +221,7 @@ trait TransactionProtocol extends Logging {
private def storeInThreadLocal(tx: Transaction) = suspendedTx.set(tx)

private def fetchFromThreadLocal: Option[Transaction] = {
if (suspendedTx != null && suspendedTx.get() != null) Some(suspendedTx.get.asInstanceOf[Transaction])
if ((suspendedTx ne null) && (suspendedTx.get() ne null)) Some(suspendedTx.get.asInstanceOf[Transaction])
else None
}
}

@ -4,11 +4,8 @@

package se.scalablesolutions.akka.kernel

import se.scalablesolutions.akka.servlet.AkkaLoader
import se.scalablesolutions.akka.http.{ EmbeddedAppServer, DefaultAkkaLoader }
import se.scalablesolutions.akka.remote.BootableRemoteActorService
import se.scalablesolutions.akka.actor.BootableActorLoaderService
import se.scalablesolutions.akka.camel.CamelService
import se.scalablesolutions.akka.config.Config

object Main {
def main(args: Array[String]) = Kernel.boot

@ -19,18 +16,10 @@ object Main {
*
* @author <a href="http://jonasboner.com">Jonas Bonér</a>
*/
object Kernel extends AkkaLoader {
/**
* Boots up the Kernel with default bootables
*/
def boot(): Unit = boot(true,
new EmbeddedAppServer with BootableActorLoaderService
with BootableRemoteActorService
with CamelService)

//For testing purposes only
object Kernel extends DefaultAkkaLoader {
//For testing purposes only
def startRemoteService(): Unit = bundles.foreach( _ match {
case x: BootableRemoteActorService => x.startRemoteService
case _ =>
})
}
}

@ -7,6 +7,7 @@ package se.scalablesolutions.akka.persistence.common
import se.scalablesolutions.akka.stm._
import se.scalablesolutions.akka.stm.TransactionManagement.transaction
import se.scalablesolutions.akka.util.Logging
import collection.mutable.ArraySeq

// FIXME move to 'stm' package + add message with more info
class NoTransactionInScopeException extends RuntimeException

@ -47,30 +48,51 @@ trait Storage {
type ElementType

def newMap: PersistentMap[ElementType, ElementType]

def newVector: PersistentVector[ElementType]

def newRef: PersistentRef[ElementType]

def newQueue: PersistentQueue[ElementType] = // only implemented for redis
throw new UnsupportedOperationException

def newSortedSet: PersistentSortedSet[ElementType] = // only implemented for redis
throw new UnsupportedOperationException

def getMap(id: String): PersistentMap[ElementType, ElementType]

def getVector(id: String): PersistentVector[ElementType]

def getRef(id: String): PersistentRef[ElementType]

def getQueue(id: String): PersistentQueue[ElementType] = // only implemented for redis
throw new UnsupportedOperationException

def getSortedSet(id: String): PersistentSortedSet[ElementType] = // only implemented for redis
throw new UnsupportedOperationException

def newMap(id: String): PersistentMap[ElementType, ElementType]

def newVector(id: String): PersistentVector[ElementType]

def newRef(id: String): PersistentRef[ElementType]

def newQueue(id: String): PersistentQueue[ElementType] = // only implemented for redis
throw new UnsupportedOperationException

def newSortedSet(id: String): PersistentSortedSet[ElementType] = // only implemented for redis
throw new UnsupportedOperationException
}

private[akka] object PersistentMap {
// operations on the Map
sealed trait Op
case object PUT extends Op
case object REM extends Op
case object UPD extends Op
case object CLR extends Op
}

/**
* Implementation of <tt>PersistentMap</tt> for every concrete
* storage will have the same workflow. This abstracts the workflow.

@ -81,15 +103,10 @@ trait Storage {
* @author <a href="http://jonasboner.com">Jonas Bonér</a>
*/
trait PersistentMap[K, V] extends scala.collection.mutable.Map[K, V]
with Transactional with Committable with Abortable with Logging {
with Transactional with Committable with Abortable with Logging {

// operations on the Map
trait Op
case object GET extends Op
case object PUT extends Op
case object REM extends Op
case object UPD extends Op
case object CLR extends Op
//Import Ops
import PersistentMap._

// append only log: records all mutating operations
protected val appendOnlyTxLog = TransactionalVector[LogEntry]()

@ -114,7 +131,7 @@ trait PersistentMap[K, V] extends scala.collection.mutable.Map[K, V]
protected def clearDistinctKeys = keysInCurrentTx.clear

protected def filterTxLogByKey(key: K): IndexedSeq[LogEntry] =
appendOnlyTxLog filter(e => e.key.map(equal(_, key)).getOrElse(true))
appendOnlyTxLog filter (e => e.key.map(equal(_, key)).getOrElse(true))

// need to get current value considering the underlying storage as well as the transaction log
protected def getCurrentValue(key: K): Option[V] = {

@ -125,7 +142,7 @@ trait PersistentMap[K, V] extends scala.collection.mutable.Map[K, V]
// get the snapshot from the underlying store for this key
val underlying = try {
storage.getMapStorageEntryFor(uuid, key)
} catch { case e: Exception => None }
} catch {case e: Exception => None}

if (txEntries.isEmpty) underlying
else txEntries.last match {

@ -142,12 +159,14 @@ trait PersistentMap[K, V] extends scala.collection.mutable.Map[K, V]
case None => Map.empty[K, V]
case Some(v) => Map((key, v))
}
txEntries.foreach {case LogEntry(k, v, o) => o match {
case PUT => m.put(k.get, v.get)
case REM => m -= k.get
case UPD => m.update(k.get, v.get)
case CLR => Map.empty[K, V]
}}
txEntries.foreach {
case LogEntry(k, v, o) => o match {
case PUT => m.put(k.get, v.get)
case REM => m -= k.get
case UPD => m.update(k.get, v.get)
case CLR => Map.empty[K, V]
}
}
m get key
}

@ -155,12 +174,14 @@ trait PersistentMap[K, V] extends scala.collection.mutable.Map[K, V]
val storage: MapStorageBackend[K, V]

def commit = {
appendOnlyTxLog.foreach { case LogEntry(k, v, o) => o match {
case PUT => storage.insertMapStorageEntryFor(uuid, k.get, v.get)
case UPD => storage.insertMapStorageEntryFor(uuid, k.get, v.get)
case REM => storage.removeMapStorageFor(uuid, k.get)
case CLR => storage.removeMapStorageFor(uuid)
}}
appendOnlyTxLog.foreach {
case LogEntry(k, v, o) => o match {
case PUT => storage.insertMapStorageEntryFor(uuid, k.get, v.get)
case UPD => storage.insertMapStorageEntryFor(uuid, k.get, v.get)
case REM => storage.removeMapStorageFor(uuid, k.get)
case CLR => storage.removeMapStorageFor(uuid)
}
}

appendOnlyTxLog.clear
clearDistinctKeys
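The structure above is a deferred-write pattern: every mutation appends a LogEntry(key, value, op) to a transactional log, reads replay the log over the stored snapshot, and commit folds the log into the backend. The same idea reduced to a standalone sketch (simplified types, not the Akka code itself):

object TxLogSketch {
  sealed trait Op
  case object PUT extends Op
  case object REM extends Op
  case class LogEntry[K, V](key: K, value: Option[V], op: Op)

  class LoggedMap[K, V](store: scala.collection.mutable.Map[K, V]) {
    private val log = scala.collection.mutable.ArrayBuffer[LogEntry[K, V]]()

    def put(k: K, v: V): Unit = log += LogEntry(k, Some(v), PUT)
    def remove(k: K): Unit = log += LogEntry(k, None, REM)

    // read = snapshot from the store, then replay uncommitted entries for this key
    def get(k: K): Option[V] =
      log.filter(_.key == k).foldLeft(store.get(k)) {
        case (_, LogEntry(_, v, PUT)) => v
        case (_, LogEntry(_, _, REM)) => None
      }

    // commit = fold the whole log into the backend, then clear it
    def commit(): Unit = {
      log foreach {
        case LogEntry(k, Some(v), PUT) => store(k) = v
        case LogEntry(k, _, REM) => store -= k
        case _ => ()
      }
      log.clear()
    }

    // abort = drop the log; nothing ever reached the store
    def abort(): Unit = log.clear()
  }
}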
@ -176,8 +197,8 @@ trait PersistentMap[K, V] extends scala.collection.mutable.Map[K, V]
this
}

override def +=(kv : (K,V)) = {
put(kv._1,kv._2)
override def +=(kv: (K, V)) = {
put(kv._1, kv._2)
this
}

@ -226,10 +247,10 @@ trait PersistentMap[K, V] extends scala.collection.mutable.Map[K, V]
case Seq() => // current tx doesn't use this
storage.getMapStorageEntryFor(uuid, key).isDefined // check storage
case txs => // present in log
val lastOp = txs.last.op
val lastOp = txs.last.op
lastOp != REM && lastOp != CLR // last entry cannot be a REM
}
} catch { case e: Exception => false }
}
} catch {case e: Exception => false}

protected def existsInStorage(key: K): Option[V] = try {
storage.getMapStorageEntryFor(uuid, key)

@ -239,72 +260,84 @@ trait PersistentMap[K, V] extends scala.collection.mutable.Map[K, V]

override def size: Int = try {
// partition key set affected in current tx into those which are added & which are deleted
val (keysAdded, keysRemoved) = keysInCurrentTx.map {
val (keysAdded, keysRemoved) = keysInCurrentTx.map {
case (kseq, k) => ((kseq, k), getCurrentValue(k))
}.partition(_._2.isDefined)

// keys which existed in storage but removed in current tx
val inStorageRemovedInTx =
keysRemoved.keySet
.map(_._2)
.filter(k => existsInStorage(k).isDefined)
.size
val inStorageRemovedInTx =
keysRemoved.keySet
.map(_._2)
.filter(k => existsInStorage(k).isDefined)
.size

// all keys in storage
val keysInStorage =
storage.getMapStorageFor(uuid)
.map { case (k, v) => toEquals(k) }
.toSet
val keysInStorage =
storage.getMapStorageFor(uuid)
.map {case (k, v) => toEquals(k)}
.toSet

// (keys that existed UNION keys added ) - (keys removed)
(keysInStorage union keysAdded.keySet.map(_._1)).size - inStorageRemovedInTx
} catch {
case e: Exception => 0
} catch {
case e: Exception => 0
}

// get must consider underlying storage & current uncommitted tx log
override def get(key: K): Option[V] = getCurrentValue(key)

def iterator: Iterator[Tuple2[K, V]]
def iterator: Iterator[Tuple2[K, V]]

private def register = {
protected def register = {
if (transaction.get.isEmpty) throw new NoTransactionInScopeException
transaction.get.get.register(uuid, this)
}
}

object PersistentMapBinary {
object COrdering {
//frontend
implicit object ArraySeqOrdering extends Ordering[ArraySeq[Byte]] {
def compare(o1: ArraySeq[Byte], o2: ArraySeq[Byte]) =
ArrayOrdering.compare(o1.toArray, o2.toArray)
}
//backend
implicit object ArrayOrdering extends Ordering[Array[Byte]] {
def compare(o1: Array[Byte], o2: Array[Byte]) =
new String(o1) compare new String(o2)
}
}
}

trait PersistentMapBinary extends PersistentMap[Array[Byte], Array[Byte]] {
import scala.collection.mutable.ArraySeq

type T = ArraySeq[Byte]

def toEquals(k: Array[Byte]) = ArraySeq(k: _*)

override def equal(k1: Array[Byte], k2: Array[Byte]): Boolean = k1 sameElements k2

object COrdering {
implicit object ArraySeqOrdering extends Ordering[ArraySeq[Byte]] {
def compare(o1: ArraySeq[Byte], o2: ArraySeq[Byte]) =
new String(o1.toArray) compare new String(o2.toArray)
}
}

import scala.collection.immutable.{TreeMap, SortedMap}
private def replayAllKeys: SortedMap[ArraySeq[Byte], Array[Byte]] = {
import COrdering._
import PersistentMapBinary.COrdering._

// need ArraySeq for ordering
val fromStorage =
TreeMap(storage.getMapStorageFor(uuid).map { case (k, v) => (ArraySeq(k: _*), v) }: _*)
val fromStorage =
TreeMap(storage.getMapStorageFor(uuid).map {case (k, v) => (ArraySeq(k: _*), v)}: _*)

val (keysAdded, keysRemoved) = keysInCurrentTx.map {
val (keysAdded, keysRemoved) = keysInCurrentTx.map {
case (_, k) => (k, getCurrentValue(k))
}.partition(_._2.isDefined)

val inStorageRemovedInTx =
keysRemoved.keySet
.filter(k => existsInStorage(k).isDefined)
.map(k => ArraySeq(k: _*))
val inStorageRemovedInTx =
keysRemoved.keySet
.filter(k => existsInStorage(k).isDefined)
.map(k => ArraySeq(k: _*))

(fromStorage -- inStorageRemovedInTx) ++ keysAdded.map { case (k, Some(v)) => (ArraySeq(k: _*), v) }
(fromStorage -- inStorageRemovedInTx) ++ keysAdded.map {case (k, v) => (ArraySeq(k: _*), v.get)}
}

override def slice(start: Option[Array[Byte]], finish: Option[Array[Byte]], count: Int): List[(Array[Byte], Array[Byte])] = try {

@ -313,66 +346,73 @@ trait PersistentMapBinary extends PersistentMap[Array[Byte], Array[Byte]] {
if (newMap isEmpty) List[(Array[Byte], Array[Byte])]()

val startKey =
start match {
case Some(bytes) => Some(ArraySeq(bytes: _*))
case None => None
}
start match {
case Some(bytes) => Some(ArraySeq(bytes: _*))
case None => None
}

val endKey =
finish match {
case Some(bytes) => Some(ArraySeq(bytes: _*))
case None => None
}
finish match {
case Some(bytes) => Some(ArraySeq(bytes: _*))
case None => None
}

((startKey, endKey, count): @unchecked) match {
case ((Some(s), Some(e), _)) =>
newMap.range(s, e)
.toList
.map(e => (e._1.toArray, e._2))
.toList
.toList
.map(e => (e._1.toArray, e._2))
.toList
case ((Some(s), None, c)) if c > 0 =>
newMap.from(s)
.iterator
.take(count)
.map(e => (e._1.toArray, e._2))
.toList
.iterator
.take(count)
.map(e => (e._1.toArray, e._2))
.toList
case ((Some(s), None, _)) =>
newMap.from(s)
.toList
.map(e => (e._1.toArray, e._2))
.toList
.toList
.map(e => (e._1.toArray, e._2))
.toList
case ((None, Some(e), _)) =>
newMap.until(e)
.toList
.map(e => (e._1.toArray, e._2))
.toList
.toList
.map(e => (e._1.toArray, e._2))
.toList
}
} catch { case e: Exception => Nil }
} catch {case e: Exception => Nil}

override def iterator: Iterator[(Array[Byte], Array[Byte])] = {
override def iterator: Iterator[(Array[Byte], Array[Byte])] = {
new Iterator[(Array[Byte], Array[Byte])] {
private var elements = replayAllKeys

override def next: (Array[Byte], Array[Byte]) = synchronized {
val (k, v) = elements.head
elements = elements.tail
(k.toArray, v)
}
override def hasNext: Boolean = synchronized { !elements.isEmpty }

override def hasNext: Boolean = synchronized {!elements.isEmpty}
}
}
}

private[akka] object PersistentVector {
// operations on the Vector
sealed trait Op
case object ADD extends Op
case object UPD extends Op
case object POP extends Op
}

/**
* Implements a template for a concrete persistent transactional vector based storage.
*
* @author <a href="http://jonasboner.com">Jonas Bonér</a>
*/
trait PersistentVector[T] extends IndexedSeq[T] with Transactional with Committable with Abortable {
// operations on the Vector
trait Op
case object ADD extends Op
case object UPD extends Op
case object POP extends Op
//Import Ops
import PersistentVector._

// append only log: records all mutating operations
protected val appendOnlyTxLog = TransactionalVector[LogEntry]()

@ -385,8 +425,8 @@ trait PersistentVector[T] extends IndexedSeq[T] with Transactional with Committa
val storage: VectorStorageBackend[T]

def commit = {
for(entry <- appendOnlyTxLog) {
entry match {
for (entry <- appendOnlyTxLog) {
(entry: @unchecked) match {
case LogEntry(_, Some(v), ADD) => storage.insertVectorStorageEntryFor(uuid, v)
case LogEntry(Some(i), Some(v), UPD) => storage.updateVectorStorageEntryFor(uuid, i, v)
case LogEntry(_, _, POP) => //..

@ -403,8 +443,8 @@ trait PersistentVector[T] extends IndexedSeq[T] with Transactional with Committa
import scala.collection.mutable.ArrayBuffer
var elemsStorage = ArrayBuffer(storage.getVectorStorageRangeFor(uuid, None, None, storage.getVectorStorageSizeFor(uuid)).reverse: _*)

for(entry <- appendOnlyTxLog) {
entry match {
for (entry <- appendOnlyTxLog) {
(entry: @unchecked) match {
case LogEntry(_, Some(v), ADD) => elemsStorage += v
case LogEntry(Some(i), Some(v), UPD) => elemsStorage.update(i, v)
case LogEntry(_, _, POP) => elemsStorage = elemsStorage.drop(1)

@ -437,11 +477,11 @@ trait PersistentVector[T] extends IndexedSeq[T] with Transactional with Committa
val curr = replay
val s = if (start.isDefined) start.get else 0
val cnt =
if (finish.isDefined) {
val f = finish.get
if (f >= s) (f - s) else count
}
else count
if (finish.isDefined) {
val f = finish.get
if (f >= s) (f - s) else count
}
else count
if (s == 0 && cnt == 0) List().toIndexedSeq
else curr.slice(s, s + cnt).toIndexedSeq
}

@ -466,7 +506,7 @@ trait PersistentVector[T] extends IndexedSeq[T] with Transactional with Committa

def length: Int = replay.length

private def register = {
protected def register = {
if (transaction.get.isEmpty) throw new NoTransactionInScopeException
transaction.get.get.register(uuid, this)
}

@ -504,12 +544,19 @@ trait PersistentRef[T] extends Transactional with Committable with Abortable {
else default
}

private def register = {
protected def register = {
if (transaction.get.isEmpty) throw new NoTransactionInScopeException
transaction.get.get.register(uuid, this)
}
}

private[akka] object PersistentQueue {
//Operations for PersistentQueue
sealed trait QueueOp
case object ENQ extends QueueOp
case object DEQ extends QueueOp
}

/**
* Implementation of <tt>PersistentQueue</tt> for every concrete
* storage will have the same workflow. This abstracts the workflow.

@ -536,12 +583,10 @@ trait PersistentRef[T] extends Transactional with Committable with Abortable {
* @author <a href="http://debasishg.blogspot.com">Debasish Ghosh</a>
*/
trait PersistentQueue[A] extends scala.collection.mutable.Queue[A]
with Transactional with Committable with Abortable with Logging {

sealed trait QueueOp
case object ENQ extends QueueOp
case object DEQ extends QueueOp
with Transactional with Committable with Abortable with Logging {

//Import Ops
import PersistentQueue._
import scala.collection.immutable.Queue

// current trail that will be played on commit to the underlying store

@ -561,11 +606,12 @@ trait PersistentQueue[A] extends scala.collection.mutable.Queue[A]
val storage: QueueStorageBackend[A]

def commit = {
enqueuedNDequeuedEntries.toList.foreach { e =>
e._2 match {
case ENQ => storage.enqueue(uuid, e._1.get)
case DEQ => storage.dequeue(uuid)
}
enqueuedNDequeuedEntries.toList.foreach {
e =>
e._2 match {
case ENQ => storage.enqueue(uuid, e._1.get)
case DEQ => storage.dequeue(uuid)
}
}
if (shouldClearOnCommit.isDefined && shouldClearOnCommit.get) {
storage.remove(uuid)
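PersistentQueue applies the same recipe with a two-sided trail: ENQ entries carry the element, DEQ entries mark a removal, and commit replays the trail against the backend in order. A minimal standalone sketch of the commit path only (the real code also answers reads from storage.peek plus the local trail; these simplified types are not the Akka ones):

object QueueTrailSketch {
  sealed trait QueueOp
  case object ENQ extends QueueOp
  case object DEQ extends QueueOp

  class TrailQueue[A](backend: scala.collection.mutable.Queue[A]) {
    private val trail = scala.collection.mutable.ArrayBuffer[(Option[A], QueueOp)]()

    def enqueue(a: A): Unit = trail += ((Some(a), ENQ))
    def dequeue(): Unit = trail += ((None, DEQ)) // applied to the backend only on commit

    def commit(): Unit = {
      trail foreach {
        case (Some(a), ENQ) => backend.enqueue(a)
        case (_, DEQ) => backend.dequeue()
        case _ => ()
      }
      trail.clear()
    }

    def abort(): Unit = trail.clear()
  }
}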
@ -604,7 +650,7 @@ trait PersistentQueue[A] extends scala.collection.mutable.Queue[A]
storage.peek(uuid, i, 1)(0)
} else {
// check we have transient candidates in localQ for DQ
if (localQ.get.isEmpty == false) {
if (!localQ.get.isEmpty) {
val (a, q) = localQ.get.dequeue
localQ.swap(q)
a

@ -621,7 +667,7 @@ trait PersistentQueue[A] extends scala.collection.mutable.Queue[A]

override def size: Int = try {
storage.size(uuid) + localQ.get.length
} catch { case e: Exception => 0 }
} catch {case e: Exception => 0}

override def isEmpty: Boolean =
size == 0

@ -630,10 +676,12 @@ trait PersistentQueue[A] extends scala.collection.mutable.Queue[A]
enqueue(elem)
this
}

def ++=(elems: Iterator[A]) = {
enqueue(elems.toList: _*)
this
}

def ++=(elems: Iterable[A]): Unit = this ++= elems.iterator

override def dequeueFirst(p: A => Boolean): Option[A] =

@ -642,7 +690,7 @@ trait PersistentQueue[A] extends scala.collection.mutable.Queue[A]
override def dequeueAll(p: A => Boolean): scala.collection.mutable.Seq[A] =
throw new UnsupportedOperationException("dequeueAll not supported")

private def register = {
protected def register = {
if (transaction.get.isEmpty) throw new NoTransactionInScopeException
transaction.get.get.register(uuid, this)
}

@ -656,24 +704,24 @@ trait PersistentQueue[A] extends scala.collection.mutable.Queue[A]
* <p/>
* zscore can be implemented in a variety of ways by the calling class:
* <pre>
* trait ZScorable {
* trait ZScorable {
* def toZScore: Float
* }
*
* class Foo extends ZScorable {
* class Foo extends ZScorable {
* //.. implementation
* }
* </pre>
* Or we can also use views:
* <pre>
* class Foo {
* class Foo {
* //..
* }
*
* implicit def Foo2Scorable(foo: Foo): ZScorable = new ZScorable {
* def toZScore = {
* implicit def Foo2Scorable(foo: Foo): ZScorable = new ZScorable {
* def toZScore = {
* //..
* }
* }
* }
* </pre>
*

@ -682,7 +730,6 @@ trait PersistentQueue[A] extends scala.collection.mutable.Queue[A]
* @author <a href="http://debasishg.blogspot.com"</a>
*/
trait PersistentSortedSet[A] extends Transactional with Committable with Abortable {

protected val newElems = TransactionalMap[A, Float]()
protected val removedElems = TransactionalVector[A]()

@ -715,8 +762,8 @@ trait PersistentSortedSet[A] extends Transactional with Committable with Abortab
}

private def inStorage(elem: A): Option[Float] = storage.zscore(uuid, elem) match {
case Some(s) => Some(s.toFloat)
case None => None
case Some(s) => Some(s.toFloat)
case None => None
}

def contains(elem: A): Boolean = {

@ -744,11 +791,10 @@ trait PersistentSortedSet[A] extends Transactional with Committable with Abortab
def compare(that: (A, Float)) = x._2 compare that._2
}

implicit def ordering = new scala.math.Ordering[(A,Float)] {
def compare(x: (A, Float),y : (A,Float)) = x._2 compare y._2
implicit def ordering = new scala.math.Ordering[(A, Float)] {
def compare(x: (A, Float), y: (A, Float)) = x._2 compare y._2
}

def zrange(start: Int, end: Int): List[(A, Float)] = {
// need to operate on the whole range
// get all from the underlying storage

@ -759,14 +805,14 @@ trait PersistentSortedSet[A] extends Transactional with Committable with Abortab
// -1 means the last element, -2 means the second last
val s = if (start < 0) start + l else start
val e =
if (end < 0) end + l
else if (end >= l) (l - 1)
else end
if (end < 0) end + l
else if (end >= l) (l - 1)
else end
// slice is open at the end, we need a closed end range
ts.iterator.slice(s, e + 1).toList
}

private def register = {
protected def register = {
if (transaction.get.isEmpty) throw new NoTransactionInScopeException
transaction.get.get.register(uuid, this)
}
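The zrange index arithmetic mirrors Redis conventions: negative indices count from the end, end is clamped to the last element, and the closed [s, e] range becomes the half-open slice(s, e + 1). Worked example: for l = 5 elements, zrange(-2, -1) normalizes to s = 3, e = 4, i.e. the last two entries. The same normalization, stated directly:

object ZRangeIndexSketch {
  // the normalization performed by zrange above, for a collection of length l
  def normalize(start: Int, end: Int, l: Int): (Int, Int) = {
    val s = if (start < 0) start + l else start
    val e =
      if (end < 0) end + l
      else if (end >= l) l - 1
      else end
    (s, e) // consume as iterator.slice(s, e + 1): closed on both ends
  }

  def main(args: Array[String]): Unit = {
    println(normalize(-2, -1, 5)) // (3,4) -> last two elements
    println(normalize(0, 10, 5))  // (0,4) -> end clamped to l - 1
  }
}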
@ -0,0 +1,161 @@
/**
* Copyright (C) 2009-2010 Scalable Solutions AB <http://scalablesolutions.se>
*/

package se.scalablesolutions.akka.persistence.common

import org.scalatest.matchers.ShouldMatchers
import se.scalablesolutions.akka.util.Logging
import org.scalatest.{BeforeAndAfterEach, Spec}
import scala.util.Random
import collection.immutable.{TreeMap, HashMap, HashSet}
import se.scalablesolutions.akka.persistence.common.PersistentMapBinary.COrdering._

/**
* Implementation Compatibility test for PersistentMap backend implementations.
*/
trait MapStorageBackendTest extends Spec with ShouldMatchers with BeforeAndAfterEach with Logging {
def storage: MapStorageBackend[Array[Byte], Array[Byte]]

def dropMaps: Unit

override def beforeEach = {
log.info("beforeEach: dropping maps")
dropMaps
}

override def afterEach = {
log.info("afterEach: dropping maps")
dropMaps
}

describe("A Properly functioning MapStorageBackend") {
it("should remove map storage properly") {
val mapName = "removeTest"
val mkey = "removeTestKey".getBytes
val value = "removeTestValue".getBytes

storage.insertMapStorageEntryFor(mapName, mkey, value)
storage.getMapStorageEntryFor(mapName, mkey).isDefined should be(true)
storage.removeMapStorageFor(mapName, mkey)
storage.getMapStorageEntryFor(mapName, mkey) should be(None)

storage.insertMapStorageEntryFor(mapName, mkey, value)
storage.getMapStorageEntryFor(mapName, mkey).isDefined should be(true)
storage.removeMapStorageFor(mapName)
storage.getMapStorageEntryFor(mapName, mkey) should be(None)
}

it("should insert a single map storage element properly") {
val mapName = "insertSingleTest"
val mkey = "insertSingleTestKey".getBytes
val value = "insertSingleTestValue".getBytes

storage.insertMapStorageEntryFor(mapName, mkey, value)
storage.getMapStorageEntryFor(mapName, mkey).get should be(value)
storage.removeMapStorageFor(mapName, mkey)
storage.getMapStorageEntryFor(mapName, mkey) should be(None)

storage.insertMapStorageEntryFor(mapName, mkey, value)
storage.getMapStorageEntryFor(mapName, mkey).get should be(value)
storage.removeMapStorageFor(mapName)
storage.getMapStorageEntryFor(mapName, mkey) should be(None)
}

it("should insert multiple map storage elements properly") {
val mapName = "insertMultipleTest"
val rand = new Random(3).nextInt(100)
val entries = (1 to rand).toList.map {
index =>
(("insertMultipleTestKey" + index).getBytes -> ("insertMutlipleTestValue" + index).getBytes)
}

storage.insertMapStorageEntriesFor(mapName, entries)
entries foreach {
_ match {
case (mkey, value) => {
storage.getMapStorageEntryFor(mapName, mkey).isDefined should be(true)
storage.getMapStorageEntryFor(mapName, mkey).get should be(value)
}
}
}
storage.removeMapStorageFor(mapName)
entries foreach {
_ match {
case (mkey, value) => {
storage.getMapStorageEntryFor(mapName, mkey) should be(None)
}
}
}
}

it("should accurately track the number of key value pairs in a map") {
val mapName = "sizeTest"
val rand = new Random(3).nextInt(100)
val entries = (1 to rand).toList.map {
index =>
(("sizeTestKey" + index).getBytes -> ("sizeTestValue" + index).getBytes)
}

storage.insertMapStorageEntriesFor(mapName, entries)
storage.getMapStorageSizeFor(mapName) should be(rand)
}

it("should return all the key value pairs in the map in the correct order when getMapStorageFor(name) is called") {
val mapName = "allTest"
val rand = new Random(3).nextInt(100)
var entries = new TreeMap[Array[Byte], Array[Byte]]()(ArrayOrdering)
(1 to rand).foreach {
index =>
entries += (("allTestKey" + index).getBytes -> ("allTestValue" + index).getBytes)
}

storage.insertMapStorageEntriesFor(mapName, entries.toList)
val retrieved = storage.getMapStorageFor(mapName)
retrieved.size should be(rand)
entries.size should be(rand)

val entryMap = new HashMap[String, String] ++ entries.map {_ match {case (k, v) => (new String(k), new String(v))}}
val retrievedMap = new HashMap[String, String] ++ retrieved.map {_ match {case (k, v) => (new String(k), new String(v))}}
entryMap should equal(retrievedMap)

(0 until rand).foreach {
i: Int => {
new String(entries.toList(i)._1) should be(new String(retrieved(i)._1))
}
}

}

it("should return all the key->value pairs that exist in the map that are between start and end, up to count pairs when getMapStorageRangeFor is called") {
//implement if this method will be used
}

it("should return Some(null), not None, for a key that has had the value null set and None for a key with no value set") {
val mapName = "nullTest"
val key = "key".getBytes
storage.insertMapStorageEntryFor(mapName, key, null)
storage.getMapStorageEntryFor(mapName, key).get should be(null)
storage.removeMapStorageFor(mapName, key)
storage.getMapStorageEntryFor(mapName, key) should be(None)
}

it("should not throw an exception when size is called on a non existent map?") {
storage.getMapStorageSizeFor("nonExistent") should be(0)
}

}

}
@ -0,0 +1,123 @@
/**
 * Copyright (C) 2009-2010 Scalable Solutions AB <http://scalablesolutions.se>
 */

package se.scalablesolutions.akka.persistence.common

import org.scalatest.matchers.ShouldMatchers
import se.scalablesolutions.akka.util.Logging
import org.scalatest.{BeforeAndAfterEach, Spec}
import scala.util.Random

/**
 * Implementation Compatibility test for PersistentQueue backend implementations.
 */
trait QueueStorageBackendTest extends Spec with ShouldMatchers with BeforeAndAfterEach with Logging {
  def storage: QueueStorageBackend[Array[Byte]]

  def dropQueues: Unit

  override def beforeEach = {
    log.info("beforeEach: dropping queues")
    dropQueues
  }

  override def afterEach = {
    log.info("afterEach: dropping queues")
    dropQueues
  }

  describe("A Properly functioning QueueStorage Backend") {
    it("should enqueue properly when there is capacity in the queue") {
      val queue = "enqueueTest"
      val value = "enqueueTestValue".getBytes
      storage.size(queue) should be(0)
      storage.enqueue(queue, value).get should be(1)
      storage.size(queue) should be(1)
    }

    it("should return None when enqueue is called on a full queue") {

    }

    it("should dequeue properly when the queue is not empty") {
      val queue = "dequeueTest"
      val value = "dequeueTestValue".getBytes
      storage.size(queue) should be(0)
      storage.enqueue(queue, value)
      storage.size(queue) should be(1)
      storage.dequeue(queue).get should be(value)
    }

    it("should return None when dequeue is called on an empty queue") {
      val queue = "dequeueTest2"
      val value = "dequeueTestValue2".getBytes
      storage.size(queue) should be(0)
      storage.dequeue(queue) should be(None)
    }

    it("should accurately reflect the size of the queue") {
      val queue = "sizeTest"
      val rand = new Random(3).nextInt(100)
      val values = (1 to rand).toList.map {i: Int => ("sizeTestValue" + i).getBytes}
      values.foreach {storage.enqueue(queue, _)}
      storage.size(queue) should be(rand)
      val drand = new Random(3).nextInt(rand)
      (1 to drand).foreach {
        i: Int => {
          storage.dequeue(queue).isDefined should be(true)
          storage.size(queue) should be(rand - i)
        }
      }
    }

    it("should support peek properly") {
      val queue = "sizeTest"
      val rand = new Random(3).nextInt(100)
      val values = (1 to rand).toList.map {i: Int => ("peekTestValue" + i)}
      storage.remove(queue)
      values.foreach {s: String => storage.enqueue(queue, s.getBytes)}
      (1 to rand).foreach {
        index => {
          val peek = storage.peek(queue, 0, index).map {new String(_)}
          peek.size should be(index)
          values.dropRight(values.size - index).equals(peek) should be(true)
        }
      }
      (0 until rand).foreach {
        index => {
          val peek = storage.peek(queue, index, rand - index).map {new String(_)}
          peek.size should be(rand - index)
          values.drop(index).equals(peek) should be(true)
        }
      }

      //Should we test counts greater than queue size? or greater than queue size - count???
    }

    it("should not throw an exception when remove is called on a non-existent queue") {
      storage.remove("exceptionTest")
    }

    it("should remove queue storage properly") {
      val queue = "removeTest"
      val rand = new Random(3).nextInt(100)
      val values = (1 to rand).toList.map {i: Int => ("removeValue" + i).getBytes}
      values.foreach {storage.enqueue(queue, _)}
      storage.size(queue) should be(rand)
      storage.remove(queue)
      storage.size(queue) should be(0)
    }

    it("should accept null as a value to enqueue and return Some(null) when that value is dequeued") {
      val queue = "nullTest"
      storage.enqueue(queue, null).get should be(1)
      storage.dequeue(queue).get should be(null)
      storage.dequeue(queue) should be(None)
    }
  }
}
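A concrete backend wires this trait up by supplying `storage` and `dropQueues`; a minimal sketch under assumed names (`MyStorageBackend` and its `truncateQueues` are placeholders, not part of this commit), mirroring the Voldemort wiring that appears later in this commit:

class MyQueueStorageBackendTest extends QueueStorageBackendTest {
  // placeholder object standing in for a real QueueStorageBackend[Array[Byte]]
  def storage = MyStorageBackend
  // wipe all queue data between tests so every spec starts from a clean slate
  def dropQueues = MyStorageBackend.truncateQueues
}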
@ -0,0 +1,52 @@
/**
 * Copyright (C) 2009-2010 Scalable Solutions AB <http://scalablesolutions.se>
 */

package se.scalablesolutions.akka.persistence.common

import org.scalatest.matchers.ShouldMatchers
import se.scalablesolutions.akka.util.Logging
import org.scalatest.{BeforeAndAfterEach, Spec}

/**
 * Implementation Compatibility test for PersistentRef backend implementations.
 */
trait RefStorageBackendTest extends Spec with ShouldMatchers with BeforeAndAfterEach with Logging {
  def storage: RefStorageBackend[Array[Byte]]

  def dropRefs: Unit

  override def beforeEach = {
    log.info("beforeEach: dropping refs")
    dropRefs
  }

  override def afterEach = {
    log.info("afterEach: dropping refs")
    dropRefs
  }

  describe("A Properly functioning RefStorageBackend") {
    it("should successfully insert ref storage") {
      val name = "RefStorageTest #1"
      val value = name.getBytes
      storage.insertRefStorageFor(name, value)
      storage.getRefStorageFor(name).get should be(value)
    }

    it("should return None when getRefStorage is called when no value has been inserted") {
      val name = "RefStorageTest #2"
      val value = name.getBytes
      storage.getRefStorageFor(name) should be(None)
    }

    it("should return None, not Some(null), when getRefStorageFor is called after null has been set") {
      val name = "RefStorageTest #3"
      storage.insertRefStorageFor(name, null)
      storage.getRefStorageFor(name) should be(None)
    }
  }
}
@ -0,0 +1,35 @@
/**
 * Copyright (C) 2009-2010 Scalable Solutions AB <http://scalablesolutions.se>
 */

package se.scalablesolutions.akka.persistence.common

import org.scalatest.matchers.ShouldMatchers
import se.scalablesolutions.akka.util.Logging
import org.scalatest.{BeforeAndAfterEach, Spec}

/**
 * Implementation Compatibility test for PersistentSortedSet backend implementations.
 */
trait SortedSetStorageBackendTest extends Spec with ShouldMatchers with BeforeAndAfterEach with Logging {
  def storage: SortedSetStorageBackend[Array[Byte]]

  def dropSortedSets: Unit

  override def beforeEach = {
    log.info("beforeEach: dropping sorted sets")
    dropSortedSets
  }

  override def afterEach = {
    log.info("afterEach: dropping sorted sets")
    dropSortedSets
  }

  describe("A Properly functioning SortedSetStorageBackend") {

  }
}
@ -0,0 +1,362 @@
/**
 * Copyright (C) 2009-2010 Scalable Solutions AB <http://scalablesolutions.se>
 */

package se.scalablesolutions.akka.persistence.common

import org.scalatest.Spec
import org.scalatest.matchers.ShouldMatchers
import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach}
import org.scalatest.junit.JUnitRunner
import org.junit.runner.RunWith

import se.scalablesolutions.akka.actor.{Actor, ActorRef}
import se.scalablesolutions.akka.config.OneForOneStrategy
import Actor._
import se.scalablesolutions.akka.stm.global._
import se.scalablesolutions.akka.config.ScalaConfig._
import se.scalablesolutions.akka.util.Logging
import StorageObj._

case class GET(k: String)
case class SET(k: String, v: String)
case class REM(k: String)
case class CONTAINS(k: String)
case object MAP_SIZE
case class MSET(kvs: List[(String, String)])
case class REMOVE_AFTER_PUT(kvsToAdd: List[(String, String)], ksToRem: List[String])
case class CLEAR_AFTER_PUT(kvsToAdd: List[(String, String)])
case class PUT_WITH_SLICE(kvsToAdd: List[(String, String)], start: String, cnt: Int)
case class PUT_REM_WITH_SLICE(kvsToAdd: List[(String, String)], ksToRem: List[String], start: String, cnt: Int)

case class VADD(v: String)
case class VUPD(i: Int, v: String)
case class VUPD_AND_ABORT(i: Int, v: String)
case class VGET(i: Int)
case object VSIZE
case class VGET_AFTER_VADD(vsToAdd: List[String], isToFetch: List[Int])
case class VADD_WITH_SLICE(vsToAdd: List[String], start: Int, cnt: Int)

object StorageObj {
  var getMap: String => PersistentMap[Array[Byte], Array[Byte]] = _
  var getVector: String => PersistentVector[Array[Byte]] = _

  class SampleMapStorage extends Actor {
    self.lifeCycle = Permanent
    val FOO_MAP = "akka.sample.map"

    private var fooMap = atomic {StorageObj.getMap(FOO_MAP)}

    def receive = {
      case SET(k, v) =>
        atomic {
          fooMap += (k.getBytes, v.getBytes)
        }
        self.reply((k, v))

      case GET(k) =>
        val v = atomic {
          fooMap.get(k.getBytes).map(new String(_)).getOrElse(k + " Not found")
        }
        self.reply(v)

      case REM(k) =>
        val v = atomic {
          fooMap -= k.getBytes
        }
        self.reply(k)

      case CONTAINS(k) =>
        val v = atomic {
          fooMap contains k.getBytes
        }
        self.reply(v)

      case MAP_SIZE =>
        val v = atomic {
          fooMap.size
        }
        self.reply(v)

      case MSET(kvs) =>
        atomic {
          kvs.foreach {kv => fooMap += (kv._1.getBytes, kv._2.getBytes)}
        }
        self.reply(kvs.size)

      case REMOVE_AFTER_PUT(kvs2add, ks2rem) =>
        atomic {
          kvs2add.foreach {
            kv =>
              fooMap += (kv._1.getBytes, kv._2.getBytes)
          }

          ks2rem.foreach {
            k =>
              fooMap -= k.getBytes
          }
        }
        self.reply(fooMap.size)

      case CLEAR_AFTER_PUT(kvs2add) =>
        atomic {
          kvs2add.foreach {
            kv =>
              fooMap += (kv._1.getBytes, kv._2.getBytes)
          }
          fooMap.clear
        }
        self.reply(true)

      case PUT_WITH_SLICE(kvs2add, from, cnt) =>
        val v = atomic {
          kvs2add.foreach {
            kv =>
              fooMap += (kv._1.getBytes, kv._2.getBytes)
          }
          fooMap.slice(Some(from.getBytes), cnt)
        }
        self.reply(v: List[(Array[Byte], Array[Byte])])

      case PUT_REM_WITH_SLICE(kvs2add, ks2rem, from, cnt) =>
        val v = atomic {
          kvs2add.foreach {
            kv =>
              fooMap += (kv._1.getBytes, kv._2.getBytes)
          }
          ks2rem.foreach {
            k =>
              fooMap -= k.getBytes
          }
          fooMap.slice(Some(from.getBytes), cnt)
        }
        self.reply(v: List[(Array[Byte], Array[Byte])])
    }
  }

  class SampleVectorStorage extends Actor {
    self.lifeCycle = Permanent
    val FOO_VECTOR = "akka.sample.vector"

    private var fooVector = atomic {StorageObj.getVector(FOO_VECTOR)}

    def receive = {
      case VADD(v) =>
        val size =
          atomic {
            fooVector + v.getBytes
            fooVector length
          }
        self.reply(size)

      case VGET(index) =>
        val ind =
          atomic {
            fooVector get index
          }
        self.reply(ind)

      case VGET_AFTER_VADD(vs, is) =>
        val els =
          atomic {
            vs.foreach(fooVector + _.getBytes)
            (is.foldRight(List[Array[Byte]]())(fooVector.get(_) :: _)).map(new String(_))
          }
        self.reply(els)

      case VUPD_AND_ABORT(index, value) =>
        val l =
          atomic {
            fooVector.update(index, value.getBytes)
            // force fail
            fooVector get 100
          }
        self.reply(index)

      case VADD_WITH_SLICE(vs, s, c) =>
        val l =
          atomic {
            vs.foreach(fooVector + _.getBytes)
            fooVector.slice(Some(s), None, c)
          }
        self.reply(l.map(new String(_)))
    }
  }
}

trait Ticket343Test extends Spec with ShouldMatchers with BeforeAndAfterEach {
  def getMap: String => PersistentMap[Array[Byte], Array[Byte]]

  def getVector: String => PersistentVector[Array[Byte]]

  def dropMapsAndVectors: Unit

  override def beforeEach {
    StorageObj.getMap = getMap
    StorageObj.getVector = getVector
    dropMapsAndVectors
    println("** dropMapsAndVectors")
  }

  override def afterEach {
    dropMapsAndVectors
    println("** dropMapsAndVectors")
  }

  describe("Ticket 343 Issue #1") {
    it("remove after put should work within the same transaction") {
      val proc = actorOf[SampleMapStorage]
      proc.start

      (proc !! SET("debasish", "anshinsoft")).getOrElse("Set failed") should equal(("debasish", "anshinsoft"))
      (proc !! GET("debasish")).getOrElse("Get failed") should equal("anshinsoft")
      (proc !! MAP_SIZE).getOrElse("Size failed") should equal(1)

      (proc !! MSET(List(("dg", "1"), ("mc", "2"), ("nd", "3")))).getOrElse("Mset failed") should equal(3)

      (proc !! GET("dg")).getOrElse("Get failed") should equal("1")
      (proc !! GET("mc")).getOrElse("Get failed") should equal("2")
      (proc !! GET("nd")).getOrElse("Get failed") should equal("3")

      (proc !! MAP_SIZE).getOrElse("Size failed") should equal(4)

      val add = List(("a", "1"), ("b", "2"), ("c", "3"))
      val rem = List("a", "debasish")
      (proc !! REMOVE_AFTER_PUT(add, rem)).getOrElse("REMOVE_AFTER_PUT failed") should equal(5)

      (proc !! GET("debasish")).getOrElse("debasish not found") should equal("debasish Not found")
      (proc !! GET("a")).getOrElse("a not found") should equal("a Not found")

      (proc !! GET("b")).getOrElse("b not found") should equal("2")

      (proc !! CONTAINS("b")).getOrElse("b not found") should equal(true)
      (proc !! CONTAINS("debasish")).getOrElse("debasish not found") should equal(false)
      (proc !! MAP_SIZE).getOrElse("Size failed") should equal(5)
      proc.stop
    }
  }

  describe("Ticket 343 Issue #2") {
    it("clear after put should work within the same transaction") {
      val proc = actorOf[SampleMapStorage]
      proc.start

      (proc !! SET("debasish", "anshinsoft")).getOrElse("Set failed") should equal(("debasish", "anshinsoft"))
      (proc !! GET("debasish")).getOrElse("Get failed") should equal("anshinsoft")
      (proc !! MAP_SIZE).getOrElse("Size failed") should equal(1)

      val add = List(("a", "1"), ("b", "2"), ("c", "3"))
      (proc !! CLEAR_AFTER_PUT(add)).getOrElse("CLEAR_AFTER_PUT failed") should equal(true)

      (proc !! MAP_SIZE).getOrElse("Size failed") should equal(0)
      proc.stop
    }
  }

  describe("Ticket 343 Issue #3") {
    it("map size should change after the transaction") {
      val proc = actorOf[SampleMapStorage]
      proc.start

      (proc !! SET("debasish", "anshinsoft")).getOrElse("Set failed") should equal(("debasish", "anshinsoft"))
      (proc !! GET("debasish")).getOrElse("Get failed") should equal("anshinsoft")
      (proc !! MAP_SIZE).getOrElse("Size failed") should equal(1)

      (proc !! MSET(List(("dg", "1"), ("mc", "2"), ("nd", "3")))).getOrElse("Mset failed") should equal(3)
      (proc !! MAP_SIZE).getOrElse("Size failed") should equal(4)

      (proc !! GET("dg")).getOrElse("Get failed") should equal("1")
      (proc !! GET("mc")).getOrElse("Get failed") should equal("2")
      (proc !! GET("nd")).getOrElse("Get failed") should equal("3")
      proc.stop
    }
  }

  describe("slice test") {
    it("should pass") {
      val proc = actorOf[SampleMapStorage]
      proc.start

      (proc !! SET("debasish", "anshinsoft")).getOrElse("Set failed") should equal(("debasish", "anshinsoft"))
      (proc !! GET("debasish")).getOrElse("Get failed") should equal("anshinsoft")
      // (proc !! MAP_SIZE).getOrElse("Size failed") should equal(1)

      (proc !! MSET(List(("dg", "1"), ("mc", "2"), ("nd", "3")))).getOrElse("Mset failed") should equal(3)
      (proc !! MAP_SIZE).getOrElse("Size failed") should equal(4)

      (proc !! PUT_WITH_SLICE(List(("ec", "1"), ("tb", "2"), ("mc", "10")), "dg", 3)).get.asInstanceOf[List[(Array[Byte], Array[Byte])]].map {case (k, v) => (new String(k), new String(v))} should equal(List(("dg", "1"), ("ec", "1"), ("mc", "10")))

      (proc !! PUT_REM_WITH_SLICE(List(("fc", "1"), ("gb", "2"), ("xy", "10")), List("tb", "fc"), "dg", 5)).get.asInstanceOf[List[(Array[Byte], Array[Byte])]].map {case (k, v) => (new String(k), new String(v))} should equal(List(("dg", "1"), ("ec", "1"), ("gb", "2"), ("mc", "10"), ("nd", "3")))
      proc.stop
    }
  }

  describe("Ticket 343 Issue #4") {
    it("vector get should not ignore elements that were in vector before transaction") {
      val proc = actorOf[SampleVectorStorage]
      proc.start

      // add 4 elements in separate transactions
      (proc !! VADD("debasish")).getOrElse("VADD failed") should equal(1)
      (proc !! VADD("maulindu")).getOrElse("VADD failed") should equal(2)
      (proc !! VADD("ramanendu")).getOrElse("VADD failed") should equal(3)
      (proc !! VADD("nilanjan")).getOrElse("VADD failed") should equal(4)

      new String((proc !! VGET(0)).get.asInstanceOf[Array[Byte]]) should equal("nilanjan")
      new String((proc !! VGET(1)).get.asInstanceOf[Array[Byte]]) should equal("ramanendu")
      new String((proc !! VGET(2)).get.asInstanceOf[Array[Byte]]) should equal("maulindu")
      new String((proc !! VGET(3)).get.asInstanceOf[Array[Byte]]) should equal("debasish")

      // now add 3 more and do gets in the same transaction
      (proc !! VGET_AFTER_VADD(List("a", "b", "c"), List(0, 2, 4))).get.asInstanceOf[List[String]] should equal(List("c", "a", "ramanendu"))
      proc.stop
    }
  }

  describe("Ticket 343 Issue #6") {
    it("vector update should not ignore transaction") {
      val proc = actorOf[SampleVectorStorage]
      proc.start

      // add 4 elements in separate transactions
      (proc !! VADD("debasish")).getOrElse("VADD failed") should equal(1)
      (proc !! VADD("maulindu")).getOrElse("VADD failed") should equal(2)
      (proc !! VADD("ramanendu")).getOrElse("VADD failed") should equal(3)
      (proc !! VADD("nilanjan")).getOrElse("VADD failed") should equal(4)

      evaluating {
        (proc !! VUPD_AND_ABORT(0, "virat")).getOrElse("VUPD_AND_ABORT failed")
      } should produce[Exception]

      // update aborts and hence values will remain unchanged
      new String((proc !! VGET(0)).get.asInstanceOf[Array[Byte]]) should equal("nilanjan")
      proc.stop
    }
  }

  describe("Ticket 343 Issue #5") {
    it("vector slice() should not ignore elements added in current transaction") {
      val proc = actorOf[SampleVectorStorage]
      proc.start

      // add 4 elements in separate transactions
      (proc !! VADD("debasish")).getOrElse("VADD failed") should equal(1)
      (proc !! VADD("maulindu")).getOrElse("VADD failed") should equal(2)
      (proc !! VADD("ramanendu")).getOrElse("VADD failed") should equal(3)
      (proc !! VADD("nilanjan")).getOrElse("VADD failed") should equal(4)

      // slice with no new elements added in current transaction
      (proc !! VADD_WITH_SLICE(List(), 2, 2)).getOrElse("VADD_WITH_SLICE failed") should equal(Vector("maulindu", "debasish"))

      // slice with new elements added in current transaction
      (proc !! VADD_WITH_SLICE(List("a", "b", "c", "d"), 2, 2)).getOrElse("VADD_WITH_SLICE failed") should equal(Vector("b", "a"))
      proc.stop
    }
  }
}
@ -0,0 +1,123 @@
/**
 * Copyright (C) 2009-2010 Scalable Solutions AB <http://scalablesolutions.se>
 */

package se.scalablesolutions.akka.persistence.common

import org.scalatest.matchers.ShouldMatchers
import se.scalablesolutions.akka.util.Logging
import org.scalatest.{BeforeAndAfterEach, Spec}
import scala.util.Random

/**
 * Implementation Compatibility test for PersistentVector backend implementations.
 */
trait VectorStorageBackendTest extends Spec with ShouldMatchers with BeforeAndAfterEach with Logging {
  def storage: VectorStorageBackend[Array[Byte]]

  def dropVectors: Unit

  override def beforeEach = {
    log.info("beforeEach: dropping vectors")
    dropVectors
  }

  override def afterEach = {
    log.info("afterEach: dropping vectors")
    dropVectors
  }

  describe("A Properly functioning VectorStorageBackend") {
    it("should insertVectorStorageEntry as a logical prepend operation to the existing list") {
      val vector = "insertSingleTest"
      val rand = new Random(3).nextInt(100)
      val values = (0 to rand).toList.map {i: Int => vector + "value" + i}
      storage.getVectorStorageSizeFor(vector) should be(0)
      values.foreach {s: String => storage.insertVectorStorageEntryFor(vector, s.getBytes)}
      val shouldRetrieve = values.reverse
      (0 to rand).foreach {
        i: Int => {
          shouldRetrieve(i) should be(new String(storage.getVectorStorageEntryFor(vector, i)))
        }
      }
    }

    it("should insertVectorStorageEntries as a logical prepend operation to the existing list") {
      val vector = "insertMultiTest"
      val rand = new Random(3).nextInt(100)
      val values = (0 to rand).toList.map {i: Int => vector + "value" + i}
      storage.getVectorStorageSizeFor(vector) should be(0)
      storage.insertVectorStorageEntriesFor(vector, values.map {s: String => s.getBytes})
      val shouldRetrieve = values.reverse
      (0 to rand).foreach {
        i: Int => {
          shouldRetrieve(i) should be(new String(storage.getVectorStorageEntryFor(vector, i)))
        }
      }
    }

    it("should successfully update entries") {
      val vector = "updateTest"
      val rand = new Random(3).nextInt(100)
      val values = (0 to rand).toList.map {i: Int => vector + "value" + i}
      val urand = new Random(3).nextInt(rand)
      storage.insertVectorStorageEntriesFor(vector, values.map {s: String => s.getBytes})
      val toUpdate = "updated" + values.reverse(urand)
      storage.updateVectorStorageEntryFor(vector, urand, toUpdate.getBytes)
      toUpdate should be(new String(storage.getVectorStorageEntryFor(vector, urand)))
    }

    it("should return the correct value from getVectorStorageFor") {
      val vector = "getTest"
      val rand = new Random(3).nextInt(100)
      val values = (0 to rand).toList.map {i: Int => vector + "value" + i}
      val urand = new Random(3).nextInt(rand)
      storage.insertVectorStorageEntriesFor(vector, values.map {s: String => s.getBytes})
      values.reverse(urand) should be(new String(storage.getVectorStorageEntryFor(vector, urand)))
    }

    it("should return the correct values from getVectorStorageRangeFor") {
      val vector = "getTest"
      val rand = new Random(3).nextInt(100)
      val drand = new Random(3).nextInt(rand)
      val values = (0 to rand).toList.map {i: Int => vector + "value" + i}
      storage.insertVectorStorageEntriesFor(vector, values.map {s: String => s.getBytes})
      values.reverse should be(storage.getVectorStorageRangeFor(vector, None, None, rand + 1).map {b: Array[Byte] => new String(b)})
      (0 to drand).foreach {
        i: Int => {
          val value: String = vector + "value" + (rand - i)
          log.debug(value)
          List(value) should be(storage.getVectorStorageRangeFor(vector, Some(i), None, 1).map {b: Array[Byte] => new String(b)})
        }
      }
    }

    it("should behave properly when the range used in getVectorStorageRangeFor has indexes outside the current size of the vector") {
      //what is proper?
    }

    it("should return null when getStorageEntry is called on a null entry") {
      //What is proper?
      val vector = "nullTest"
      storage.insertVectorStorageEntryFor(vector, null)
      storage.getVectorStorageEntryFor(vector, 0) should be(null)
    }

    it("should throw a StorageException when there is an attempt to retrieve an index larger than the Vector") {
      val vector = "tooLargeRetrieve"
      storage.insertVectorStorageEntryFor(vector, null)
      evaluating {storage.getVectorStorageEntryFor(vector, 9)} should produce[StorageException]
    }

    it("should throw a StorageException when there is an attempt to update an index larger than the Vector") {
      val vector = "tooLargeUpdate"
      storage.insertVectorStorageEntryFor(vector, null)
      evaluating {storage.updateVectorStorageEntryFor(vector, 9, null)} should produce[StorageException]
    }
  }
}
@ -76,7 +76,7 @@ class PersistentFailerActor extends Transactor {
  }
}

class HbasePersistentActorSpec extends JUnitSuite with BeforeAndAfterAll {
class HbasePersistentActorSpecTestIntegration extends JUnitSuite with BeforeAndAfterAll {

  val testUtil = new HBaseTestingUtility

@ -5,7 +5,7 @@ import org.scalatest.matchers.ShouldMatchers
import org.scalatest.BeforeAndAfterAll
import org.scalatest.BeforeAndAfterEach

class HbaseStorageSpec extends
class HbaseStorageSpecTestIntegration extends
Spec with
ShouldMatchers with
BeforeAndAfterAll with

@ -36,7 +36,7 @@ case class VADD_WITH_SLICE(vsToAdd: List[String], start: Int, cnt: Int)

object Storage {
  class HbaseSampleMapStorage extends Actor {
    self.lifeCycle = Some(LifeCycle(Permanent))
    self.lifeCycle = Permanent
    val FOO_MAP = "akka.sample.map"

    private var fooMap = atomic { HbaseStorage.getMap(FOO_MAP) }

@ -119,7 +119,7 @@ object Storage {
  }

  class HbaseSampleVectorStorage extends Actor {
    self.lifeCycle = Some(LifeCycle(Permanent))
    self.lifeCycle = Permanent
    val FOO_VECTOR = "akka.sample.vector"

    private var fooVector = atomic { HbaseStorage.getVector(FOO_VECTOR) }

@ -171,7 +171,7 @@ object Storage {
import Storage._

@RunWith(classOf[JUnitRunner])
class HbaseTicket343Spec extends Spec with ShouldMatchers with BeforeAndAfterAll with BeforeAndAfterEach {
class HbaseTicket343SpecTestIntegration extends Spec with ShouldMatchers with BeforeAndAfterAll with BeforeAndAfterEach {

import org.apache.hadoop.hbase.HBaseTestingUtility

@ -10,7 +10,7 @@ import org.junit.Test
import org.apache.hadoop.hbase.HBaseTestingUtility

@RunWith(classOf[JUnitRunner])
class PersistenceSpecTest extends Spec with BeforeAndAfterAll with ShouldMatchers {
class SimpleHbaseSpecTestIntegration extends Spec with BeforeAndAfterAll with ShouldMatchers {

import org.apache.hadoop.hbase.HBaseTestingUtility
@ -36,7 +36,7 @@ case class VADD_WITH_SLICE(vsToAdd: List[String], start: Int, cnt: Int)

object Storage {
  class MongoSampleMapStorage extends Actor {
    self.lifeCycle = Some(LifeCycle(Permanent))
    self.lifeCycle = Permanent
    val FOO_MAP = "akka.sample.map"

    private var fooMap = atomic { MongoStorage.getMap(FOO_MAP) }

@ -119,7 +119,7 @@ object Storage {
  }

  class MongoSampleVectorStorage extends Actor {
    self.lifeCycle = Some(LifeCycle(Permanent))
    self.lifeCycle = Permanent
    val FOO_VECTOR = "akka.sample.vector"

    private var fooVector = atomic { MongoStorage.getVector(FOO_VECTOR) }

@ -28,7 +28,7 @@ case class SETFOO(s: String)

object SampleStorage {
  class RedisSampleStorage extends Actor {
    self.lifeCycle = Some(LifeCycle(Permanent))
    self.lifeCycle = Permanent
    val EVENT_MAP = "akka.sample.map"

    private var eventMap = atomic { RedisStorage.getMap(EVENT_MAP) }

@ -41,7 +41,7 @@ case class VADD_WITH_SLICE(vsToAdd: List[String], start: Int, cnt: Int)

object Storage {
  class RedisSampleMapStorage extends Actor {
    self.lifeCycle = Some(LifeCycle(Permanent))
    self.lifeCycle = Permanent
    val FOO_MAP = "akka.sample.map"

    private var fooMap = atomic { RedisStorage.getMap(FOO_MAP) }

@ -134,7 +134,7 @@ object Storage {
  }

  class RedisSampleVectorStorage extends Actor {
    self.lifeCycle = Some(LifeCycle(Permanent))
    self.lifeCycle = Permanent
    val FOO_VECTOR = "akka.sample.vector"

    private var fooVector = atomic { RedisStorage.getVector(FOO_VECTOR) }
@ -15,14 +15,17 @@ object VoldemortStorage extends Storage {
  def newMap: PersistentMap[ElementType, ElementType] = newMap(newUuid.toString)
  def newVector: PersistentVector[ElementType] = newVector(newUuid.toString)
  def newRef: PersistentRef[ElementType] = newRef(newUuid.toString)
  override def newQueue: PersistentQueue[ElementType] = newQueue(newUuid.toString)

  def getMap(id: String): PersistentMap[ElementType, ElementType] = newMap(id)
  def getVector(id: String): PersistentVector[ElementType] = newVector(id)
  def getRef(id: String): PersistentRef[ElementType] = newRef(id)
  override def getQueue(id: String): PersistentQueue[ElementType] = newQueue(id)

  def newMap(id: String): PersistentMap[ElementType, ElementType] = new VoldemortPersistentMap(id)
  def newVector(id: String): PersistentVector[ElementType] = new VoldemortPersistentVector(id)
  def newRef(id: String): PersistentRef[ElementType] = new VoldemortPersistentRef(id)
  override def newQueue(id: String): PersistentQueue[ElementType] = new VoldemortPersistentQueue(id)
}

@ -41,3 +44,8 @@ class VoldemortPersistentRef(id: String) extends PersistentRef[Array[Byte]] {
  val uuid = id
  val storage = VoldemortStorageBackend
}

class VoldemortPersistentQueue(id: String) extends PersistentQueue[Array[Byte]] {
  val uuid = id
  val storage = VoldemortStorageBackend
}
@ -17,14 +17,21 @@ import voldemort.versioning.Versioned
import collection.JavaConversions
import java.nio.ByteBuffer
import collection.Map
import collection.immutable.{IndexedSeq, SortedSet, TreeSet, HashMap}
import collection.mutable.{Set, HashSet, ArrayBuffer}
import java.util.{Properties, Map => JMap}
import se.scalablesolutions.akka.persistence.common.PersistentMapBinary.COrdering._
import collection.immutable._

/*
 * RequiredReads + RequiredWrites should be > ReplicationFactor for all Voldemort stores.
 * In this case all VoldemortStorageBackend operations can be retried until successful, and data should remain consistent.
 */
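// A minimal sketch of the quorum condition asserted above (helper name is
// hypothetical, not part of this commit): with replication factor n,
// required reads r and required writes w, read and write quorums are
// guaranteed to overlap only when r + w > n, which is what makes the
// blind retry strategy described above safe.
//   def hasQuorumOverlap(n: Int, r: Int, w: Int): Boolean = r + w > n
//   hasQuorumOverlap(3, 2, 2)  // true  -> a retried read cannot miss a committed write
//   hasQuorumOverlap(3, 1, 1)  // false -> a retry may observe stale data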
private[akka] object VoldemortStorageBackend extends
  MapStorageBackend[Array[Byte], Array[Byte]] with
  VectorStorageBackend[Array[Byte]] with
  RefStorageBackend[Array[Byte]] with
  QueueStorageBackend[Array[Byte]] with
  Logging {

  val bootstrapUrlsProp = "bootstrap_urls"
  val clientConfig = config.getConfigMap("akka.storage.voldemort.client") match {
@ -32,35 +39,39 @@ MapStorageBackend[Array[Byte], Array[Byte]] with
    case None => getClientConfig(new HashMap[String, String] + (bootstrapUrlsProp -> "tcp://localhost:6666"))
  }
  val refStore = config.getString("akka.storage.voldemort.store.ref", "Refs")
  val mapKeyStore = config.getString("akka.storage.voldemort.store.map-key", "MapKeys")
  val mapValueStore = config.getString("akka.storage.voldemort.store.map-value", "MapValues")
  val vectorSizeStore = config.getString("akka.storage.voldemort.store.vector-size", "VectorSizes")
  val vectorValueStore = config.getString("akka.storage.voldemort.store.vector-value", "VectorValues")
  val mapStore = config.getString("akka.storage.voldemort.store.map", "Maps")
  val vectorStore = config.getString("akka.storage.voldemort.store.vector", "Vectors")
  val queueStore = config.getString("akka.storage.voldemort.store.queue", "Queues")

  var storeClientFactory: StoreClientFactory = null
  var refClient: StoreClient[String, Array[Byte]] = null
  var mapKeyClient: StoreClient[String, Array[Byte]] = null
  var mapValueClient: StoreClient[Array[Byte], Array[Byte]] = null
  var vectorSizeClient: StoreClient[String, Array[Byte]] = null
  var vectorValueClient: StoreClient[Array[Byte], Array[Byte]] = null
  var mapClient: StoreClient[Array[Byte], Array[Byte]] = null
  var vectorClient: StoreClient[Array[Byte], Array[Byte]] = null
  var queueClient: StoreClient[Array[Byte], Array[Byte]] = null
  initStoreClients

  val nullMapValueHeader = 0x00.byteValue
  val nullMapValue: Array[Byte] = Array(nullMapValueHeader)
  val notNullMapValueHeader: Byte = 0xff.byteValue
  val underscoreBytesUTF8 = "_".getBytes("UTF-8")
  implicit val byteOrder = new Ordering[Array[Byte]] {
    override def compare(x: Array[Byte], y: Array[Byte]) = ByteUtils.compare(x, y)
  }
  val mapKeysIndex = getIndexedBytes(-1)
  val vectorSizeIndex = getIndexedBytes(-1)
  val queueHeadIndex = getIndexedBytes(-1)
  val queueTailIndex = getIndexedBytes(-2)
  //explicit implicit :)
  implicit val ordering = ArrayOrdering

  def getRefStorageFor(name: String): Option[Array[Byte]] = {
    val result: Array[Byte] = refClient.getValue(name)
    result match {
      case null => None
      case _ => Some(result)
    }
    Option(result)
  }

  def insertRefStorageFor(name: String, element: Array[Byte]) = {
    refClient.put(name, element)
    element match {
      case null => refClient.delete(name)
      case _ => refClient.put(name, element)
    }
  }

  def getMapStorageRangeFor(name: String, start: Option[Array[Byte]], finish: Option[Array[Byte]], count: Int): List[(Array[Byte], Array[Byte])] = {
@ -76,21 +87,21 @@ MapStorageBackend[Array[Byte], Array[Byte]] with

  private def getKeyValues(name: String, keys: SortedSet[Array[Byte]]): List[(Array[Byte], Array[Byte])] = {
    val all: JMap[Array[Byte], Versioned[Array[Byte]]] =
      mapValueClient.getAll(JavaConversions.asIterable(keys.map {
      mapClient.getAll(JavaConversions.asIterable(keys.map {
        mapKey => getKey(name, mapKey)
      }))

    val buf = new ArrayBuffer[(Array[Byte], Array[Byte])](all.size)
    var returned = new TreeMap[Array[Byte], Array[Byte]]()(ordering)
    JavaConversions.asMap(all).foreach {
      (entry) => {
        entry match {
          case (key: Array[Byte], versioned: Versioned[Array[Byte]]) => {
            buf += key -> versioned.getValue
          case (namePlusKey: Array[Byte], versioned: Versioned[Array[Byte]]) => {
            returned += getMapKeyFromKey(name, namePlusKey) -> getMapValueFromStored(versioned.getValue)
          }
        }
      }
    }
    buf.toList
    returned.toList
  }

  def getMapStorageSizeFor(name: String): Int = {
@ -99,10 +110,10 @@ MapStorageBackend[Array[Byte], Array[Byte]] with
  }

  def getMapStorageEntryFor(name: String, key: Array[Byte]): Option[Array[Byte]] = {
    val result: Array[Byte] = mapValueClient.getValue(getKey(name, key))
    val result: Array[Byte] = mapClient.getValue(getKey(name, key))
    result match {
      case null => None
      case _ => Some(result)
      case _ => Some(getMapValueFromStored(result))
    }
  }
@ -110,7 +121,7 @@ MapStorageBackend[Array[Byte], Array[Byte]] with
    var keys = getMapKeys(name)
    keys -= key
    putMapKeys(name, keys)
    mapValueClient.delete(getKey(name, key))
    mapClient.delete(getKey(name, key))
  }
@ -118,13 +129,13 @@ MapStorageBackend[Array[Byte], Array[Byte]] with
    val keys = getMapKeys(name)
    keys.foreach {
      key =>
        mapValueClient.delete(getKey(name, key))
        mapClient.delete(getKey(name, key))
    }
    mapKeyClient.delete(name)
    mapClient.delete(getKey(name, mapKeysIndex))
  }

  def insertMapStorageEntryFor(name: String, key: Array[Byte], value: Array[Byte]) = {
    mapValueClient.put(getKey(name, key), value)
    mapClient.put(getKey(name, key), getStoredMapValue(value))
    var keys = getMapKeys(name)
    keys += key
    putMapKeys(name, keys)
@ -133,7 +144,7 @@ MapStorageBackend[Array[Byte], Array[Byte]] with
  def insertMapStorageEntriesFor(name: String, entries: List[(Array[Byte], Array[Byte])]) = {
    val newKeys = entries.map {
      case (key, value) => {
        mapValueClient.put(getKey(name, key), value)
        mapClient.put(getKey(name, key), getStoredMapValue(value))
        key
      }
    }
@ -143,34 +154,39 @@ MapStorageBackend[Array[Byte], Array[Byte]] with
  }

  def putMapKeys(name: String, keys: SortedSet[Array[Byte]]) = {
    mapKeyClient.put(name, SortedSetSerializer.toBytes(keys))
    mapClient.put(getKey(name, mapKeysIndex), SortedSetSerializer.toBytes(keys))
  }

  def getMapKeys(name: String): SortedSet[Array[Byte]] = {
    SortedSetSerializer.fromBytes(mapKeyClient.getValue(name, Array.empty[Byte]))
    SortedSetSerializer.fromBytes(mapClient.getValue(getKey(name, mapKeysIndex), Array.empty[Byte]))
  }

  def getVectorStorageSizeFor(name: String): Int = {
    IntSerializer.fromBytes(vectorSizeClient.getValue(name, IntSerializer.toBytes(0)))
    IntSerializer.fromBytes(vectorClient.getValue(getKey(name, vectorSizeIndex), IntSerializer.toBytes(0)))
  }

  def getVectorStorageRangeFor(name: String, start: Option[Int], finish: Option[Int], count: Int): List[Array[Byte]] = {
    val size = getVectorStorageSizeFor(name)
    val st = start.getOrElse(0)
    val cnt =
    var cnt =
      if (finish.isDefined) {
        val f = finish.get
        if (f >= st) (f - st) else count
      } else {
        count
      }
    val seq: IndexedSeq[Array[Byte]] = (st until st + cnt).map {
      index => getVectorValueKey(name, index)
    if (cnt > (size - st)) {
      cnt = size - st
    }

    val all: JMap[Array[Byte], Versioned[Array[Byte]]] = vectorValueClient.getAll(JavaConversions.asIterable(seq))

    val seq: IndexedSeq[Array[Byte]] = (st until st + cnt).map {
      index => getIndexedKey(name, (size - 1) - index)
    } //read backwards

    val all: JMap[Array[Byte], Versioned[Array[Byte]]] = vectorClient.getAll(JavaConversions.asIterable(seq))

    var storage = new ArrayBuffer[Array[Byte]](seq.size)
    storage = storage.padTo(seq.size, Array.empty[Byte])
@ -189,14 +205,23 @@ MapStorageBackend[Array[Byte], Array[Byte]] with

  def getVectorStorageEntryFor(name: String, index: Int): Array[Byte] = {
    vectorValueClient.getValue(getVectorValueKey(name, index), Array.empty[Byte])
    val size = getVectorStorageSizeFor(name)
    if (size > 0 && index < size) {
      vectorClient.getValue(getIndexedKey(name, /*read backwards*/ (size - 1) - index))
    } else {
      throw new StorageException("In Vector:" + name + " No such Index:" + index)
    }
  }

  def updateVectorStorageEntryFor(name: String, index: Int, elem: Array[Byte]) = {
    val size = getVectorStorageSizeFor(name)
    vectorValueClient.put(getVectorValueKey(name, index), elem)
    if (size < index + 1) {
      vectorSizeClient.put(name, IntSerializer.toBytes(index + 1))
    if (size > 0 && index < size) {
      elem match {
        case null => vectorClient.delete(getIndexedKey(name, /*read backwards*/ (size - 1) - index))
        case _ => vectorClient.put(getIndexedKey(name, /*read backwards*/ (size - 1) - index), elem)
      }
    } else {
      throw new StorageException("In Vector:" + name + " No such Index:" + index)
    }
  }
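// Index mapping sketch for the "read backwards" scheme above: entries are
// written at ever-increasing physical slots, while logical index 0 always
// refers to the newest element, so insertion behaves as a logical prepend
// (matching VectorStorageBackendTest). With size == 4, logical index i is
// served from physical slot (size - 1) - i: 0 -> 3, 1 -> 2, 2 -> 1, 3 -> 0.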
@ -204,10 +229,12 @@ MapStorageBackend[Array[Byte], Array[Byte]] with
    var size = getVectorStorageSizeFor(name)
    elements.foreach {
      element =>
        vectorValueClient.put(getVectorValueKey(name, size), element)
        if (element != null) {
          vectorClient.put(getIndexedKey(name, size), element)
        }
        size += 1
    }
    vectorSizeClient.put(name, IntSerializer.toBytes(size))
    vectorClient.put(getKey(name, vectorSizeIndex), IntSerializer.toBytes(size))
  }

  def insertVectorStorageEntryFor(name: String, element: Array[Byte]) = {
@ -215,11 +242,88 @@ MapStorageBackend[Array[Byte], Array[Byte]] with
  }

  def remove(name: String): Boolean = {
    val mdata = getQueueMetadata(name)
    mdata.getActiveIndexes foreach {
      index =>
        queueClient.delete(getIndexedKey(name, index))
    }
    queueClient.delete(getKey(name, queueHeadIndex))
    queueClient.delete(getKey(name, queueTailIndex))
  }

  def peek(name: String, start: Int, count: Int): List[Array[Byte]] = {
    val mdata = getQueueMetadata(name)
    val ret = mdata.getPeekIndexes(start, count).toList map {
      index: Int => {
        log.debug("peeking:" + index)
        queueClient.getValue(getIndexedKey(name, index))
      }
    }
    ret
  }

  def size(name: String): Int = {
    getQueueMetadata(name).size
  }

  def dequeue(name: String): Option[Array[Byte]] = {
    val mdata = getQueueMetadata(name)
    if (mdata.canDequeue) {
      val key = getIndexedKey(name, mdata.head)
      try {
        val dequeued = queueClient.getValue(key)
        queueClient.put(getKey(name, queueHeadIndex), IntSerializer.toBytes(mdata.nextDequeue))
        Some(dequeued)
      }
      finally {
        try {
          queueClient.delete(key)
        } catch {
          //a failure to delete is ok, it just leaves a K-V in Voldemort that will be overwritten if the queue ever wraps around
          case e: Exception => log.warn(e, "caught an exception while deleting a dequeued element, however this will not cause any inconsistency in the queue")
        }
      }
    } else {
      None
    }
  }

  def enqueue(name: String, item: Array[Byte]): Option[Int] = {
    val mdata = getQueueMetadata(name)
    if (mdata.canEnqueue) {
      val key = getIndexedKey(name, mdata.tail)
      item match {
        case null => queueClient.delete(key)
        case _ => queueClient.put(key, item)
      }
      queueClient.put(getKey(name, queueTailIndex), IntSerializer.toBytes(mdata.nextEnqueue))
      Some(mdata.size + 1)
    } else {
      None
    }
  }

  def getQueueMetadata(name: String): QueueMetadata = {
    val keys = List(getKey(name, queueHeadIndex), getKey(name, queueTailIndex))
    val qdata = JavaConversions.asMap(queueClient.getAll(JavaConversions.asIterable(keys)))
    val values = keys.map {
      qdata.get(_) match {
        case Some(versioned) => IntSerializer.fromBytes(versioned.getValue)
        case None => 0
      }
    }
    QueueMetadata(values.head, values.tail.head)
  }

  /**
   * Concat owner length + owner + key so owned data will be colocated.
   * Store the length of the owner as the first bytes to work around the rare case
   * where ownerbytes1 + keybytes1 == ownerbytes2 + keybytes2 but ownerbytes1 != ownerbytes2.
   */
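// A worked example of the key layout described above (values illustrative):
// for owner "users" and map key bytes of "id42", the composed key is
//   IntSerializer.toBytes(5) ++ "users".getBytes("UTF-8") ++ "id42".getBytes
// so all keys of one owner share a common prefix and are colocated, while
// the length prefix keeps ("ab" + "c") distinct from ("a" + "bc").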
  def getKey(owner: String, key: Array[Byte]): Array[Byte] = {
    val ownerBytes: Array[Byte] = owner.getBytes("UTF-8")
    val ownerLenghtBytes: Array[Byte] = IntSerializer.toBytes(owner.length)

@ -230,12 +334,16 @@ MapStorageBackend[Array[Byte], Array[Byte]] with
    theKey
  }

  def getVectorValueKey(owner: String, index: Int): Array[Byte] = {
  def getIndexedBytes(index: Int): Array[Byte] = {
    val indexbytes = IntSerializer.toBytes(index)
    val theIndexKey = new Array[Byte](underscoreBytesUTF8.length + indexbytes.length)
    System.arraycopy(underscoreBytesUTF8, 0, theIndexKey, 0, underscoreBytesUTF8.length)
    System.arraycopy(indexbytes, 0, theIndexKey, underscoreBytesUTF8.length, indexbytes.length)
    getKey(owner, theIndexKey)
    theIndexKey
  }

  def getIndexedKey(owner: String, index: Int): Array[Byte] = {
    getKey(owner, getIndexedBytes(index))
  }

  def getIndexFromVectorValueKey(owner: String, key: Array[Byte]): Int = {

@ -244,6 +352,39 @@ MapStorageBackend[Array[Byte], Array[Byte]] with
    IntSerializer.fromBytes(indexBytes)
  }

  def getMapKeyFromKey(owner: String, key: Array[Byte]): Array[Byte] = {
    val mapKeyLength = key.length - IntSerializer.bytesPerInt - owner.getBytes("UTF-8").length
    val mapkey = new Array[Byte](mapKeyLength)
    System.arraycopy(key, key.length - mapKeyLength, mapkey, 0, mapKeyLength)
    mapkey
  }

  //wrapper for null
  def getStoredMapValue(value: Array[Byte]): Array[Byte] = {
    value match {
      case null => nullMapValue
      case value => {
        val stored = new Array[Byte](value.length + 1)
        stored(0) = notNullMapValueHeader
        System.arraycopy(value, 0, stored, 1, value.length)
        stored
      }
    }
  }

  def getMapValueFromStored(value: Array[Byte]): Array[Byte] = {
    if (value(0) == nullMapValueHeader) {
      null
    } else if (value(0) == notNullMapValueHeader) {
      val returned = new Array[Byte](value.length - 1)
      System.arraycopy(value, 1, returned, 0, value.length - 1)
      returned
    } else {
      throw new StorageException("unknown header byte on map value:" + value(0))
    }
  }
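// Round-trip sketch of the null wrapper above: every stored value carries a
// one-byte header, so a stored null can be told apart from a missing key.
//   getMapValueFromStored(getStoredMapValue(null))          // null
//   getMapValueFromStored(getStoredMapValue("x".getBytes))  // the bytes of "x"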
  def getClientConfig(configMap: Map[String, String]): Properties = {
    val properites = new Properties

@ -256,7 +397,7 @@ MapStorageBackend[Array[Byte], Array[Byte]] with
  }

  def initStoreClients() = {
    if (storeClientFactory != null) {
    if (storeClientFactory ne null) {
      storeClientFactory.close
    }
@ -270,10 +411,63 @@ MapStorageBackend[Array[Byte], Array[Byte]] with
      }
    }
    refClient = storeClientFactory.getStoreClient(refStore)
    mapKeyClient = storeClientFactory.getStoreClient(mapKeyStore)
    mapValueClient = storeClientFactory.getStoreClient(mapValueStore)
    vectorSizeClient = storeClientFactory.getStoreClient(vectorSizeStore)
    vectorValueClient = storeClientFactory.getStoreClient(vectorValueStore)
    mapClient = storeClientFactory.getStoreClient(mapStore)
    vectorClient = storeClientFactory.getStoreClient(vectorStore)
    queueClient = storeClientFactory.getStoreClient(queueStore)
  }

  case class QueueMetadata(head: Int, tail: Int) {
    //the queue is a sequence with indexes from 0 to Int.MAX_VALUE that
    //wraps around when one pointer reaches the max value.
    //head is the next slot to read from (it has an element in it);
    //tail is the next slot to write to.
    def size = {
      if (tail >= head) {
        tail - head
      } else {
        //queue has wrapped
        (Integer.MAX_VALUE - head) + (tail + 1)
      }
    }

    def canEnqueue = {
      //the -1 stops the tail from catching the head on a wrap around
      size < Integer.MAX_VALUE - 1
    }

    def canDequeue = {size > 0}

    def getActiveIndexes(): IndexedSeq[Int] = {
      if (tail >= head) {
        Range(head, tail)
      } else {
        //queue has wrapped
        val headRange = Range.inclusive(head, Integer.MAX_VALUE)
        (if (tail > 0) {headRange ++ Range(0, tail)} else {headRange})
      }
    }

    def getPeekIndexes(start: Int, count: Int): IndexedSeq[Int] = {
      val indexes = getActiveIndexes
      if (indexes.size < start) {IndexedSeq.empty[Int]} else {indexes.drop(start).take(count)}
    }

    def nextEnqueue = {
      tail match {
        case Integer.MAX_VALUE => 0
        case _ => tail + 1
      }
    }

    def nextDequeue = {
      head match {
        case Integer.MAX_VALUE => 0
        case _ => head + 1
      }
    }
  }
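// A worked example of the wrap-around size computation above (values
// illustrative): with head near Int.MaxValue and tail already wrapped,
// size counts the slots from head up to MAX_VALUE plus those before tail:
//   QueueMetadata(head = Int.MaxValue - 1, tail = 2).size
//     == (Int.MaxValue - (Int.MaxValue - 1)) + (2 + 1) == 4
//   QueueMetadata(head = 3, tail = 7).size == 4  // unwrapped case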
  object IntSerializer {

@ -309,6 +503,8 @@ MapStorageBackend[Array[Byte], Array[Byte]] with
    }

    def fromBytes(bytes: Array[Byte]): SortedSet[Array[Byte]] = {
      import se.scalablesolutions.akka.persistence.common.PersistentMapBinary.COrdering._

      var set = new TreeSet[Array[Byte]]
      if (bytes.length > IntSerializer.bytesPerInt) {
        var pos = 0
@ -15,9 +15,9 @@
      <value-serializer>
        <type>identity</type>
      </value-serializer>
    </store>
    </store>
    <store>
      <name>MapValues</name>
      <name>Maps</name>
      <replication-factor>1</replication-factor>
      <preferred-reads>1</preferred-reads>
      <required-reads>1</required-reads>

@ -33,24 +33,7 @@
      </value-serializer>
    </store>
    <store>
      <name>MapKeys</name>
      <replication-factor>1</replication-factor>
      <preferred-reads>1</preferred-reads>
      <required-reads>1</required-reads>
      <preferred-writes>1</preferred-writes>
      <required-writes>1</required-writes>
      <persistence>memory</persistence>
      <routing>client</routing>
      <key-serializer>
        <type>string</type>
        <schema-info>utf8</schema-info>
      </key-serializer>
      <value-serializer>
        <type>identity</type>
      </value-serializer>
    </store>
    <store>
      <name>VectorValues</name>
      <name>Vectors</name>
      <replication-factor>1</replication-factor>
      <preferred-reads>1</preferred-reads>
      <required-reads>1</required-reads>

@ -66,7 +49,7 @@
      </value-serializer>
    </store>
    <store>
      <name>VectorSizes</name>
      <name>Queues</name>
      <replication-factor>1</replication-factor>
      <preferred-reads>1</preferred-reads>
      <required-reads>1</required-reads>

@ -75,11 +58,11 @@
      <persistence>memory</persistence>
      <routing>client</routing>
      <key-serializer>
        <type>string</type>
        <schema-info>utf8</schema-info>
        <type>identity</type>
      </key-serializer>
      <value-serializer>
        <type>identity</type>
      </value-serializer>
    </store>

  </stores>
@ -1,20 +1,20 @@
package se.scalablesolutions.akka.persistence.voldemort

import org.scalatest.matchers.ShouldMatchers
import voldemort.server.{VoldemortServer, VoldemortConfig}
import org.scalatest.{Suite, BeforeAndAfterAll, FunSuite}
import org.scalatest.{Suite, BeforeAndAfterAll}
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import voldemort.utils.Utils
import java.io.File
import se.scalablesolutions.akka.util.{Logging}
import collection.JavaConversions
import voldemort.store.memory.InMemoryStorageConfiguration
import voldemort.client.protocol.admin.{AdminClientConfig, AdminClient}

@RunWith(classOf[JUnitRunner])
trait EmbeddedVoldemort extends BeforeAndAfterAll with Logging {
  this: Suite =>
  var server: VoldemortServer = null
  var admin: AdminClient = null

  override protected def beforeAll(): Unit = {

@ -28,6 +28,7 @@ trait EmbeddedVoldemort extends BeforeAndAfterAll with Logging {
      server = new VoldemortServer(config)
      server.start
      VoldemortStorageBackend.initStoreClients
      admin = new AdminClient(VoldemortStorageBackend.clientConfig.getProperty(VoldemortStorageBackend.bootstrapUrlsProp), new AdminClientConfig)
      log.info("Started")
    } catch {
      case e => log.error(e, "Error Starting Voldemort")

@ -36,6 +37,7 @@ trait EmbeddedVoldemort extends BeforeAndAfterAll with Logging {
  }

  override protected def afterAll(): Unit = {
    admin.stop
    server.stop
  }
}
@ -108,12 +108,11 @@ Spec with
  override def beforeEach {
    removeMapStorageFor(state)
    var size = getVectorStorageSizeFor(tx)
    (0 to size).foreach {
    (-1 to size).foreach {
      index => {
        vectorValueClient.delete(getVectorValueKey(tx, index))
        vectorClient.delete(getIndexedKey(tx, index))
      }
    }
    vectorSizeClient.delete(tx)
  }

  override def afterEach {
@ -1,87 +0,0 @@
package se.scalablesolutions.akka.persistence.voldemort

import org.scalatest.FunSuite
import org.scalatest.matchers.ShouldMatchers
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import se.scalablesolutions.akka.persistence.voldemort.VoldemortStorageBackend._
import se.scalablesolutions.akka.actor.{newUuid, Uuid}
import collection.immutable.TreeSet
import VoldemortStorageBackendSuite._

import se.scalablesolutions.akka.stm._
import se.scalablesolutions.akka.stm.global._
import se.scalablesolutions.akka.config.ScalaConfig._
import se.scalablesolutions.akka.persistence.common._
import se.scalablesolutions.akka.util.Logging
import se.scalablesolutions.akka.config.Config.config

@RunWith(classOf[JUnitRunner])
class VoldemortPersistentDatastructureSuite extends FunSuite with ShouldMatchers with EmbeddedVoldemort with Logging {
  test("persistentRefs work as expected") {
    val name = newUuid.toString
    val one = "one".getBytes
    atomic {
      val ref = VoldemortStorage.getRef(name)
      ref.isDefined should be(false)
      ref.swap(one)
      ref.get match {
        case Some(bytes) => bytes should be(one)
        case None => true should be(false)
      }
    }
    val two = "two".getBytes
    atomic {
      val ref = VoldemortStorage.getRef(name)
      ref.isDefined should be(true)
      ref.swap(two)
      ref.get match {
        case Some(bytes) => bytes should be(two)
        case None => true should be(false)
      }
    }
  }

  test("Persistent Vectors function as expected") {
    val name = newUuid.toString
    val one = "one".getBytes
    val two = "two".getBytes
    atomic {
      val vec = VoldemortStorage.getVector(name)
      vec.add(one)
    }
    atomic {
      val vec = VoldemortStorage.getVector(name)
      vec.size should be(1)
      vec.add(two)
    }
    atomic {
      val vec = VoldemortStorage.getVector(name)

      vec.get(0) should be(one)
      vec.get(1) should be(two)
      vec.size should be(2)
      vec.update(0, two)
    }

    atomic {
      val vec = VoldemortStorage.getVector(name)
      vec.get(0) should be(two)
      vec.get(1) should be(two)
      vec.size should be(2)
      vec.update(0, Array.empty[Byte])
      vec.update(1, Array.empty[Byte])
    }

    atomic {
      val vec = VoldemortStorage.getVector(name)
      vec.get(0) should be(Array.empty[Byte])
      vec.get(1) should be(Array.empty[Byte])
      vec.size should be(2)
    }
  }
}
@@ -0,0 +1,49 @@
package se.scalablesolutions.akka.persistence.voldemort


import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import se.scalablesolutions.akka.persistence.common.{QueueStorageBackendTest, VectorStorageBackendTest, MapStorageBackendTest, RefStorageBackendTest}

@RunWith(classOf[JUnitRunner])
class VoldemortRefStorageBackendTest extends RefStorageBackendTest with EmbeddedVoldemort {
def dropRefs = {
admin.truncate(0, VoldemortStorageBackend.refStore)
}

def storage = VoldemortStorageBackend
}

@RunWith(classOf[JUnitRunner])
class VoldemortMapStorageBackendTest extends MapStorageBackendTest with EmbeddedVoldemort {
def dropMaps = {
admin.truncate(0, VoldemortStorageBackend.mapStore)
}

def storage = VoldemortStorageBackend
}

@RunWith(classOf[JUnitRunner])
class VoldemortVectorStorageBackendTest extends VectorStorageBackendTest with EmbeddedVoldemort {
def dropVectors = {
admin.truncate(0, VoldemortStorageBackend.vectorStore)
}

def storage = VoldemortStorageBackend
}

@RunWith(classOf[JUnitRunner])
class VoldemortQueueStorageBackendTest extends QueueStorageBackendTest with EmbeddedVoldemort {
def dropQueues = {
admin.truncate(0, VoldemortStorageBackend.queueStore)
}

def storage = VoldemortStorageBackend
}
@@ -8,6 +8,7 @@ import se.scalablesolutions.akka.persistence.voldemort.VoldemortStorageBackend._
import se.scalablesolutions.akka.util.{Logging}
import collection.immutable.TreeSet
import VoldemortStorageBackendSuite._
import scala.None

@RunWith(classOf[JUnitRunner])
class VoldemortStorageBackendSuite extends FunSuite with ShouldMatchers with EmbeddedVoldemort with Logging {
@@ -34,8 +35,8 @@ class VoldemortStorageBackendSuite extends FunSuite with ShouldMatchers with Emb
test("that map key storage and retrieval works") {
val key = "testmapKey"
val mapKeys = new TreeSet[Array[Byte]] + bytes("key1")
mapKeyClient.delete(key)
mapKeyClient.getValue(key, SortedSetSerializer.toBytes(emptySet)) should equal(SortedSetSerializer.toBytes(emptySet))
mapClient.delete(getKey(key, mapKeysIndex))
mapClient.getValue(getKey(key, mapKeysIndex), SortedSetSerializer.toBytes(emptySet)) should equal(SortedSetSerializer.toBytes(emptySet))
putMapKeys(key, mapKeys)
getMapKeys(key) should equal(mapKeys)
}

@@ -43,8 +44,8 @@ class VoldemortStorageBackendSuite extends FunSuite with ShouldMatchers with Emb
test("that map value storage and retrieval works") {
val key = bytes("keyForTestingMapValueClient")
val value = bytes("value for testing map value client")
mapValueClient.put(key, value)
mapValueClient.getValue(key, empty) should equal(value)
mapClient.put(key, value)
mapClient.getValue(key, empty) should equal(value)
}

@@ -82,38 +83,27 @@ class VoldemortStorageBackendSuite extends FunSuite with ShouldMatchers with Emb

}

test("that vector size storage and retrieval works") {
val key = "vectorKey"
val size = IntSerializer.toBytes(17)
vectorSizeClient.delete(key)
vectorSizeClient.getValue(key, empty) should equal(empty)
vectorSizeClient.put(key, size)
vectorSizeClient.getValue(key) should equal(size)
}

test("that vector value storage and retrieval works") {
val key = "vectorValueKey"
val index = 3
val value = bytes("some bytes")
val vecKey = getVectorValueKey(key, index)
val vecKey = getIndexedKey(key, index)
getIndexFromVectorValueKey(key, vecKey) should be(index)
vectorValueClient.delete(vecKey)
vectorValueClient.getValue(vecKey, empty) should equal(empty)
vectorValueClient.put(vecKey, value)
vectorValueClient.getValue(vecKey) should equal(value)
vectorClient.delete(vecKey)
vectorClient.getValue(vecKey, empty) should equal(empty)
vectorClient.put(vecKey, value)
vectorClient.getValue(vecKey) should equal(value)
}
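The rename from getVectorValueKey to getIndexedKey suggests a composite key layout shared by all indexed entries. A sketch under that assumption (not the actual backend code), appending a big-endian index to the logical key:

    import java.nio.ByteBuffer

    // Assumed layout: logical key bytes followed by a 4-byte big-endian index,
    // giving each vector slot its own Voldemort key.
    def getIndexedKey(key: String, index: Int): Array[Byte] = {
      val keyBytes = key.getBytes("UTF-8")
      ByteBuffer.allocate(keyBytes.length + 4).put(keyBytes).putInt(index).array
    }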
test("PersistentVector apis function as expected") {
val key = "vectorApiKey"
val value = bytes("Some bytes we want to store in a vector")
val updatedValue = bytes("Some updated bytes we want to store in a vector")
vectorSizeClient.delete(key)
vectorValueClient.delete(getVectorValueKey(key, 0))
vectorValueClient.delete(getVectorValueKey(key, 1))
getVectorStorageEntryFor(key, 0) should be(empty)
getVectorStorageEntryFor(key, 1) should be(empty)
getVectorStorageRangeFor(key, None, None, 1).head should be(empty)

vectorClient.delete(getKey(key, vectorSizeIndex))
vectorClient.delete(getIndexedKey(key, 0))
vectorClient.delete(getIndexedKey(key, 1))

insertVectorStorageEntryFor(key, value)
//again
insertVectorStorageEntryFor(key, value)
@@ -134,6 +124,44 @@ class VoldemortStorageBackendSuite extends FunSuite with ShouldMatchers with Emb

}

test("Persistent Queue apis function as expected") {
val key = "queueApiKey"
val value = bytes("some bytes even")
val valueOdd = bytes("some bytes odd")

remove(key)
VoldemortStorageBackend.size(key) should be(0)
enqueue(key, value) should be(Some(1))
VoldemortStorageBackend.size(key) should be(1)
enqueue(key, valueOdd) should be(Some(2))
VoldemortStorageBackend.size(key) should be(2)
peek(key, 0, 1)(0) should be(value)
peek(key, 1, 1)(0) should be(valueOdd)
dequeue(key).get should be(value)
VoldemortStorageBackend.size(key) should be(1)
dequeue(key).get should be(valueOdd)
VoldemortStorageBackend.size(key) should be(0)
dequeue(key) should be(None)
queueClient.put(getKey(key, queueHeadIndex), IntSerializer.toBytes(Integer.MAX_VALUE))
queueClient.put(getKey(key, queueTailIndex), IntSerializer.toBytes(Integer.MAX_VALUE))
VoldemortStorageBackend.size(key) should be(0)
enqueue(key, value) should be(Some(1))
VoldemortStorageBackend.size(key) should be(1)
enqueue(key, valueOdd) should be(Some(2))
VoldemortStorageBackend.size(key) should be(2)
peek(key, 0, 1)(0) should be(value)
peek(key, 1, 1)(0) should be(valueOdd)
dequeue(key).get should be(value)
VoldemortStorageBackend.size(key) should be(1)
dequeue(key).get should be(valueOdd)
VoldemortStorageBackend.size(key) should be(0)
dequeue(key) should be(None)

}

}
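The second half of the queue test seeds head and tail at Integer.MAX_VALUE to exercise index wrap-around. A sketch of why this can work, assuming head and tail are plain Int counters and size is their difference:

    // Two's-complement subtraction keeps (tail - head) correct even after the
    // counters wrap past Integer.MAX_VALUE into negative territory.
    def queueSize(head: Int, tail: Int): Int = tail - head

    queueSize(Integer.MAX_VALUE, Integer.MAX_VALUE)     // 0
    queueSize(Integer.MAX_VALUE, Integer.MAX_VALUE + 1) // 1 (tail wrapped to MIN_VALUE)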
object VoldemortStorageBackendSuite {
@@ -0,0 +1,22 @@
/**
 * Copyright (C) 2009-2010 Scalable Solutions AB <http://scalablesolutions.se>
 */

package se.scalablesolutions.akka.persistence.voldemort

import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import se.scalablesolutions.akka.persistence.common._

@RunWith(classOf[JUnitRunner])
class VoldemortTicket343Test extends Ticket343Test with EmbeddedVoldemort {
def dropMapsAndVectors: Unit = {
admin.truncate(0, VoldemortStorageBackend.mapStore)
admin.truncate(0, VoldemortStorageBackend.vectorStore)
}

def getVector: (String) => PersistentVector[Array[Byte]] = VoldemortStorage.getVector

def getMap: (String) => PersistentMap[Array[Byte], Array[Byte]] = VoldemortStorage.getMap
}
@@ -23,7 +23,7 @@ message RemoteActorRefProtocol {
}

/**
 * Defines a remote ActorRef that "remembers" and uses its original typed Actor instance
 * Defines a remote Typed ActorRef that "remembers" and uses its original typed Actor instance
 * on the original node.
 */
message RemoteTypedActorRefProtocol {
@@ -241,7 +241,7 @@ object Cluster extends Cluster with Logging {
Some(Supervisor(
SupervisorConfig(
RestartStrategy(OneForOne, 5, 1000, List(classOf[Exception])),
Supervise(actor, LifeCycle(Permanent)) :: Nil)))
Supervise(actor, Permanent) :: Nil)))

private[this] def clusterActor = if (clusterActorRef.isEmpty) None else Some(clusterActorRef.get.actor.asInstanceOf[ClusterActor])
@@ -7,7 +7,6 @@ package se.scalablesolutions.akka.remote
import se.scalablesolutions.akka.remote.protocol.RemoteProtocol.{ActorType => ActorTypeProtocol, _}
import se.scalablesolutions.akka.actor.{Exit, Actor, ActorRef, ActorType, RemoteActorRef, IllegalActorStateException}
import se.scalablesolutions.akka.dispatch.{DefaultCompletableFuture, CompletableFuture}
import se.scalablesolutions.akka.util.{ListenerManagement, Logging, Duration}
import se.scalablesolutions.akka.actor.{Uuid,newUuid,uuidFrom}
import se.scalablesolutions.akka.config.Config._
import se.scalablesolutions.akka.serialization.RemoteActorSerialization._

@@ -31,6 +30,7 @@ import java.util.concurrent.atomic.AtomicLong
import scala.collection.mutable.{HashSet, HashMap}
import scala.reflect.BeanProperty
import se.scalablesolutions.akka.actor._
import se.scalablesolutions.akka.util._

/**
 * Life-cycle events for RemoteClient.
@@ -63,7 +63,7 @@ object RemoteClient extends Logging {
val RECONNECT_DELAY = Duration(config.getInt("akka.remote.client.reconnect-delay", 5), TIME_UNIT)

private val remoteClients = new HashMap[String, RemoteClient]
private val remoteActors = new HashMap[RemoteServer.Address, HashSet[Uuid]]
private val remoteActors = new HashMap[Address, HashSet[Uuid]]

def actorFor(classNameOrServiceId: String, hostname: String, port: Int): ActorRef =
actorFor(classNameOrServiceId, classNameOrServiceId, 5000L, hostname, port, None)
@@ -163,16 +163,16 @@ object RemoteClient extends Logging {
}

def register(hostname: String, port: Int, uuid: Uuid) = synchronized {
actorsFor(RemoteServer.Address(hostname, port)) += uuid
actorsFor(Address(hostname, port)) += uuid
}

private[akka] def unregister(hostname: String, port: Int, uuid: Uuid) = synchronized {
val set = actorsFor(RemoteServer.Address(hostname, port))
val set = actorsFor(Address(hostname, port))
set -= uuid
if (set.isEmpty) shutdownClientFor(new InetSocketAddress(hostname, port))
}

private[akka] def actorsFor(remoteServerAddress: RemoteServer.Address): HashSet[Uuid] = {
private[akka] def actorsFor(remoteServerAddress: Address): HashSet[Uuid] = {
val set = remoteActors.get(remoteServerAddress)
if (set.isDefined && (set.get ne null)) set.get
else {
@@ -200,56 +200,52 @@ class RemoteClient private[akka] (
private val remoteAddress = new InetSocketAddress(hostname, port)

//FIXME rewrite to a wrapper object (minimize volatile access and maximize encapsulation)
@volatile private[remote] var isRunning = false
@volatile private var bootstrap: ClientBootstrap = _
@volatile private[remote] var connection: ChannelFuture = _
@volatile private[remote] var openChannels: DefaultChannelGroup = _
@volatile private var timer: HashedWheelTimer = _
private[remote] val runSwitch = new Switch()

private[remote] def isRunning = runSwitch.isOn

private val reconnectionTimeWindow = Duration(config.getInt(
"akka.remote.client.reconnection-time-window", 600), TIME_UNIT).toMillis
@volatile private var reconnectionTimeWindowStart = 0L

def connect = synchronized {
if (!isRunning) {
openChannels = new DefaultChannelGroup(classOf[RemoteClient].getName)
timer = new HashedWheelTimer
bootstrap = new ClientBootstrap(
new NioClientSocketChannelFactory(
Executors.newCachedThreadPool,Executors.newCachedThreadPool
)
def connect = runSwitch switchOn {
openChannels = new DefaultChannelGroup(classOf[RemoteClient].getName)
timer = new HashedWheelTimer
bootstrap = new ClientBootstrap(
new NioClientSocketChannelFactory(
Executors.newCachedThreadPool,Executors.newCachedThreadPool
)
bootstrap.setPipelineFactory(new RemoteClientPipelineFactory(name, futures, supervisors, bootstrap, remoteAddress, timer, this))
bootstrap.setOption("tcpNoDelay", true)
bootstrap.setOption("keepAlive", true)
connection = bootstrap.connect(remoteAddress)
log.info("Starting remote client connection to [%s:%s]", hostname, port)
// Wait until the connection attempt succeeds or fails.
val channel = connection.awaitUninterruptibly.getChannel
openChannels.add(channel)
if (!connection.isSuccess) {
notifyListeners(RemoteClientError(connection.getCause, this))
log.error(connection.getCause, "Remote client connection to [%s:%s] has failed", hostname, port)
}
notifyListeners(RemoteClientStarted(this))
isRunning = true
)
bootstrap.setPipelineFactory(new RemoteClientPipelineFactory(name, futures, supervisors, bootstrap, remoteAddress, timer, this))
bootstrap.setOption("tcpNoDelay", true)
bootstrap.setOption("keepAlive", true)
connection = bootstrap.connect(remoteAddress)
log.info("Starting remote client connection to [%s:%s]", hostname, port)
// Wait until the connection attempt succeeds or fails.
val channel = connection.awaitUninterruptibly.getChannel
openChannels.add(channel)
if (!connection.isSuccess) {
notifyListeners(RemoteClientError(connection.getCause, this))
log.error(connection.getCause, "Remote client connection to [%s:%s] has failed", hostname, port)
}
notifyListeners(RemoteClientStarted(this))
}

def shutdown = synchronized {
def shutdown = runSwitch switchOff {
log.info("Shutting down %s", name)
if (isRunning) {
isRunning = false
notifyListeners(RemoteClientShutdown(this))
timer.stop
timer = null
openChannels.close.awaitUninterruptibly
openChannels = null
bootstrap.releaseExternalResources
bootstrap = null
connection = null
log.info("%s has been shut down", name)
}
notifyListeners(RemoteClientShutdown(this))
timer.stop
timer = null
openChannels.close.awaitUninterruptibly
openChannels = null
bootstrap.releaseExternalResources
bootstrap = null
connection = null
log.info("%s has been shut down", name)
}

@deprecated("Use addListener instead")
@@ -423,7 +419,7 @@ class RemoteClientHandler(
}
}

override def channelClosed(ctx: ChannelHandlerContext, event: ChannelStateEvent) = if (client.isRunning) {
override def channelClosed(ctx: ChannelHandlerContext, event: ChannelStateEvent) = client.runSwitch ifOn {
if (client.isWithinReconnectionTimeWindow) {
timer.newTimeout(new TimerTask() {
def run(timeout: Timeout) = {
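The synchronized blocks guarding a volatile isRunning flag are replaced by a runSwitch, so start/stop transitions and their cleanup run exactly once. An illustrative reduction of what such a Switch can look like (not the actual akka.util.Switch source):

    import java.util.concurrent.atomic.AtomicBoolean

    class Switch(startAsOn: Boolean = false) {
      private val switch = new AtomicBoolean(startAsOn)

      // Runs the action only if the state actually flips off -> on.
      def switchOn(action: => Unit): Boolean = synchronized {
        if (switch.compareAndSet(false, true)) { action; true } else false
      }

      // Runs the action only if the state actually flips on -> off.
      def switchOff(action: => Unit): Boolean = synchronized {
        if (switch.compareAndSet(true, false)) { action; true } else false
      }

      // Runs the action only while the switch is on (e.g. channelClosed above).
      def ifOn(action: => Unit): Unit = if (switch.get) action

      def isOn: Boolean = switch.get
    }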
@@ -10,12 +10,13 @@ import java.util.concurrent.{ConcurrentHashMap, Executors}
import java.util.{Map => JMap}

import se.scalablesolutions.akka.actor.{
Actor, TypedActor, ActorRef, IllegalActorStateException, RemoteActorSystemMessage,uuidFrom,Uuid}
Actor, TypedActor, ActorRef, IllegalActorStateException, RemoteActorSystemMessage, uuidFrom, Uuid, ActorRegistry}
import se.scalablesolutions.akka.actor.Actor._
import se.scalablesolutions.akka.util._
import se.scalablesolutions.akka.remote.protocol.RemoteProtocol._
import se.scalablesolutions.akka.remote.protocol.RemoteProtocol.ActorType._
import se.scalablesolutions.akka.config.Config._
import se.scalablesolutions.akka.dispatch.{DefaultCompletableFuture, CompletableFuture}
import se.scalablesolutions.akka.serialization.RemoteActorSerialization
import se.scalablesolutions.akka.serialization.RemoteActorSerialization._
@@ -30,7 +31,6 @@ import org.jboss.netty.handler.ssl.SslHandler

import scala.collection.mutable.Map
import scala.reflect.BeanProperty
import se.scalablesolutions.akka.dispatch.{DefaultCompletableFuture, CompletableFuture}

/**
 * Use this object if you need a single remote server on a specific node.
@@ -66,7 +66,8 @@ object RemoteNode extends RemoteServer
 *
 * @author <a href="http://jonasboner.com">Jonas Bonér</a>
 */
object RemoteServer {
object
RemoteServer {
val UUID_PREFIX = "uuid:"
val HOSTNAME = config.getString("akka.remote.server.hostname", "localhost")
val PORT = config.getInt("akka.remote.server.port", 9999)
@@ -103,44 +104,9 @@ object RemoteServer {
} else */false
}

object Address {
def apply(hostname: String, port: Int) = new Address(hostname, port)
}

class Address(val hostname: String, val port: Int) {
override def hashCode: Int = {
var result = HashCode.SEED
result = HashCode.hash(result, hostname)
result = HashCode.hash(result, port)
result
}
override def equals(that: Any): Boolean = {
that != null &&
that.isInstanceOf[Address] &&
that.asInstanceOf[Address].hostname == hostname &&
that.asInstanceOf[Address].port == port
}
}

private class RemoteActorSet {
private[RemoteServer] val actors = new ConcurrentHashMap[String, ActorRef]
private[RemoteServer] val actorsByUuid = new ConcurrentHashMap[String, ActorRef]
private[RemoteServer] val typedActors = new ConcurrentHashMap[String, AnyRef]
private[RemoteServer] val typedActorsByUuid = new ConcurrentHashMap[String, AnyRef]
}

private val guard = new ReadWriteGuard
private val remoteActorSets = Map[Address, RemoteActorSet]()
private val remoteServers = Map[Address, RemoteServer]()

private[akka] def registerActorByUuid(address: InetSocketAddress, uuid: String, actor: ActorRef) = guard.withWriteGuard {
actorsFor(RemoteServer.Address(address.getHostName, address.getPort)).actorsByUuid.put(uuid, actor)
}

private[akka] def registerTypedActorByUuid(address: InetSocketAddress, uuid: String, typedActor: AnyRef) = guard.withWriteGuard {
actorsFor(RemoteServer.Address(address.getHostName, address.getPort)).typedActors.put(uuid, typedActor)
}

private[akka] def getOrCreateServer(address: InetSocketAddress): RemoteServer = guard.withWriteGuard {
serverFor(address) match {
case Some(server) => server

@@ -162,10 +128,7 @@ object RemoteServer {
private[akka] def unregister(hostname: String, port: Int) = guard.withWriteGuard {
remoteServers.remove(Address(hostname, port))
}

private def actorsFor(remoteServerAddress: RemoteServer.Address): RemoteActorSet = {
remoteActorSets.getOrElseUpdate(remoteServerAddress,new RemoteActorSet)
}

}
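Address moves out of RemoteServer into se.scalablesolutions.akka.util (hence the util._ import above); its hand-rolled hashCode/equals give it value semantics. A small usage sketch under that assumption:

    // Two Address instances with the same hostname and port are equal, so they
    // hit the same entry in maps such as remoteServers and remoteActorSets.
    val a1 = Address("localhost", 9999)
    val a2 = new Address("localhost", 9999)
    assert(a1 == a2 && a1.hashCode == a2.hashCode)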
/**
@@ -198,7 +161,7 @@ class RemoteServer extends Logging with ListenerManagement {
import RemoteServer._
def name = "RemoteServer@" + hostname + ":" + port

private[akka] var address = RemoteServer.Address(RemoteServer.HOSTNAME,RemoteServer.PORT)
private[akka] var address = Address(RemoteServer.HOSTNAME,RemoteServer.PORT)

def hostname = address.hostname
def port = address.port
@@ -237,7 +200,7 @@ class RemoteServer extends Logging with ListenerManagement {
private def start(_hostname: String, _port: Int, loader: Option[ClassLoader]): RemoteServer = synchronized {
try {
if (!_isRunning) {
address = RemoteServer.Address(_hostname,_port)
address = Address(_hostname,_port)
log.info("Starting remote server at [%s:%s]", hostname, port)
RemoteServer.register(hostname, port, this)
val pipelineFactory = new RemoteServerPipelineFactory(
@@ -380,10 +343,10 @@ class RemoteServer extends Logging with ListenerManagement {

protected[akka] override def notifyListeners(message: => Any): Unit = super.notifyListeners(message)

private[akka] def actors() = RemoteServer.actorsFor(address).actors
private[akka] def actorsByUuid() = RemoteServer.actorsFor(address).actorsByUuid
private[akka] def typedActors() = RemoteServer.actorsFor(address).typedActors
private[akka] def typedActorsByUuid() = RemoteServer.actorsFor(address).typedActorsByUuid
private[akka] def actors() = ActorRegistry.actors(address)
private[akka] def actorsByUuid() = ActorRegistry.actorsByUuid(address)
private[akka] def typedActors() = ActorRegistry.typedActors(address)
private[akka] def typedActorsByUuid() = ActorRegistry.typedActorsByUuid(address)
}

object RemoteServerSslContext {
@@ -610,6 +573,29 @@ class RemoteServerHandler(
server.typedActorsByUuid().get(uuid)
}

private def findActorByIdOrUuid(id: String, uuid: String) : ActorRef = {
var actorRefOrNull = if (id.startsWith(UUID_PREFIX)) {
findActorByUuid(id.substring(UUID_PREFIX.length))
} else {
findActorById(id)
}
if (actorRefOrNull eq null) {
actorRefOrNull = findActorByUuid(uuid)
}
actorRefOrNull
}

private def findTypedActorByIdOrUuid(id: String, uuid: String) : AnyRef = {
var actorRefOrNull = if (id.startsWith(UUID_PREFIX)) {
findTypedActorByUuid(id.substring(UUID_PREFIX.length))
} else {
findTypedActorById(id)
}
if (actorRefOrNull eq null) {
actorRefOrNull = findTypedActorByUuid(uuid)
}
actorRefOrNull
}

/**
 * Creates a new instance of the actor with name, uuid and timeout specified as arguments.
@@ -625,11 +611,7 @@ class RemoteServerHandler(
val name = actorInfo.getTarget
val timeout = actorInfo.getTimeout

val actorRefOrNull = if (id.startsWith(UUID_PREFIX)) {
findActorByUuid(id.substring(UUID_PREFIX.length))
} else {
findActorById(id)
}
val actorRefOrNull = findActorByIdOrUuid(id, uuidFrom(uuid.getHigh,uuid.getLow).toString)

if (actorRefOrNull eq null) {
try {
@@ -641,7 +623,7 @@ class RemoteServerHandler(
actorRef.id = id
actorRef.timeout = timeout
actorRef.remoteAddress = None
server.actors.put(id, actorRef) // register by id
server.actorsByUuid.put(actorRef.uuid.toString, actorRef) // register by uuid
actorRef
} catch {
case e =>
@@ -656,11 +638,7 @@ class RemoteServerHandler(
val uuid = actorInfo.getUuid
val id = actorInfo.getId

val typedActorOrNull = if (id.startsWith(UUID_PREFIX)) {
findTypedActorByUuid(id.substring(UUID_PREFIX.length))
} else {
findTypedActorById(id)
}
val typedActorOrNull = findTypedActorByIdOrUuid(id, uuidFrom(uuid.getHigh,uuid.getLow).toString)

if (typedActorOrNull eq null) {
val typedActorInfo = actorInfo.getTypedActorInfo
@@ -677,7 +655,7 @@ class RemoteServerHandler(

val newInstance = TypedActor.newInstance(
interfaceClass, targetClass.asInstanceOf[Class[_ <: TypedActor]], actorInfo.getTimeout).asInstanceOf[AnyRef]
server.typedActors.put(id, newInstance) // register by id
server.typedActors.put(uuidFrom(uuid.getHigh,uuid.getLow).toString, newInstance) // register by uuid
newInstance
} catch {
case e =>
@@ -9,7 +9,6 @@ import org.codehaus.jackson.map.ObjectMapper
import com.google.protobuf.Message

import reflect.Manifest
import sbinary.DefaultProtocol

import java.io.{StringWriter, ByteArrayOutputStream, ObjectOutputStream}
@@ -114,7 +113,7 @@ object Serializable {
 * @author <a href="http://jonasboner.com">Jonas Bonér</a>
 */
trait ScalaJSON[T] extends JSON {
def toJSON: String = new String(toBytes, "UTF-8")
def toJSON: String
def fromJSON(js: String): T
def toBytes: Array[Byte]
def fromBytes(bytes: Array[Byte]): T
@@ -91,16 +91,10 @@ object ActorSerialization {
private[akka] def toSerializedActorRefProtocol[T <: Actor](
actorRef: ActorRef, format: Format[T], serializeMailBox: Boolean = true): SerializedActorRefProtocol = {
val lifeCycleProtocol: Option[LifeCycleProtocol] = {
def setScope(builder: LifeCycleProtocol.Builder, scope: Scope) = scope match {
case Permanent => builder.setLifeCycle(LifeCycleType.PERMANENT)
case Temporary => builder.setLifeCycle(LifeCycleType.TEMPORARY)
}
val builder = LifeCycleProtocol.newBuilder
actorRef.lifeCycle match {
case Some(LifeCycle(scope)) =>
setScope(builder, scope)
Some(builder.build)
case None => None
case Permanent => Some(LifeCycleProtocol.newBuilder.setLifeCycle(LifeCycleType.PERMANENT).build)
case Temporary => Some(LifeCycleProtocol.newBuilder.setLifeCycle(LifeCycleType.TEMPORARY).build)
case UndefinedLifeCycle => None //No need to send the undefined lifecycle over the wire //builder.setLifeCycle(LifeCycleType.UNDEFINED)
}
}
@@ -164,11 +158,12 @@ object ActorSerialization {

val lifeCycle =
if (protocol.hasLifeCycle) {
val lifeCycleProtocol = protocol.getLifeCycle
Some(if (lifeCycleProtocol.getLifeCycle == LifeCycleType.PERMANENT) LifeCycle(Permanent)
else if (lifeCycleProtocol.getLifeCycle == LifeCycleType.TEMPORARY) LifeCycle(Temporary)
else throw new IllegalActorStateException("LifeCycle type is not valid: " + lifeCycleProtocol.getLifeCycle))
} else None
protocol.getLifeCycle.getLifeCycle match {
case LifeCycleType.PERMANENT => Permanent
case LifeCycleType.TEMPORARY => Temporary
case unknown => throw new IllegalActorStateException("LifeCycle type is not valid: " + unknown)
}
} else UndefinedLifeCycle

val supervisor =
if (protocol.hasSupervisor)
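Both lifecycle hunks follow from the same shape change: judging by this diff, what used to be an Option[LifeCycle] wrapping a scope is now a flat hierarchy with an explicit undefined default, roughly:

    // Sketch inferred from the diff, not the actual Akka source. Permanent and
    // Temporary are sent over the wire; UndefinedLifeCycle maps to an absent
    // LifeCycleProtocol field on serialization and back on deserialization.
    sealed trait LifeCycle
    case object Permanent extends LifeCycle
    case object Temporary extends LifeCycle
    case object UndefinedLifeCycle extends LifeCycle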
@@ -192,7 +187,7 @@ object ActorSerialization {
}

val ar = new LocalActorRef(
uuidFrom(protocol.getUuid.getHigh,protocol.getUuid.getLow),
uuidFrom(protocol.getUuid.getHigh, protocol.getUuid.getLow),
protocol.getId,
protocol.getOriginalAddress.getHostname,
protocol.getOriginalAddress.getPort,
@@ -202,7 +197,6 @@ object ActorSerialization {
lifeCycle,
supervisor,
hotswap,
classLoader, // TODO: should we fall back to getClass.getClassLoader?
factory)

val messages = protocol.getMessagesList.toArray.toList.asInstanceOf[List[RemoteRequestProtocol]]
@@ -231,7 +225,7 @@ object RemoteActorSerialization {
 * Deserializes a RemoteActorRefProtocol Protocol Buffers (protobuf) Message into a RemoteActorRef instance.
 */
private[akka] def fromProtobufToRemoteActorRef(protocol: RemoteActorRefProtocol, loader: Option[ClassLoader]): ActorRef = {
Actor.log.debug("Deserializing RemoteActorRefProtocol to RemoteActorRef:\n" + protocol)
Actor.log.debug("Deserializing RemoteActorRefProtocol to RemoteActorRef:\n %s", protocol)
RemoteActorRef(
protocol.getClassOrServiceName,
protocol.getActorClassname,
@@ -249,13 +243,13 @@ object RemoteActorSerialization {
val host = homeAddress.getHostName
val port = homeAddress.getPort

Actor.log.debug("Register serialized Actor [%s] as remote @ [%s:%s]", actorClass.getName, host, port)
Actor.log.debug("Register serialized Actor [%s] as remote @ [%s:%s]", actorClassName, host, port)
RemoteServer.getOrCreateServer(homeAddress)
RemoteServer.registerActorByUuid(homeAddress, uuid.toString, ar)
ActorRegistry.registerActorByUuid(homeAddress, uuid.toString, ar)

RemoteActorRefProtocol.newBuilder
.setClassOrServiceName(uuid.toString)
.setActorClassname(actorClass.getName)
.setActorClassname(actorClassName)
.setHomeAddress(AddressProtocol.newBuilder.setHostname(host).setPort(port).build)
.setTimeout(timeout)
.build
@@ -291,15 +285,19 @@ object RemoteActorSerialization {
case ActorType.TypedActor => actorInfoBuilder.setActorType(TYPED_ACTOR)
}
val actorInfo = actorInfoBuilder.build

val requestUuid = newUuid
val requestBuilder = RemoteRequestProtocol.newBuilder
.setUuid(UuidProtocol.newBuilder.setHigh(uuid.getTime).setLow(uuid.getClockSeqAndNode).build)
.setUuid(UuidProtocol.newBuilder.setHigh(requestUuid.getTime).setLow(requestUuid.getClockSeqAndNode).build)
.setMessage(MessageSerializer.serialize(message))
.setActorInfo(actorInfo)
.setIsOneWay(isOneWay)

val id = registerSupervisorAsRemoteActor
if (id.isDefined) requestBuilder.setSupervisorUuid(UuidProtocol.newBuilder.setHigh(id.get.getTime).setLow(id.get.getClockSeqAndNode).build)
if (id.isDefined) requestBuilder.setSupervisorUuid(
UuidProtocol.newBuilder
.setHigh(id.get.getTime)
.setLow(id.get.getClockSeqAndNode)
.build)

senderOption.foreach { sender =>
RemoteServer.getOrCreateServer(sender.homeAddress).register(sender.uuid.toString, sender)
@@ -337,7 +335,7 @@ object TypedActorSerialization {
proxy: AnyRef, format: Format[T]): SerializedTypedActorRefProtocol = {

val init = AspectInitRegistry.initFor(proxy)
if (init == null) throw new IllegalArgumentException("Proxy for typed actor could not be found in AspectInitRegistry.")
if (init eq null) throw new IllegalArgumentException("Proxy for typed actor could not be found in AspectInitRegistry.")

SerializedTypedActorRefProtocol.newBuilder
.setActorRef(ActorSerialization.toSerializedActorRefProtocol(init.actorRef, format))
@@ -129,7 +129,6 @@ object Serializer {
 * @author <a href="http://jonasboner.com">Jonas Bonér</a>
 */
trait ScalaJSON {
import dispatch.json._
import sjson.json._

var classLoader: Option[ClassLoader] = None
9 akka-remote/src/main/scala/serialization/package.scala Normal file
@@ -0,0 +1,9 @@
package se.scalablesolutions.akka

package object serialization {
type JsValue = _root_.dispatch.json.JsValue
val JsValue = _root_.dispatch.json.JsValue
val Js = _root_.dispatch.json.Js
val JsonSerialization = sjson.json.JsonSerialization
val DefaultProtocol = sjson.json.DefaultProtocol
}
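This package object is what makes the dispatch.json._ and sjson.json._ import removals above possible. A sketch of a call site (the object name is hypothetical):

    import se.scalablesolutions.akka.serialization._

    // JsValue, Js, JsonSerialization and DefaultProtocol now resolve through
    // the package object instead of per-file third-party imports.
    object JsonExample {
      def parse(json: String): JsValue = Js(json)
    }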
@@ -56,6 +56,15 @@ object ClientInitiatedRemoteActorSpec {
SendOneWayAndReplySenderActor.latch.countDown
}
}

class MyActorCustomConstructor extends Actor {
var prefix = "default-"
var count = 0
def receive = {
case "incrPrefix" => count += 1; prefix = "" + count + "-"
case msg: String => self.reply(prefix + msg)
}
}
}

class ClientInitiatedRemoteActorSpec extends JUnitSuite {
@@ -123,6 +132,19 @@ class ClientInitiatedRemoteActorSpec extends JUnitSuite {
actor.stop
}

@Test
def shouldSendBangBangMessageAndReceiveReplyConcurrently = {
val actors = (1 to 10).
map(num => {
val a = actorOf[RemoteActorSpecActorBidirectional]
a.makeRemote(HOSTNAME, PORT1)
a.start
}).toList
actors.map(_ !!! "Hello").
foreach(future => assert("World" === future.await.result.asInstanceOf[Option[String]].get))
actors.foreach(_.stop)
}

@Test
def shouldSendAndReceiveRemoteException {
implicit val timeout = 500000000L
@@ -137,6 +159,26 @@ class ClientInitiatedRemoteActorSpec extends JUnitSuite {
assert("Expected exception; to test fault-tolerance" === e.getMessage())
}
actor.stop
}
}

@Test
def shouldRegisterActorByUuid {
val actor1 = actorOf[MyActorCustomConstructor]
actor1.makeRemote(HOSTNAME, PORT1)
actor1.start
actor1 ! "incrPrefix"
assert((actor1 !! "test").get === "1-test")
actor1 ! "incrPrefix"
assert((actor1 !! "test").get === "2-test")

val actor2 = actorOf[MyActorCustomConstructor]
actor2.makeRemote(HOSTNAME, PORT1)
actor2.start

assert((actor2 !! "test").get === "default-test")

actor1.stop
actor2.stop
}
}
@@ -483,7 +483,7 @@ class RemoteSupervisorSpec extends JUnitSuite {
RestartStrategy(AllForOne, 3, 100, List(classOf[Exception])),
Supervise(
pingpong1,
LifeCycle(Permanent))
Permanent)
:: Nil))

factory.newInstance

@@ -499,7 +499,7 @@ class RemoteSupervisorSpec extends JUnitSuite {
RestartStrategy(OneForOne, 3, 100, List(classOf[Exception])),
Supervise(
pingpong1,
LifeCycle(Permanent))
Permanent)
:: Nil))
factory.newInstance
}

@@ -520,15 +520,15 @@ class RemoteSupervisorSpec extends JUnitSuite {
RestartStrategy(AllForOne, 3, 100, List(classOf[Exception])),
Supervise(
pingpong1,
LifeCycle(Permanent))
Permanent)
::
Supervise(
pingpong2,
LifeCycle(Permanent))
Permanent)
::
Supervise(
pingpong3,
LifeCycle(Permanent))
Permanent)
:: Nil))
factory.newInstance
}

@@ -551,15 +551,15 @@ class RemoteSupervisorSpec extends JUnitSuite {
RestartStrategy(OneForOne, 3, 100, List(classOf[Exception])),
Supervise(
pingpong1,
LifeCycle(Permanent))
Permanent)
::
Supervise(
pingpong2,
LifeCycle(Permanent))
Permanent)
::
Supervise(
pingpong3,
LifeCycle(Permanent))
Permanent)
:: Nil))
factory.newInstance
}
@@ -580,17 +580,17 @@ class RemoteSupervisorSpec extends JUnitSuite {
RestartStrategy(AllForOne, 3, 100, List(classOf[Exception])),
Supervise(
pingpong1,
LifeCycle(Permanent))
Permanent)
::
SupervisorConfig(
RestartStrategy(AllForOne, 3, 100, List(classOf[Exception])),
Supervise(
pingpong2,
LifeCycle(Permanent))
Permanent)
::
Supervise(
pingpong3,
LifeCycle(Permanent))
Permanent)
:: Nil)
:: Nil))
factory.newInstance
@@ -55,13 +55,13 @@ class RemoteTypedActorSpec extends
new Component(
classOf[RemoteTypedActorOne],
classOf[RemoteTypedActorOneImpl],
new LifeCycle(new Permanent),
new Permanent,
10000,
new RemoteAddress("localhost", 9995)),
new Component(
classOf[RemoteTypedActorTwo],
classOf[RemoteTypedActorTwoImpl],
new LifeCycle(new Permanent),
new Permanent,
10000,
new RemoteAddress("localhost", 9995))
).toArray).supervise
@@ -201,18 +201,18 @@ class ServerInitiatedRemoteActorSpec extends JUnitSuite {
def shouldRegisterAndUnregister {
val actor1 = actorOf[RemoteActorSpecActorUnidirectional]
server.register("my-service-1", actor1)
assert(server.actors().get("my-service-1") != null, "actor registered")
assert(server.actors().get("my-service-1") ne null, "actor registered")
server.unregister("my-service-1")
assert(server.actors().get("my-service-1") == null, "actor unregistered")
assert(server.actors().get("my-service-1") eq null, "actor unregistered")
}

@Test
def shouldRegisterAndUnregisterByUuid {
val actor1 = actorOf[RemoteActorSpecActorUnidirectional]
server.register("uuid:" + actor1.uuid, actor1)
assert(server.actorsByUuid().get(actor1.uuid.toString) != null, "actor registered")
assert(server.actorsByUuid().get(actor1.uuid.toString) ne null, "actor registered")
server.unregister("uuid:" + actor1.uuid)
assert(server.actorsByUuid().get(actor1.uuid) == null, "actor unregistered")
assert(server.actorsByUuid().get(actor1.uuid) eq null, "actor unregistered")
}

}
Some files were not shown because too many files have changed in this diff