diff --git a/.gitignore b/.gitignore index 22379bef4c..69dd6d55c9 100755 --- a/.gitignore +++ b/.gitignore @@ -1,5 +1,9 @@ *~ *# +project/boot/* +*/project/build/target +*/project/boot +lib_managed etags TAGS reports diff --git a/akka-amqp/pom.xml b/akka-amqp/pom.xml deleted file mode 100644 index aa569958a6..0000000000 --- a/akka-amqp/pom.xml +++ /dev/null @@ -1,29 +0,0 @@ - - 4.0.0 - - akka-amqp - Akka AMQP Module - - jar - - - akka - se.scalablesolutions.akka - 0.7-SNAPSHOT - - - - - akka-core - ${project.groupId} - ${project.version} - - - com.rabbitmq - amqp-client - 1.7.0 - - - - diff --git a/akka-camel/src/main/resources/META-INF/services/org/apache/camel/component/actor b/akka-camel/src/main/resources/META-INF/services/org/apache/camel/component/actor new file mode 100644 index 0000000000..a2141db8a9 --- /dev/null +++ b/akka-camel/src/main/resources/META-INF/services/org/apache/camel/component/actor @@ -0,0 +1 @@ +class=se.scalablesolutions.akka.camel.component.ActorComponent \ No newline at end of file diff --git a/akka-camel/src/main/scala/CamelContextLifecycle.scala b/akka-camel/src/main/scala/CamelContextLifecycle.scala new file mode 100644 index 0000000000..b9a696207c --- /dev/null +++ b/akka-camel/src/main/scala/CamelContextLifecycle.scala @@ -0,0 +1,95 @@ +/** + * Copyright (C) 2009-2010 Scalable Solutions AB + */ + +package se.scalablesolutions.akka.camel + +import org.apache.camel.{ProducerTemplate, CamelContext} +import org.apache.camel.impl.DefaultCamelContext + +import se.scalablesolutions.akka.util.Logging + +/** + * Defines the lifecycle of a CamelContext. Allowed state transitions are + * init -> start -> stop -> init -> ... etc. + * + * @author Martin Krasser + */ +trait CamelContextLifecycle extends Logging { + // TODO: enforce correct state transitions + // valid: init -> start -> stop -> init ... + + private var _context: CamelContext = _ + private var _template: ProducerTemplate = _ + + private var _initialized = false + private var _started = false + + /** + * Returns the managed CamelContext. + */ + protected def context: CamelContext = _context + + /** + * Returns the managed ProducerTemplate. + */ + protected def template: ProducerTemplate = _template + + /** + * Sets the managed CamelContext. + */ + protected def context_= (context: CamelContext) { _context = context } + + /** + * Sets the managed ProducerTemplate. + */ + protected def template_= (template: ProducerTemplate) { _template = template } + + def initialized = _initialized + def started = _started + + /** + * Starts the CamelContext and ProducerTemplate. + */ + def start = { + context.start + template.start + _started = true + log.info("Camel context started") + } + + /** + * Stops the CamelContext and ProducerTemplate. + */ + def stop = { + template.stop + context.stop + _initialized = false + _started = false + log.info("Camel context stopped") + } + + /** + * Initializes this lifecycle object with the a DefaultCamelContext. + */ + def init: Unit = init(new DefaultCamelContext) + + /** + * Initializes this lifecycle object with the given CamelContext. + */ + def init(context: CamelContext) { + this.context = context + this.template = context.createProducerTemplate + _initialized = true + log.info("Camel context initialized") + } +} + +/** + * Makes a global CamelContext and ProducerTemplate accessible to applications. The lifecycle + * of these objects is managed by se.scalablesolutions.akka.camel.service.CamelService. 
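+ * <p>
+ * A minimal usage sketch for standalone applications (the endpoint URI and
+ * message body below are illustrative only):
+ * <pre>
+ * CamelContextManager.init   // create and initialize a DefaultCamelContext
+ * CamelContextManager.start  // start the context and its producer template
+ * CamelContextManager.template.sendBody("direct:example", "hello")
+ * CamelContextManager.stop   // stop the template and the context
+ * </pre>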
+ */ +object CamelContextManager extends CamelContextLifecycle { + override def context: CamelContext = super.context + override def template: ProducerTemplate = super.template +} \ No newline at end of file diff --git a/akka-camel/src/main/scala/Consumer.scala b/akka-camel/src/main/scala/Consumer.scala new file mode 100644 index 0000000000..27ec98b25d --- /dev/null +++ b/akka-camel/src/main/scala/Consumer.scala @@ -0,0 +1,20 @@ +/** + * Copyright (C) 2009-2010 Scalable Solutions AB + */ + +package se.scalablesolutions.akka.camel + +import se.scalablesolutions.akka.actor.Actor + +/** + * Mixed in by Actor implementations that consume message from Camel endpoints. + * + * @author Martin Krasser + */ +trait Consumer { self: Actor => + + /** + * Returns the Camel endpoint URI to consume messages from. + */ + def endpointUri: String +} \ No newline at end of file diff --git a/akka-camel/src/main/scala/Message.scala b/akka-camel/src/main/scala/Message.scala new file mode 100644 index 0000000000..8e0156c669 --- /dev/null +++ b/akka-camel/src/main/scala/Message.scala @@ -0,0 +1,249 @@ +/** + * Copyright (C) 2009-2010 Scalable Solutions AB + */ + +package se.scalablesolutions.akka.camel + +import org.apache.camel.{Exchange, Message => CamelMessage} +import org.apache.camel.util.ExchangeHelper + +import scala.collection.jcl.{Map => MapWrapper} + +/** + * An immutable representation of a Camel message. Actor classes that mix in + * se.scalablesolutions.akka.camel.Producer or + * se.scalablesolutions.akka.camel.Consumer use this message type for communication. + * + * @author Martin Krasser + */ +case class Message(val body: Any, val headers: Map[String, Any]) { + /** + * Creates a message with a body and an empty header map. + */ + def this(body: Any) = this(body, Map.empty) + + /** + * Returns the body of the message converted to the type given by the clazz + * argument. Conversion is done using Camel's type converter. The type converter is obtained + * from the CamelContext managed by CamelContextManager. Applications have to ensure proper + * initialization of CamelContextManager. + * + * @see CamelContextManager. + */ + def bodyAs[T](clazz: Class[T]): T = + CamelContextManager.context.getTypeConverter.mandatoryConvertTo[T](clazz, body) + + /** + * Returns those headers from this message whose name is contained in names. + */ + def headers(names: Set[String]): Map[String, Any] = headers.filter(names contains _._1) + + /** + * Creates a Message with a new body using a transformer function. + */ + def transformBody[A](transformer: A => Any): Message = setBody(transformer(body.asInstanceOf[A])) + + /** + * Creates a Message with a new body converted to type clazz. + * + * @see Message#bodyAs(Class) + */ + def setBodyAs[T](clazz: Class[T]): Message = setBody(bodyAs(clazz)) + + /** + * Creates a Message with a new body. + */ + def setBody(body: Any) = new Message(body, this.headers) + + /** + * Creates a new Message with new headers. + */ + def setHeaders(headers: Map[String, Any]) = new Message(this.body, headers) + + /** + * Creates a new Message with the headers argument added to the existing headers. + */ + def addHeaders(headers: Map[String, Any]) = new Message(this.body, this.headers ++ headers) + + /** + * Creates a new Message with the header argument added to the existing headers. + */ + def addHeader(header: (String, Any)) = new Message(this.body, this.headers + header) + + /** + * Creates a new Message where the header with name headerName is removed from + * the existing headers. 
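+ * <p>
+ * Header manipulation sketch (header names and values are illustrative):
+ * <pre>
+ * Message("test", Map("A" -> "1"))
+ *   .addHeader("B" -> "2")  // headers: Map("A" -> "1", "B" -> "2")
+ *   .removeHeader("A")      // headers: Map("B" -> "2")
+ * </pre>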
+ */ + def removeHeader(headerName: String) = new Message(this.body, this.headers - headerName) +} + +/** + * Companion object of Message class. + * + * @author Martin Krasser + */ +object Message { + + /** + * Message header to correlate request with response messages. Applications that send + * messages to a Producer actor may want to set this header on the request message + * so that it can be correlated with an asynchronous response. Messages send to Consumer + * actors have this header already set. + */ + val MessageExchangeId = "MessageExchangeId".intern + + /** + * Creates a new Message with body as message body and an empty header map. + */ + def apply(body: Any) = new Message(body) + + /** + * Creates a canonical form of the given message msg. If msg of type + * Message then msg is returned, otherwise msg is set as body of a + * newly created Message object. + */ + def canonicalize(msg: Any) = msg match { + case mobj: Message => mobj + case body => new Message(body) + } +} + +/** + * An immutable representation of a failed Camel exchange. It contains the failure cause + * obtained from Exchange.getException and the headers from either the Exchange.getIn + * message or Exchange.getOut message, depending on the exchange pattern. + * + * @author Martin Krasser + */ +case class Failure(val cause: Exception, val headers: Map[String, Any]) + +/** + * Adapter for converting an org.apache.camel.Exchange to and from Message and Failure objects. + * + * @author Martin Krasser + */ +class CamelExchangeAdapter(exchange: Exchange) { + + import CamelMessageConversion.toMessageAdapter + + /** + * Sets Exchange.getIn from the given Message object. + */ + def fromRequestMessage(msg: Message): Exchange = { requestMessage.fromMessage(msg); exchange } + + /** + * Depending on the exchange pattern, sets Exchange.getIn or Exchange.getOut from the given + * Message object. If the exchange is out-capable then the Exchange.getOut is set, otherwise + * Exchange.getIn. + */ + def fromResponseMessage(msg: Message): Exchange = { responseMessage.fromMessage(msg); exchange } + + /** + * Sets Exchange.getException from the given Failure message. Headers of the Failure message + * are ignored. + */ + def fromFailureMessage(msg: Failure): Exchange = { exchange.setException(msg.cause); exchange } + + /** + * Creates a Message object from Exchange.getIn. + */ + def toRequestMessage: Message = toRequestMessage(Map.empty) + + /** + * Depending on the exchange pattern, creates a Message object from Exchange.getIn or Exchange.getOut. + * If the exchange is out-capable then the Exchange.getOut is set, otherwise Exchange.getIn. + */ + def toResponseMessage: Message = toResponseMessage(Map.empty) + + /** + * Creates a Failure object from the adapted Exchange. + * + * @see Failure + */ + def toFailureMessage: Failure = toFailureMessage(Map.empty) + + /** + * Creates a Message object from Exchange.getIn. + * + * @param headers additional headers to set on the created Message in addition to those + * in the Camel message. + */ + def toRequestMessage(headers: Map[String, Any]): Message = requestMessage.toMessage(headers) + + /** + * Depending on the exchange pattern, creates a Message object from Exchange.getIn or Exchange.getOut. + * If the exchange is out-capable then the Exchange.getOut is set, otherwise Exchange.getIn. + * + * @param headers additional headers to set on the created Message in addition to those + * in the Camel message. 
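+ * <p>
+ * Typical use via the implicit adapter conversion (given some
+ * exchange: Exchange; the extra header is illustrative):
+ * <pre>
+ * import CamelMessageConversion.toExchangeAdapter
+ * val response: Message = exchange.toResponseMessage(Map("foo" -> "bar"))
+ * </pre>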
+ */ + def toResponseMessage(headers: Map[String, Any]): Message = responseMessage.toMessage(headers) + + /** + * Creates a Failure object from the adapted Exchange. + * + * @param headers additional headers to set on the created Message in addition to those + * in the Camel message. + * + * @see Failure + */ + def toFailureMessage(headers: Map[String, Any]): Failure = + Failure(exchange.getException, headers ++ responseMessage.toMessage.headers) + + private def requestMessage = exchange.getIn + + private def responseMessage = ExchangeHelper.getResultMessage(exchange) + +} + +/** + * Adapter for converting an org.apache.camel.Message to and from Message objects. + * + * @author Martin Krasser + */ +class CamelMessageAdapter(val cm: CamelMessage) { + /** + * Set the adapted Camel message from the given Message object. + */ + def fromMessage(m: Message): CamelMessage = { + cm.setBody(m.body) + for (h <- m.headers) cm.getHeaders.put(h._1, h._2.asInstanceOf[AnyRef]) + cm + } + + /** + * Creates a new Message object from the adapted Camel message. + */ + def toMessage: Message = toMessage(Map.empty) + + /** + * Creates a new Message object from the adapted Camel message. + * + * @param headers additional headers to set on the created Message in addition to those + * in the Camel message. + */ + def toMessage(headers: Map[String, Any]): Message = Message(cm.getBody, cmHeaders(headers, cm)) + + private def cmHeaders(headers: Map[String, Any], cm: CamelMessage) = + headers ++ MapWrapper[String, AnyRef](cm.getHeaders).elements +} + +/** + * Defines conversion methods to CamelExchangeAdapter and CamelMessageAdapter. + * Imported by applications + * that implicitly want to use conversion methods of CamelExchangeAdapter and CamelMessageAdapter. + */ +object CamelMessageConversion { + + /** + * Creates an CamelExchangeAdapter for the given Camel exchange. + */ + implicit def toExchangeAdapter(ce: Exchange): CamelExchangeAdapter = + new CamelExchangeAdapter(ce) + + /** + * Creates an CamelMessageAdapter for the given Camel message. + */ + implicit def toMessageAdapter(cm: CamelMessage): CamelMessageAdapter = + new CamelMessageAdapter(cm) +} \ No newline at end of file diff --git a/akka-camel/src/main/scala/Producer.scala b/akka-camel/src/main/scala/Producer.scala new file mode 100644 index 0000000000..43e9b8b10e --- /dev/null +++ b/akka-camel/src/main/scala/Producer.scala @@ -0,0 +1,192 @@ +/** + * Copyright (C) 2009-2010 Scalable Solutions AB + */ + +package se.scalablesolutions.akka.camel + +import CamelMessageConversion.toExchangeAdapter + +import org.apache.camel.{Processor, ExchangePattern, Exchange, ProducerTemplate} +import org.apache.camel.impl.DefaultExchange +import org.apache.camel.spi.Synchronization + +import se.scalablesolutions.akka.actor.Actor +import se.scalablesolutions.akka.dispatch.CompletableFuture +import se.scalablesolutions.akka.util.Logging + +/** + * Mixed in by Actor implementations that produce messages to Camel endpoints. + * + * @author Martin Krasser + */ +trait Producer { self: Actor => + + private val headersToCopyDefault = Set(Message.MessageExchangeId) + + /** + * If set to true (default), communication with the Camel endpoint is done via the Camel + * Async API. Camel then processes the + * message in a separate thread. If set to false, the actor thread is blocked until Camel + * has finished processing the produced message. + */ + def async: Boolean = true + + /** + * If set to false (default), this producer expects a response message from the Camel endpoint. 
+ * If set to true, this producer communicates with the Camel endpoint with an in-only message + * exchange pattern (fire and forget). + */ + def oneway: Boolean = false + + /** + * Returns the Camel endpoint URI to produce messages to. + */ + def endpointUri: String + + /** + * Returns the names of message headers to copy from a request message to a response message. + * By default only the Message.MessageExchangeId is copied. Applications may override this to + * define an application-specific set of message headers to copy. + */ + def headersToCopy: Set[String] = headersToCopyDefault + + /** + * Returns the producer template from the CamelContextManager. Applications either have to ensure + * proper initialization of CamelContextManager or override this method. + * + * @see CamelContextManager. + */ + protected def template: ProducerTemplate = CamelContextManager.template + + /** + * Initiates a one-way (in-only) message exchange to the Camel endpoint given by + * endpointUri. This method blocks until Camel finishes processing + * the message exchange. + * + * @param msg: the message to produce. The message is converted to its canonical + * representation via Message.canonicalize. + */ + protected def produceOneway(msg: Any): Unit = + template.send(endpointUri, createInOnlyExchange.fromRequestMessage(Message.canonicalize(msg))) + + /** + * Initiates a one-way (in-only) message exchange to the Camel endpoint given by + * endpointUri. This method triggers asynchronous processing of the + * message exchange by Camel. + * + * @param msg: the message to produce. The message is converted to its canonical + * representation via Message.canonicalize. + */ + protected def produceOnewayAsync(msg: Any): Unit = + template.asyncSend( + endpointUri, createInOnlyExchange.fromRequestMessage(Message.canonicalize(msg))) + + /** + * Initiates a two-way (in-out) message exchange to the Camel endpoint given by + * endpointUri. This method blocks until Camel finishes processing + * the message exchange. + * + * @param msg: the message to produce. The message is converted to its canonical + * representation via Message.canonicalize. + * @return either a response Message or a Failure object. + */ + protected def produce(msg: Any): Any = { + val cmsg = Message.canonicalize(msg) + val requestProcessor = new Processor() { + def process(exchange: Exchange) = exchange.fromRequestMessage(cmsg) + } + val result = template.request(endpointUri, requestProcessor) + if (result.isFailed) result.toFailureMessage(cmsg.headers(headersToCopy)) + else result.toResponseMessage(cmsg.headers(headersToCopy)) + } + + /** + * Initiates a two-way (in-out) message exchange to the Camel endpoint given by + * endpointUri. This method triggers asynchronous processing of the + * message exchange by Camel. The response message is returned asynchronously to + * the original sender (or sender future). + * + * @param msg: the message to produce. The message is converted to its canonical + * representation via Message.canonicalize. + * @return either a response Message or a Failure object. + * @see ProducerResponseSender + */ + protected def produceAsync(msg: Any): Unit = { + val cmsg = Message.canonicalize(msg) + val sync = new ProducerResponseSender( + cmsg.headers(headersToCopy), this.sender, this.senderFuture, this) + template.asyncCallback(endpointUri, createInOutExchange.fromRequestMessage(cmsg), sync) + } + + /** + * Default implementation for Actor.receive. Implementors may choose to + * def receive = produce. 
This partial function calls one of + * the protected produce methods depending on the return values of + * oneway and async. + */ + protected def produce: PartialFunction[Any, Unit] = { + case msg => { + if ( oneway && !async) produceOneway(msg) + else if ( oneway && async) produceOnewayAsync(msg) + else if (!oneway && !async) reply(produce(msg)) + else /*(!oneway && async)*/ produceAsync(msg) + } + } + + /** + * Creates a new in-only Exchange. + */ + protected def createInOnlyExchange: Exchange = createExchange(ExchangePattern.InOnly) + + /** + * Creates a new in-out Exchange. + */ + protected def createInOutExchange: Exchange = createExchange(ExchangePattern.InOut) + + /** + * Creates a new Exchange with given pattern from the CamelContext managed by + * CamelContextManager. Applications either have to ensure proper initialization + * of CamelContextManager or override this method. + * + * @see CamelContextManager. + */ + protected def createExchange(pattern: ExchangePattern): Exchange = + new DefaultExchange(CamelContextManager.context, pattern) +} + +/** + * Synchronization object that sends responses asynchronously to initial senders. This + * class is used by Producer for asynchronous two-way messaging with a Camel endpoint. + * + * @author Martin Krasser + */ +class ProducerResponseSender( + headers: Map[String, Any], + sender: Option[Actor], + senderFuture: Option[CompletableFuture], + producer: Actor) extends Synchronization with Logging { + + implicit val producerActor = Some(producer) // the response sender + + /** + * Replies a Failure message, created from the given exchange, to sender (or + * senderFuture if applicable). + */ + def onFailure(exchange: Exchange) = reply(exchange.toFailureMessage(headers)) + + /** + * Replies a response Message, created from the given exchange, to sender (or + * senderFuture if applicable). + */ + def onComplete(exchange: Exchange) = reply(exchange.toResponseMessage(headers)) + + private def reply(message: Any) = { + sender match { + case Some(actor) => actor ! message + case None => senderFuture match { + case Some(future) => future.completeWithResult(message) + case None => log.warning("No destination for sending response") + } + } + } +} diff --git a/akka-camel/src/main/scala/component/ActorComponent.scala b/akka-camel/src/main/scala/component/ActorComponent.scala new file mode 100644 index 0000000000..763f9dd017 --- /dev/null +++ b/akka-camel/src/main/scala/component/ActorComponent.scala @@ -0,0 +1,152 @@ +/** + * Copyright (C) 2009-2010 Scalable Solutions AB + */ + +package se.scalablesolutions.akka.camel.component + +import java.lang.{RuntimeException, String} +import java.util.{Map => JavaMap} +import java.util.concurrent.TimeoutException + +import org.apache.camel.{Exchange, Consumer, Processor} +import org.apache.camel.impl.{DefaultProducer, DefaultEndpoint, DefaultComponent} + +import se.scalablesolutions.akka.actor.{ActorRegistry, Actor} +import se.scalablesolutions.akka.camel.{Failure, CamelMessageConversion, Message} + +/** + * Camel component for sending messages to and receiving replies from actors. 
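+ *
+ * Route sketch (the consuming endpoint URI and actor id are illustrative):
+ * <pre>
+ * from("direct:example").to("actor:myActorId")
+ * </pre>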
+ * + * @see se.scalablesolutions.akka.camel.component.ActorEndpoint + * @see se.scalablesolutions.akka.camel.component.ActorProducer + * + * @author Martin Krasser + */ +class ActorComponent extends DefaultComponent { + def createEndpoint(uri: String, remaining: String, parameters: JavaMap[String, Object]): ActorEndpoint = { + val idAndUuid = idAndUuidPair(remaining) + new ActorEndpoint(uri, this, idAndUuid._1, idAndUuid._2) + } + + private def idAndUuidPair(remaining: String): Tuple2[Option[String], Option[String]] = { + remaining split ":" toList match { + case id :: Nil => (Some(id), None) + case "id" :: id :: Nil => (Some(id), None) + case "uuid" :: uuid :: Nil => (None, Some(uuid)) + case _ => throw new IllegalArgumentException( + "invalid path format: %s - should be or id: or uuid:" format remaining) + } + } +} + +/** + * Camel endpoint for referencing an actor. The actor reference is given by the endpoint URI. + * An actor can be referenced by its Actor.getId or its Actor.uuid. + * Supported endpoint URI formats are + * actor:<actorid>, + * actor:id:<actorid> and + * actor:uuid:<actoruuid>. + * + * @see se.scalablesolutions.akka.camel.component.ActorComponent + * @see se.scalablesolutions.akka.camel.component.ActorProducer + + * @author Martin Krasser + */ +class ActorEndpoint(uri: String, + comp: ActorComponent, + val id: Option[String], + val uuid: Option[String]) extends DefaultEndpoint(uri, comp) { + + /** + * @throws UnsupportedOperationException + */ + def createConsumer(processor: Processor): Consumer = + throw new UnsupportedOperationException("actor consumer not supported yet") + + /** + * Creates a new ActorProducer instance initialized with this endpoint. + */ + def createProducer: ActorProducer = new ActorProducer(this) + + /** + * Returns true. + */ + def isSingleton: Boolean = true +} + +/** + * Sends the in-message of an exchange to an actor. If the exchange pattern is out-capable, + * the producer waits for a reply (using the !! operator), otherwise the ! operator is used + * for sending the message. + * + * @see se.scalablesolutions.akka.camel.component.ActorComponent + * @see se.scalablesolutions.akka.camel.component.ActorEndpoint + * + * @author Martin Krasser + */ +class ActorProducer(val ep: ActorEndpoint) extends DefaultProducer(ep) { + import CamelMessageConversion.toExchangeAdapter + + implicit val sender = None + + /** + * Depending on the exchange pattern, this method either calls processInOut or + * processInOnly for interacting with an actor. This methods looks up the actor + * from the ActorRegistry according to this producer's endpoint URI. + * + * @param exchange represents the message exchange with the actor. + */ + def process(exchange: Exchange) { + val actor = target getOrElse (throw new ActorNotRegisteredException(ep.getEndpointUri)) + if (exchange.getPattern.isOutCapable) processInOut(exchange, actor) + else processInOnly(exchange, actor) + } + + /** + * Send the exchange in-message to the given actor using the ! operator. The message + * send to the actor is of type se.scalablesolutions.akka.camel.Message. + */ + protected def processInOnly(exchange: Exchange, actor: Actor): Unit = + actor ! exchange.toRequestMessage(Map(Message.MessageExchangeId -> exchange.getExchangeId)) + + /** + * Send the exchange in-message to the given actor using the !! operator. The exchange + * out-message is populated from the actor's reply message. The message sent to the + * actor is of type se.scalablesolutions.akka.camel.Message. 
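+ * A reply of type Failure sets the exchange exception instead of the
+ * out-message; if no reply arrives within the actor's timeout, a
+ * TimeoutException is thrown.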
+ */ + protected def processInOut(exchange: Exchange, actor: Actor) { + val header = Map(Message.MessageExchangeId -> exchange.getExchangeId) + val result: Any = actor !! exchange.toRequestMessage(header) + + result match { + case Some(msg: Failure) => exchange.fromFailureMessage(msg) + case Some(msg) => exchange.fromResponseMessage(Message.canonicalize(msg)) + case None => { + throw new TimeoutException("timeout (%d ms) while waiting response from %s" + format (actor.timeout, ep.getEndpointUri)) + } + } + } + + private def target: Option[Actor] = + if (ep.id.isDefined) targetById(ep.id.get) + else targetByUuid(ep.uuid.get) + + private def targetById(id: String) = ActorRegistry.actorsFor(id) match { + case Nil => None + case actor :: Nil => Some(actor) + case actors => Some(actors.first) + } + + private def targetByUuid(uuid: String) = ActorRegistry.actorFor(uuid) +} + +/** + * Thrown to indicate that an actor referenced by an endpoint URI cannot be + * found in the ActorRegistry. + * + * @author Martin Krasser + */ +class ActorNotRegisteredException(uri: String) extends RuntimeException { + override def getMessage = "%s not registered" format uri +} \ No newline at end of file diff --git a/akka-camel/src/main/scala/service/CamelService.scala b/akka-camel/src/main/scala/service/CamelService.scala new file mode 100644 index 0000000000..86b4f2dc23 --- /dev/null +++ b/akka-camel/src/main/scala/service/CamelService.scala @@ -0,0 +1,89 @@ +/** + * Copyright (C) 2009-2010 Scalable Solutions AB + */ + +package se.scalablesolutions.akka.camel.service + +import se.scalablesolutions.akka.actor.ActorRegistry +import se.scalablesolutions.akka.camel.CamelContextManager +import se.scalablesolutions.akka.util.{Bootable, Logging} + +/** + * Used by applications (and the Kernel) to publish consumer actors via Camel + * endpoints and to manage the life cycle of a a global CamelContext which can + * be accessed via se.scalablesolutions.akka.camel.CamelContextManager. + * + * @author Martin Krasser + */ +trait CamelService extends Bootable with Logging { + + import se.scalablesolutions.akka.actor.Actor.Sender.Self + import CamelContextManager._ + + private[camel] val consumerPublisher = new ConsumerPublisher + private[camel] val publishRequestor = new PublishRequestor(consumerPublisher) + + /** + * Starts the CamelService. Any started actor that is a consumer actor will be (asynchronously) + * published as Camel endpoint. Consumer actors that are started after this method returned will + * be published as well. Actor publishing is done asynchronously. + */ + abstract override def onLoad = { + super.onLoad + + // Only init and start if not already done by application + if (!initialized) init + if (!started) start + + // Camel should cache input streams + context.setStreamCaching(true) + + // start actor that exposes consumer actors via Camel endpoints + consumerPublisher.start + + // add listener for actor registration events + ActorRegistry.addRegistrationListener(publishRequestor.start) + + // publish already registered consumer actors + for (publish <- Publish.forConsumers(ActorRegistry.actors)) consumerPublisher ! publish + } + + /** + * Stops the CamelService. + */ + abstract override def onUnload = { + ActorRegistry.removeRegistrationListener(publishRequestor) + publishRequestor.stop + consumerPublisher.stop + stop + super.onUnload + } + + /** + * Starts the CamelService. + * + * @see onLoad + */ + def load = onLoad + + /** + * Stops the CamelService. 
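+ * <p>
+ * Standalone usage sketch, pairing load with unload:
+ * <pre>
+ * val service = CamelService.newInstance
+ * service.load    // starts the service and publishes consumer actors
+ * // ...
+ * service.unload  // stops the service
+ * </pre>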
+ * + * @see onUnload + */ + def unload = onUnload +} + +/** + * CamelService companion object used by standalone applications to create their own + * CamelService instance. + * + * @author Martin Krasser + */ +object CamelService { + + /** + * Creates a new CamelService instance. + */ + def newInstance: CamelService = new CamelService {} +} diff --git a/akka-camel/src/main/scala/service/ConsumerPublisher.scala b/akka-camel/src/main/scala/service/ConsumerPublisher.scala new file mode 100644 index 0000000000..a6509e2694 --- /dev/null +++ b/akka-camel/src/main/scala/service/ConsumerPublisher.scala @@ -0,0 +1,135 @@ +/** + * Copyright (C) 2009-2010 Scalable Solutions AB + */ +package se.scalablesolutions.akka.camel.service + +import java.io.InputStream +import java.util.concurrent.CountDownLatch + +import org.apache.camel.builder.RouteBuilder + +import se.scalablesolutions.akka.actor.{ActorUnregistered, ActorRegistered, Actor} +import se.scalablesolutions.akka.actor.annotation.consume +import se.scalablesolutions.akka.camel.{Consumer, CamelContextManager} +import se.scalablesolutions.akka.util.Logging + +/** + * Actor that publishes consumer actors as Camel endpoints at the CamelContext managed + * by se.scalablesolutions.akka.camel.CamelContextManager. It accepts messages of type + * se.scalablesolutions.akka.camel.service.Publish. + * + * @author Martin Krasser + */ +class ConsumerPublisher extends Actor with Logging { + @volatile private var latch = new CountDownLatch(0) + + /** + * Adds a route to the actor identified by a Publish message to the global CamelContext. + */ + protected def receive = { + case p: Publish => publish(new ConsumerRoute(p.endpointUri, p.id, p.uuid)) + case _ => { /* ignore */} + } + + /** + * Sets the number of expected Publish messages received by this actor. Used for testing + * only. + */ + private[camel] def expectPublishCount(count: Int): Unit = latch = new CountDownLatch(count) + + /** + * Waits for the number of expected Publish messages to arrive. Used for testing only. + */ + private[camel] def awaitPublish = latch.await + + private def publish(route: ConsumerRoute) { + CamelContextManager.context.addRoutes(route) + log.info("published actor via endpoint %s" format route.endpointUri) + latch.countDown // needed for testing only. + } +} + +/** + * Defines the route to a consumer actor. + * + * @param endpointUri endpoint URI of the consumer actor + * @param id actor identifier + * @param uuid true if id refers to Actor.uuid, false if + * id refers to Acotr.getId. + * + * @author Martin Krasser + */ +class ConsumerRoute(val endpointUri: String, id: String, uuid: Boolean) extends RouteBuilder { + // TODO: make conversions configurable + private val bodyConversions = Map( + "file" -> classOf[InputStream] + ) + + def configure = { + val schema = endpointUri take endpointUri.indexOf(":") // e.g. "http" from "http://whatever/..." + bodyConversions.get(schema) match { + case Some(clazz) => from(endpointUri).convertBodyTo(clazz).to(actorUri) + case None => from(endpointUri).to(actorUri) + } + } + + private def actorUri = (if (uuid) "actor:uuid:%s" else "actor:id:%s") format id +} + +/** + * A registration listener that publishes consumer actors (and ignores other actors). + * + * @author Martin Krasser + */ +class PublishRequestor(consumerPublisher: Actor) extends Actor { + protected def receive = { + case ActorUnregistered(actor) => { /* ignore */ } + case ActorRegistered(actor) => Publish.forConsumer(actor) match { + case Some(publish) => consumerPublisher ! 
publish + case None => { /* ignore */ } + } + } +} + +/** + * Request message for publishing a consumer actor. + * + * @param endpointUri endpoint URI of the consumer actor + * @param id actor identifier + * @param uuid true if id refers to Actor.uuid, false if + * id refers to Acotr.getId. + * + * @author Martin Krasser + */ +case class Publish(endpointUri: String, id: String, uuid: Boolean) + +/** + * @author Martin Krasser + */ +object Publish { + + /** + * Creates a list of Publish request messages for all consumer actors in the actors + * list. + */ + def forConsumers(actors: List[Actor]): List[Publish] = + for (actor <- actors; pub = forConsumer(actor); if pub.isDefined) yield pub.get + + /** + * Creates a Publish request message if actor is a consumer actor. + */ + def forConsumer(actor: Actor): Option[Publish] = + forConsumeAnnotated(actor) orElse forConsumerType(actor) + + private def forConsumeAnnotated(actor: Actor): Option[Publish] = { + val annotation = actor.getClass.getAnnotation(classOf[consume]) + if (annotation eq null) None + else if (actor._remoteAddress.isDefined) None // do not publish proxies + else Some(Publish(annotation.value, actor.getId, false)) + } + + private def forConsumerType(actor: Actor): Option[Publish] = + if (!actor.isInstanceOf[Consumer]) None + else if (actor._remoteAddress.isDefined) None + else Some(Publish(actor.asInstanceOf[Consumer].endpointUri, actor.uuid, true)) +} diff --git a/akka-camel/src/test/scala/MessageTest.scala b/akka-camel/src/test/scala/MessageTest.scala new file mode 100644 index 0000000000..d519dbafa7 --- /dev/null +++ b/akka-camel/src/test/scala/MessageTest.scala @@ -0,0 +1,79 @@ +package se.scalablesolutions.akka.camel + +import java.io.InputStream + +import org.apache.camel.NoTypeConversionAvailableException +import org.junit.Assert._ +import org.scalatest.junit.JUnitSuite + +import org.junit.Test + +class MessageTest extends JUnitSuite { + + // + // TODO: extend/rewrite unit tests + // These tests currently only ensure proper functioning of basic features. 
+ // + + @Test def shouldConvertDoubleBodyToString = { + CamelContextManager.init + assertEquals("1.4", Message(1.4, null).bodyAs(classOf[String])) + } + + @Test def shouldThrowExceptionWhenConvertingDoubleBodyToInputStream { + CamelContextManager.init + intercept[NoTypeConversionAvailableException] { + Message(1.4, null).bodyAs(classOf[InputStream]) + } + } + + @Test def shouldReturnSubsetOfHeaders = { + val message = Message("test" , Map("A" -> "1", "B" -> "2")) + assertEquals(Map("B" -> "2"), message.headers(Set("B"))) + } + + @Test def shouldTransformBodyAndPreserveHeaders = { + assertEquals( + Message("ab", Map("A" -> "1")), + Message("a" , Map("A" -> "1")).transformBody[String](body => body + "b")) + } + + @Test def shouldConvertBodyAndPreserveHeaders = { + CamelContextManager.init + assertEquals( + Message("1.4", Map("A" -> "1")), + Message(1.4 , Map("A" -> "1")).setBodyAs(classOf[String])) + } + + @Test def shouldSetBodyAndPreserveHeaders = { + assertEquals( + Message("test2" , Map("A" -> "1")), + Message("test1" , Map("A" -> "1")).setBody("test2")) + } + + @Test def shouldSetHeadersAndPreserveBody = { + assertEquals( + Message("test1" , Map("C" -> "3")), + Message("test1" , Map("A" -> "1")).setHeaders(Map("C" -> "3"))) + + } + + @Test def shouldAddHeaderAndPreserveBodyAndHeaders = { + assertEquals( + Message("test1" , Map("A" -> "1", "B" -> "2")), + Message("test1" , Map("A" -> "1")).addHeader("B" -> "2")) + } + + @Test def shouldAddHeadersAndPreserveBodyAndHeaders = { + assertEquals( + Message("test1" , Map("A" -> "1", "B" -> "2")), + Message("test1" , Map("A" -> "1")).addHeaders(Map("B" -> "2"))) + } + + @Test def shouldRemoveHeadersAndPreserveBodyAndRemainingHeaders = { + assertEquals( + Message("test1" , Map("A" -> "1")), + Message("test1" , Map("A" -> "1", "B" -> "2")).removeHeader("B")) + } + +} \ No newline at end of file diff --git a/akka-camel/src/test/scala/ProducerTest.scala b/akka-camel/src/test/scala/ProducerTest.scala new file mode 100644 index 0000000000..11ae148fb5 --- /dev/null +++ b/akka-camel/src/test/scala/ProducerTest.scala @@ -0,0 +1,109 @@ +package se.scalablesolutions.akka.camel + +import org.apache.camel.{Exchange, Processor} +import org.apache.camel.builder.RouteBuilder +import org.apache.camel.component.mock.MockEndpoint +import org.junit.Assert._ +import org.junit.{Test, After, Before} +import org.scalatest.junit.JUnitSuite + +import se.scalablesolutions.akka.actor.Actor + +class ProducerTest extends JUnitSuite { + + // + // TODO: extend/rewrite unit tests + // These tests currently only ensure proper functioning of basic features. + // + + import CamelContextManager._ + + var mock: MockEndpoint = _ + + @Before def setUp = { + init + context.addRoutes(new TestRouteBuilder) + start + mock = context.getEndpoint("mock:mock", classOf[MockEndpoint]) + } + + @After def tearDown = { + stop + } + + // + // TODO: test replies to messages sent with ! (bang) + // TODO: test copying of custom message headers + // + + @Test def shouldProduceMessageSyncAndReceiveResponse = { + val producer = new TestProducer("direct:input2", false, false).start + val message = Message("test1", Map(Message.MessageExchangeId -> "123")) + val expected = Message("Hello test1", Map(Message.MessageExchangeId -> "123")) + assertEquals(expected, producer !! 
message get) + producer.stop + } + + @Test def shouldProduceMessageSyncAndReceiveFailure = { + val producer = new TestProducer("direct:input2", false, false).start + val message = Message("fail", Map(Message.MessageExchangeId -> "123")) + val result = producer.!![Failure](message).get + assertEquals("failure", result.cause.getMessage) + assertEquals(Map(Message.MessageExchangeId -> "123"), result.headers) + producer.stop + } + + @Test def shouldProduceMessageAsyncAndReceiveResponse = { + val producer = new TestProducer("direct:input2", true, false).start + val message = Message("test2", Map(Message.MessageExchangeId -> "124")) + val expected = Message("Hello test2", Map(Message.MessageExchangeId -> "124")) + assertEquals(expected, producer !! message get) + producer.stop + } + + @Test def shouldProduceMessageAsyncAndReceiveFailure = { + val producer = new TestProducer("direct:input2", true, false).start + val message = Message("fail", Map(Message.MessageExchangeId -> "124")) + val result = producer.!![Failure](message).get + assertEquals("failure", result.cause.getMessage) + assertEquals(Map(Message.MessageExchangeId -> "124"), result.headers) + producer.stop + } + + @Test def shouldProduceMessageSyncWithoutReceivingResponse = { + val producer = new TestProducer("direct:input1", false, true).start + mock.expectedBodiesReceived("test3") + producer.!("test3")(None) + producer.stop + } + + @Test def shouldProduceMessageAsyncAndReceiveResponseSync = { + val producer = new TestProducer("direct:input1", true, true).start + mock.expectedBodiesReceived("test4") + producer.!("test4")(None) + producer.stop + } + + class TestProducer(uri:String, prodAsync: Boolean, prodOneway: Boolean) extends Actor with Producer { + override def async = prodAsync + override def oneway = prodOneway + def endpointUri = uri + def receive = produce + } + + class TestRouteBuilder extends RouteBuilder { + def configure { + from("direct:input1").to("mock:mock") + from("direct:input2").process(new Processor() { + def process(exchange: Exchange) = { + val body = exchange.getIn.getBody + body match { + case "fail" => throw new Exception("failure") + case body => exchange.getOut.setBody("Hello %s" format body) + } + } + }) + } + } + +} \ No newline at end of file diff --git a/akka-camel/src/test/scala/component/ActorComponentFeatureTest.scala b/akka-camel/src/test/scala/component/ActorComponentFeatureTest.scala new file mode 100644 index 0000000000..58b2cdb169 --- /dev/null +++ b/akka-camel/src/test/scala/component/ActorComponentFeatureTest.scala @@ -0,0 +1,62 @@ +package se.scalablesolutions.akka.camel.component + +import org.apache.camel.RuntimeCamelException +import org.scalatest.{BeforeAndAfterEach, BeforeAndAfterAll, FeatureSpec} + +import se.scalablesolutions.akka.actor.ActorRegistry +import se.scalablesolutions.akka.camel.CamelContextManager +import se.scalablesolutions.akka.camel.support.{Respond, Countdown, Tester, Retain} + +class ActorComponentFeatureTest extends FeatureSpec with BeforeAndAfterAll with BeforeAndAfterEach { + override protected def beforeAll() = { + ActorRegistry.shutdownAll + CamelContextManager.init + CamelContextManager.start + } + + override protected def afterAll() = CamelContextManager.stop + + override protected def afterEach() = ActorRegistry.shutdownAll + + feature("Communicate with an actor from a Camel application using actor endpoint URIs") { + import CamelContextManager.template + + scenario("one-way communication using actor id") { + val actor = new Tester with Retain with Countdown + 
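+      // Retain captures the received message body/headers for the assertions
+      // below; Countdown trips a latch so the test can wait for delivery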
actor.start + template.sendBody("actor:%s" format actor.getId, "Martin") + assert(actor.waitFor) + assert(actor.body === "Martin") + } + + scenario("one-way communication using actor uuid") { + val actor = new Tester with Retain with Countdown + actor.start + template.sendBody("actor:uuid:%s" format actor.uuid, "Martin") + assert(actor.waitFor) + assert(actor.body === "Martin") + } + + scenario("two-way communication using actor id") { + val actor = new Tester with Respond + actor.start + assert(template.requestBody("actor:%s" format actor.getId, "Martin") === "Hello Martin") + } + + scenario("two-way communication using actor uuid") { + val actor = new Tester with Respond + actor.start + assert(template.requestBody("actor:uuid:%s" format actor.uuid, "Martin") === "Hello Martin") + } + + scenario("two-way communication with timeout") { + val actor = new Tester { + timeout = 1 + } + actor.start + intercept[RuntimeCamelException] { + template.requestBody("actor:uuid:%s" format actor.uuid, "Martin") + } + } + } +} \ No newline at end of file diff --git a/akka-camel/src/test/scala/component/ActorComponentTest.scala b/akka-camel/src/test/scala/component/ActorComponentTest.scala new file mode 100644 index 0000000000..1f7b42bf08 --- /dev/null +++ b/akka-camel/src/test/scala/component/ActorComponentTest.scala @@ -0,0 +1,35 @@ +package se.scalablesolutions.akka.camel.component + +import org.apache.camel.impl.DefaultCamelContext +import org.junit._ +import org.scalatest.junit.JUnitSuite + +class ActorComponentTest extends JUnitSuite { + + val component: ActorComponent = ActorComponentTest.mockComponent + + @Test def shouldCreateEndpointWithIdDefined = { + val ep1: ActorEndpoint = component.createEndpoint("actor:abc").asInstanceOf[ActorEndpoint] + val ep2: ActorEndpoint = component.createEndpoint("actor:id:abc").asInstanceOf[ActorEndpoint] + assert(ep1.id === Some("abc")) + assert(ep2.id === Some("abc")) + assert(ep1.uuid === None) + assert(ep2.uuid === None) + } + + @Test def shouldCreateEndpointWithUuidDefined = { + val ep: ActorEndpoint = component.createEndpoint("actor:uuid:abc").asInstanceOf[ActorEndpoint] + assert(ep.uuid === Some("abc")) + assert(ep.id === None) + } +} + +object ActorComponentTest { + def mockComponent = { + val component = new ActorComponent + component.setCamelContext(new DefaultCamelContext) + component + } + + def mockEndpoint(uri:String) = mockComponent.createEndpoint(uri) +} diff --git a/akka-camel/src/test/scala/component/ActorProducerTest.scala b/akka-camel/src/test/scala/component/ActorProducerTest.scala new file mode 100644 index 0000000000..afb4a12ef0 --- /dev/null +++ b/akka-camel/src/test/scala/component/ActorProducerTest.scala @@ -0,0 +1,76 @@ +package se.scalablesolutions.akka.camel.component + +import ActorComponentTest._ + +import java.util.concurrent.TimeoutException + +import org.apache.camel.ExchangePattern +import org.junit.{After, Test} +import org.scalatest.junit.JUnitSuite +import org.scalatest.BeforeAndAfterAll + +import se.scalablesolutions.akka.actor.ActorRegistry +import se.scalablesolutions.akka.camel.support.{Countdown, Retain, Tester, Respond} +import se.scalablesolutions.akka.camel.{Failure, Message} + +class ActorProducerTest extends JUnitSuite with BeforeAndAfterAll { + + @After def tearDown = { + ActorRegistry.shutdownAll + } + + @Test def shouldSendMessageToActor = { + val actor = new Tester with Retain with Countdown + val endpoint = mockEndpoint("actor:uuid:%s" format actor.uuid) + val exchange = 
endpoint.createExchange(ExchangePattern.InOnly) + actor.start + exchange.getIn.setBody("Martin") + exchange.getIn.setHeader("k1", "v1") + endpoint.createProducer.process(exchange) + actor.waitFor + assert(actor.body === "Martin") + assert(actor.headers === Map(Message.MessageExchangeId -> exchange.getExchangeId, "k1" -> "v1")) + } + + @Test def shouldSendMessageToActorAndReturnResponse = { + val actor = new Tester with Respond { + override def response(msg: Message) = Message(super.response(msg), Map("k2" -> "v2")) + } + val endpoint = mockEndpoint("actor:uuid:%s" format actor.uuid) + val exchange = endpoint.createExchange(ExchangePattern.InOut) + actor.start + exchange.getIn.setBody("Martin") + exchange.getIn.setHeader("k1", "v1") + endpoint.createProducer.process(exchange) + assert(exchange.getOut.getBody === "Hello Martin") + assert(exchange.getOut.getHeader("k2") === "v2") + } + + @Test def shouldSendMessageToActorAndReturnFailure = { + val actor = new Tester with Respond { + override def response(msg: Message) = Failure(new Exception("testmsg"), Map("k3" -> "v3")) + } + val endpoint = mockEndpoint("actor:uuid:%s" format actor.uuid) + val exchange = endpoint.createExchange(ExchangePattern.InOut) + actor.start + exchange.getIn.setBody("Martin") + exchange.getIn.setHeader("k1", "v1") + endpoint.createProducer.process(exchange) + assert(exchange.getException.getMessage === "testmsg") + assert(exchange.getOut.getBody === null) + assert(exchange.getOut.getHeader("k3") === null) // headers from failure message are currently ignored + } + + @Test def shouldSendMessageToActorAndTimeout: Unit = { + val actor = new Tester { + timeout = 1 + } + val endpoint = mockEndpoint("actor:uuid:%s" format actor.uuid) + val exchange = endpoint.createExchange(ExchangePattern.InOut) + actor.start + exchange.getIn.setBody("Martin") + intercept[TimeoutException] { + endpoint.createProducer.process(exchange) + } + } +} diff --git a/akka-camel/src/test/scala/service/CamelServiceTest.scala b/akka-camel/src/test/scala/service/CamelServiceTest.scala new file mode 100644 index 0000000000..a3b0f5c913 --- /dev/null +++ b/akka-camel/src/test/scala/service/CamelServiceTest.scala @@ -0,0 +1,103 @@ +package se.scalablesolutions.akka.camel.service + +import org.apache.camel.builder.RouteBuilder +import org.junit.Assert._ +import org.scalatest.junit.JUnitSuite + +import se.scalablesolutions.akka.actor.Actor +import se.scalablesolutions.akka.actor.annotation.consume +import se.scalablesolutions.akka.camel.{CamelContextManager, Consumer, Message} +import org.junit.{Ignore, Before, After, Test} + +class CamelServiceTest extends JUnitSuite with CamelService { + + // + // TODO: extend/rewrite unit tests + // These tests currently only ensure proper functioning of basic features. 
+ // + + import CamelContextManager._ + + var actor1: Actor = _ + var actor2: Actor = _ + var actor3: Actor = _ + + @Before def setUp = { + // register actors before starting the CamelService + actor1 = new TestActor1().start + actor2 = new TestActor2().start + actor3 = new TestActor3().start + // initialize global CamelContext + init + // customize global CamelContext + context.addRoutes(new TestRouteBuilder) + consumerPublisher.expectPublishCount(2) + load + consumerPublisher.awaitPublish + } + + @After def tearDown = { + unload + actor1.stop + actor2.stop + actor3.stop + } + + @Test def shouldReceiveResponseViaPreStartGeneratedRoutes = { + assertEquals("Hello Martin (actor1)", template.requestBody("direct:actor1", "Martin")) + assertEquals("Hello Martin (actor2)", template.requestBody("direct:actor2", "Martin")) + } + + @Test def shouldReceiveResponseViaPostStartGeneratedRoute = { + consumerPublisher.expectPublishCount(1) + // register actor after starting CamelService + val actor4 = new TestActor4().start + consumerPublisher.awaitPublish + assertEquals("Hello Martin (actor4)", template.requestBody("direct:actor4", "Martin")) + actor4.stop + } + + @Test def shouldReceiveResponseViaCustomRoute = { + assertEquals("Hello Tester (actor3)", template.requestBody("direct:actor3", "Martin")) + } + +} + +class TestActor1 extends Actor with Consumer { + def endpointUri = "direct:actor1" + + protected def receive = { + case msg: Message => reply("Hello %s (actor1)" format msg.body) + } +} + +@consume("direct:actor2") +class TestActor2 extends Actor { + protected def receive = { + case msg: Message => reply("Hello %s (actor2)" format msg.body) + } +} + +class TestActor3 extends Actor { + id = "actor3" + + protected def receive = { + case msg: Message => reply("Hello %s (actor3)" format msg.body) + } +} + +class TestActor4 extends Actor with Consumer { + def endpointUri = "direct:actor4" + + protected def receive = { + case msg: Message => reply("Hello %s (actor4)" format msg.body) + } +} + +class TestRouteBuilder extends RouteBuilder { + def configure { + val actorUri = "actor:%s" format classOf[TestActor3].getName + from("direct:actor3").transform(constant("Tester")).to("actor:actor3") + } +} + diff --git a/akka-camel/src/test/scala/support/TestSupport.scala b/akka-camel/src/test/scala/support/TestSupport.scala new file mode 100644 index 0000000000..f6b7998934 --- /dev/null +++ b/akka-camel/src/test/scala/support/TestSupport.scala @@ -0,0 +1,49 @@ +package se.scalablesolutions.akka.camel.support + +import java.util.concurrent.{TimeUnit, CountDownLatch} + +import se.scalablesolutions.akka.camel.Message +import se.scalablesolutions.akka.actor.Actor + +trait Receive { + def onMessage(msg: Message): Unit +} + +trait Respond extends Receive {self: Actor => + abstract override def onMessage(msg: Message): Unit = { + super.onMessage(msg) + reply(response(msg)) + } + def response(msg: Message): Any = "Hello %s" format msg.body +} + +trait Retain extends Receive { + var body: Any = _ + var headers = Map.empty[String, Any] + abstract override def onMessage(msg: Message): Unit = { + super.onMessage(msg) + body = msg.body + headers = msg.headers + } +} + +trait Countdown extends Receive { + val count = 1 + val duration = 5000 + val latch = new CountDownLatch(count) + + def waitFor = latch.await(duration, TimeUnit.MILLISECONDS) + def countDown = latch.countDown + + abstract override def onMessage(msg: Message) = { + super.onMessage(msg) + countDown + } +} + +class Tester extends Actor with Receive { + def 
receive = { + case msg: Message => onMessage(msg) + } + def onMessage(msg: Message): Unit = {} +} diff --git a/akka-cluster/akka-cluster-jgroups/pom.xml b/akka-cluster/akka-cluster-jgroups/pom.xml deleted file mode 100644 index 85d25e2330..0000000000 --- a/akka-cluster/akka-cluster-jgroups/pom.xml +++ /dev/null @@ -1,24 +0,0 @@ - - 4.0.0 - - akka-cluster-jgroups - Akka Cluster JGroups Module - - jar - - - akka-cluster-parent - se.scalablesolutions.akka - 0.7-SNAPSHOT - - - - - jgroups - jgroups - 2.8.0.CR7 - - - - diff --git a/akka-cluster/akka-cluster-jgroups/src/main/scala/JGroupsClusterActor.scala b/akka-cluster/akka-cluster-jgroups/src/main/scala/JGroupsClusterActor.scala index 12d93ef272..7d56bb1539 100644 --- a/akka-cluster/akka-cluster-jgroups/src/main/scala/JGroupsClusterActor.scala +++ b/akka-cluster/akka-cluster-jgroups/src/main/scala/JGroupsClusterActor.scala @@ -1,15 +1,17 @@ -package se.scalablesolutions.akka.remote +package se.scalablesolutions.akka.cluster.jgroups import org.jgroups.{JChannel, View => JG_VIEW, Address, Message => JG_MSG, ExtendedMembershipListener, Receiver} +import se.scalablesolutions.akka.remote.ClusterActor._ +import se.scalablesolutions.akka.remote.BasicClusterActor + +import org.scala_tools.javautils.Imports._ + /** * Clustering support via JGroups. * @Author Viktor Klang */ class JGroupsClusterActor extends BasicClusterActor { - import ClusterActor._ - import org.scala_tools.javautils.Imports._ - type ADDR_T = Address @volatile private var isActive = false diff --git a/akka-cluster/akka-cluster-shoal/pom.xml b/akka-cluster/akka-cluster-shoal/pom.xml deleted file mode 100644 index b58e77dcf5..0000000000 --- a/akka-cluster/akka-cluster-shoal/pom.xml +++ /dev/null @@ -1,34 +0,0 @@ - - 4.0.0 - - akka-cluster-shoal - Akka Cluster Shoal Module - - jar - - - akka-cluster-parent - se.scalablesolutions.akka - 0.7-SNAPSHOT - - - - - - shoal-jxta - shoal - 1.1-20090818 - - - shoal-jxta - jxta - 1.1-20090818 - - - - diff --git a/akka-cluster/akka-cluster-shoal/src/main/scala/ShoalClusterActor.scala b/akka-cluster/akka-cluster-shoal/src/main/scala/ShoalClusterActor.scala index 3d83a46ef3..068d3a4345 100644 --- a/akka-cluster/akka-cluster-shoal/src/main/scala/ShoalClusterActor.scala +++ b/akka-cluster/akka-cluster-shoal/src/main/scala/ShoalClusterActor.scala @@ -1,29 +1,16 @@ /** * Copyright (C) 2009-2010 Scalable Solutions AB */ -package se.scalablesolutions.akka.remote +package se.scalablesolutions.akka.cluster.shoal -import se.scalablesolutions.akka.Config.config import java.util.Properties -import com.sun.enterprise.ee.cms.core.{CallBack, - GMSConstants, - GMSFactory, - GroupManagementService, - MessageSignal, - Signal, - GMSException, - SignalAcquireException, - SignalReleaseException, - JoinNotificationSignal, - FailureSuspectedSignal, - FailureNotificationSignal } -import com.sun.enterprise.ee.cms.impl.client.{FailureNotificationActionFactoryImpl, - FailureSuspectedActionFactoryImpl, - JoinNotificationActionFactoryImpl, - MessageActionFactoryImpl, - PlannedShutdownActionFactoryImpl -} +import se.scalablesolutions.akka.config.Config.config +import se.scalablesolutions.akka.remote.{ClusterActor, BasicClusterActor, RemoteServer} + +import com.sun.enterprise.ee.cms.core._ +import com.sun.enterprise.ee.cms.impl.client._ + /** * Clustering support via Shoal. 
 */
@@ -67,9 +54,9 @@ class ShoalClusterActor extends BasicClusterActor {
   * Adds callbacks and boots up the cluster
   */
  protected def createGMS : GroupManagementService = {
-
-    val g = GMSFactory.startGMSModule(serverName,name, GroupManagementService.MemberType.CORE, properties()).asInstanceOf[GroupManagementService]
-
+    val g = GMSFactory
+      .startGMSModule(serverName,name, GroupManagementService.MemberType.CORE, properties())
+      .asInstanceOf[GroupManagementService]
     val callback = createCallback
     g.addActionFactory(new JoinNotificationActionFactoryImpl(callback))
     g.addActionFactory(new FailureSuspectedActionFactoryImpl(callback))
@@ -102,8 +89,8 @@ class ShoalClusterActor extends BasicClusterActor {
       }
       signal.release()
     } catch {
-      case e : SignalAcquireException => log.warning(e,"SignalAcquireException")
-      case e : SignalReleaseException => log.warning(e,"SignalReleaseException")
+      case e: SignalAcquireException => log.warning(e, "SignalAcquireException")
+      case e: SignalReleaseException => log.warning(e, "SignalReleaseException")
     }
   }
 }
diff --git a/akka-cluster/akka-cluster-tribes/pom.xml b/akka-cluster/akka-cluster-tribes/pom.xml
deleted file mode 100644
index efcea51aa8..0000000000
--- a/akka-cluster/akka-cluster-tribes/pom.xml
+++ /dev/null
@@ -1,24 +0,0 @@
-
- 4.0.0
-
- akka-cluster-tribes
- Akka Cluster Tribes Module
-
- jar
-
-
- akka-cluster-parent
- se.scalablesolutions.akka
- 0.7-SNAPSHOT
-
-
-
-
- org.apache.tomcat
- tribes
- 6.0.20
-
-
-
diff --git a/akka-cluster/pom.xml b/akka-cluster/pom.xml
deleted file mode 100644
index 9d7bd42000..0000000000
--- a/akka-cluster/pom.xml
+++ /dev/null
@@ -1,42 +0,0 @@
-
- 4.0.0
-
- akka-cluster-parent
- Akka Cluster Modules
-
- pom
-
-
- akka
- se.scalablesolutions.akka
- 0.7-SNAPSHOT
-
-
-
- akka-cluster-jgroups
-
- akka-cluster-shoal
-
-
-
-
-
- akka-core
- ${project.groupId}
- ${project.version}
-
-
-
-
- org.scalatest
- scalatest
- 1.0
- test
-
-
- junit
- junit
- 4.5
- test
-
-
diff --git a/akka-comet/pom.xml b/akka-comet/pom.xml
deleted file mode 100644
index 88cdc0cf57..0000000000
--- a/akka-comet/pom.xml
+++ /dev/null
@@ -1,54 +0,0 @@
-
- 4.0.0
-
- akka-comet
- Akka Comet Module
-
- jar
-
-
- akka
- se.scalablesolutions.akka
- 0.7-SNAPSHOT
-
-
-
-
-
- akka-rest
- ${project.groupId}
- ${project.version}
-
-
-
-
- com.sun.grizzly
- grizzly-comet-webserver
- ${grizzly.version}
-
-
-
-
- javax.servlet
- servlet-api
- 2.5
-
-
- org.atmosphere
- atmosphere-annotations
- ${atmosphere.version}
-
-
- org.atmosphere
- atmosphere-jersey
- ${atmosphere.version}
-
-
- org.atmosphere
- atmosphere-runtime
- ${atmosphere.version}
-
-
diff --git a/akka-comet/src/main/scala/AkkaClusterBroadcastFilter.scala b/akka-comet/src/main/scala/AkkaClusterBroadcastFilter.scala
index 724c82432e..8fdd47fddd 100644
--- a/akka-comet/src/main/scala/AkkaClusterBroadcastFilter.scala
+++ b/akka-comet/src/main/scala/AkkaClusterBroadcastFilter.scala
@@ -4,13 +4,13 @@
 
 package se.scalablesolutions.akka.comet
 
-import se.scalablesolutions.akka.actor.{Actor}
-import se.scalablesolutions.akka.remote.{Cluster}
-import scala.reflect.{BeanProperty}
+import se.scalablesolutions.akka.actor.Actor
+import se.scalablesolutions.akka.remote.Cluster
+import scala.reflect.BeanProperty
 import org.atmosphere.cpr.{BroadcastFilter, ClusterBroadcastFilter, Broadcaster}
 
 sealed trait ClusterCometMessageType
-case class ClusterCometBroadcast(val name : String, val msg : AnyRef) extends ClusterCometMessageType
+case class ClusterCometBroadcast(name: String, msg: AnyRef) extends ClusterCometMessageType
 
 /**
  * Enables explicit clustering of Atmosphere (Comet) resources
diff --git a/akka-kernel/src/main/scala/BootableCometActorService.scala b/akka-comet/src/main/scala/BootableCometActorService.scala
similarity index 87%
rename from akka-kernel/src/main/scala/BootableCometActorService.scala
rename to akka-comet/src/main/scala/BootableCometActorService.scala
index b014fcb9ad..496cc33aed 100644
--- a/akka-kernel/src/main/scala/BootableCometActorService.scala
+++ b/akka-comet/src/main/scala/BootableCometActorService.scala
@@ -2,16 +2,16 @@
  * Copyright (C) 2009-2010 Scalable Solutions AB
  */
 
-package se.scalablesolutions.akka
+package se.scalablesolutions.akka.comet
 
 import com.sun.grizzly.http.SelectorThread
 import com.sun.grizzly.http.servlet.ServletAdapter
 import com.sun.grizzly.standalone.StaticStreamAlgorithm
 import javax.ws.rs.core.UriBuilder
 
-import se.scalablesolutions.akka.comet.AkkaServlet
+
 import se.scalablesolutions.akka.actor.BootableActorLoaderService
-import se.scalablesolutions.akka.util.{Bootable,Logging}
+import se.scalablesolutions.akka.util.{Bootable, Logging}
 
 /**
  * Handles the Akka Comet Support (load/unload)
@@ -19,16 +19,17 @@ import se.scalablesolutions.akka.util.{Bootable,Logging}
 trait BootableCometActorService extends Bootable with Logging {
   self : BootableActorLoaderService =>
 
-  import Config._
+  import config.Config._
 
   val REST_HOSTNAME = config.getString("akka.rest.hostname", "localhost")
   val REST_URL = "http://" + REST_HOSTNAME
   val REST_PORT = config.getInt("akka.rest.port", 9998)
+
   protected var jerseySelectorThread: Option[SelectorThread] = None
 
   abstract override def onLoad = {
     super.onLoad
-    if(config.getBool("akka.rest.service", true)){
+    if (config.getBool("akka.rest.service", true)) {
 
       val uri = UriBuilder.fromUri(REST_URL).port(REST_PORT).build()
 
@@ -42,8 +43,7 @@ trait BootableCometActorService extends Bootable with Logging {
       adapter.setHandleStaticResources(true)
       adapter.setServletInstance(new AkkaServlet)
       adapter.setContextPath(uri.getPath)
-      //Using autodetection for now
-      //adapter.addInitParameter("cometSupport", "org.atmosphere.container.GrizzlyCometSupport")
+      adapter.addInitParameter("cometSupport", "org.atmosphere.container.GrizzlyCometSupport")
 
       if (HOME.isDefined) adapter.setRootFolder(HOME.get + "/deploy/root")
       log.info("REST service root path [%s] and context path [%s]", adapter.getRootFolder, adapter.getContextPath)
diff --git a/akka-core/src/main/scala/actor/ActiveObject.scala b/akka-core/src/main/scala/actor/ActiveObject.scala
index d88f0e861b..9b5a6b409a 100644
--- a/akka-core/src/main/scala/actor/ActiveObject.scala
+++ b/akka-core/src/main/scala/actor/ActiveObject.scala
@@ -19,7 +19,7 @@ import java.net.InetSocketAddress
 import java.lang.reflect.{InvocationTargetException, Method}
 
 object Annotations {
-  import se.scalablesolutions.akka.annotation._
+  import se.scalablesolutions.akka.actor.annotation._
   val oneway = classOf[oneway]
   val transactionrequired = classOf[transactionrequired]
   val prerestart = classOf[prerestart]
diff --git a/akka-core/src/main/scala/actor/Actor.scala b/akka-core/src/main/scala/actor/Actor.scala
index e5423e7bd1..674afeb6ad 100644
--- a/akka-core/src/main/scala/actor/Actor.scala
+++ b/akka-core/src/main/scala/actor/Actor.scala
@@ -4,23 +4,25 @@
 
 package se.scalablesolutions.akka.actor
 
-import se.scalablesolutions.akka.Config._
 import se.scalablesolutions.akka.dispatch._
+import se.scalablesolutions.akka.config.Config._
 import se.scalablesolutions.akka.config.{AllForOneStrategy, OneForOneStrategy, FaultHandlingStrategy}
 import se.scalablesolutions.akka.config.ScalaConfig._
 import se.scalablesolutions.akka.stm.Transaction._
 import se.scalablesolutions.akka.stm.TransactionManagement._
-import se.scalablesolutions.akka.stm.{StmException, TransactionManagement}
+import se.scalablesolutions.akka.stm.TransactionManagement
 import se.scalablesolutions.akka.remote.protobuf.RemoteProtocol.RemoteRequest
 import se.scalablesolutions.akka.remote.{RemoteProtocolBuilder, RemoteClient, RemoteRequestIdFactory}
 import se.scalablesolutions.akka.serialization.Serializer
 import se.scalablesolutions.akka.util.{HashCode, Logging, UUID}
 
 import org.multiverse.api.ThreadLocalTransaction._
+import org.multiverse.commitbarriers.CountDownCommitBarrier
 
 import java.util.{Queue, HashSet}
 import java.util.concurrent.ConcurrentLinkedQueue
 import java.net.InetSocketAddress
+import java.util.concurrent.locks.{Lock, ReentrantLock}
 
 /**
  * Implements the Transactor abstraction. E.g. a transactional actor.
@@ -72,7 +74,7 @@ object Actor extends Logging {
   val HOSTNAME = config.getString("akka.remote.server.hostname", "localhost")
   val PORT = config.getInt("akka.remote.server.port", 9999)
 
-  object Sender{
+  object Sender {
     implicit val Self: Option[Actor] = None
   }
 
@@ -98,9 +100,7 @@ object Actor extends Logging {
   * The actor is started when created.
   * Example:
   *
-   * import Actor._
-   *
-   * val a = actor  {
+   * val a = Actor.init  {
    *   ... // init stuff
    * } receive  {
    *   case msg => ... // handle message
@@ -108,8 +108,8 @@ object Actor extends Logging {
    * 
   *
   */
-  def actor(body: => Unit) = {
-    def handler(body: => Unit) = new {
+  def init[A](body: => Unit) = {
+    def handler[A](body: => Unit) = new {
       def receive(handler: PartialFunction[Any, Unit]) = new Actor() {
         start
         body
@@ -198,7 +198,7 @@ object Actor extends Logging {
  */
 trait Actor extends TransactionManagement {
   implicit protected val self: Option[Actor] = Some(this)
-  implicit protected val transactionFamily: String = this.getClass.getName
+  implicit protected val transactionFamilyName: String = this.getClass.getName
 
   // Only mutable for RemoteServer in order to maintain identity across nodes
   private[akka] var _uuid = UUID.newUuid.toString
@@ -219,6 +219,12 @@ trait Actor extends TransactionManagement {
   private[akka] var _replyToAddress: Option[InetSocketAddress] = None
   private[akka] val _mailbox: Queue[MessageInvocation] = new ConcurrentLinkedQueue[MessageInvocation]
 
+  /**
+   * This lock ensures thread safety in the dispatching: only one message can
+   * be dispatched at once on the actor.
+   */
+  private[akka] val _dispatcherLock: Lock = new ReentrantLock
+
   // ====================================
   // protected fields
   // ====================================
@@ -309,9 +315,9 @@ trait Actor extends TransactionManagement {
    * If 'trapExit' is set for the actor to act as supervisor, then a faultHandler must be defined.
    * Can be one of:
    *
-   *  AllForOneStrategy(maxNrOfRetries: Int, withinTimeRange: Int)
+   *  faultHandler = Some(AllForOneStrategy(maxNrOfRetries, withinTimeRange))
    *
-   *  OneForOneStrategy(maxNrOfRetries: Int, withinTimeRange: Int)
+   *  faultHandler = Some(OneForOneStrategy(maxNrOfRetries, withinTimeRange))
    * 
   */
  protected var faultHandler: Option[FaultHandlingStrategy] = None
@@ -334,8 +340,8 @@ trait Actor extends TransactionManagement {
   /**
    * User overridable callback/setting.
    *
-   * Partial function implementing the server logic.
-   * To be implemented by subclassing server.
+   * Partial function implementing the actor logic.
+   * To be implemented by subclassing actor.
    *
    * Example code:
    *
@@ -501,8 +507,6 @@ trait Actor extends TransactionManagement {
   def !![T](message: Any, timeout: Long): Option[T] = {
     if (_isKilled) throw new ActorKilledException("Actor [" + toString + "] has been killed, can't respond to messages")
     if (_isRunning) {
-      val from = if (sender != null && sender.isInstanceOf[Actor]) Some(sender.asInstanceOf[Actor])
-      else None
       val future = postMessageToMailboxAndCreateFutureResultWithTimeout(message, timeout, None)
       val isActiveObject = message.isInstanceOf[Invocation]
       if (isActiveObject && message.asInstanceOf[Invocation].isVoid) future.completeWithResult(None)
@@ -785,6 +789,11 @@ trait Actor extends TransactionManagement {
   }
 
   protected[akka] def postMessageToMailbox(message: Any, sender: Option[Actor]): Unit = {
+    if (isTransactionSetInScope) {
+      log.trace("Adding transaction for %s with message [%s] to transaction set", toString, message)
+      getTransactionSetInScope.incParties
+    }
+
     if (_remoteAddress.isDefined) {
       val requestBuilder = RemoteRequest.newBuilder
           .setId(RemoteRequestIdFactory.nextId)
@@ -796,8 +805,7 @@ trait Actor extends TransactionManagement {
           .setIsEscaped(false)
       
       val id = registerSupervisorAsRemoteActor
-      if(id.isDefined)
-        requestBuilder.setSupervisorUuid(id.get)
+      if (id.isDefined) requestBuilder.setSupervisorUuid(id.get)
 
       // set the source fields used to reply back to the original sender
       // (i.e. not the remote proxy actor)
@@ -816,7 +824,7 @@ trait Actor extends TransactionManagement {
       RemoteProtocolBuilder.setMessage(message, requestBuilder)
       RemoteClient.clientFor(_remoteAddress.get).send(requestBuilder.build, None)
     } else {
-      val invocation = new MessageInvocation(this, message, None, sender, currentTransaction.get)
+      val invocation = new MessageInvocation(this, message, None, sender, transactionSet.get)
       if (_isEventBased) {
         _mailbox.add(invocation)
         if (_isSuspended) invocation.send
@@ -824,12 +832,18 @@ trait Actor extends TransactionManagement {
       else
         invocation.send
     }
+    clearTransactionSet
   }
 
   protected[akka] def postMessageToMailboxAndCreateFutureResultWithTimeout(
       message: Any, 
       timeout: Long,
       senderFuture: Option[CompletableFuture]): CompletableFuture = {
+    if (isTransactionSetInScope) {
+      log.trace("Adding transaction for %s with message [%s] to transaction set", toString, message)    
+      getTransactionSetInScope.incParties
+    }
+    
     if (_remoteAddress.isDefined) {
       val requestBuilder = RemoteRequest.newBuilder
           .setId(RemoteRequestIdFactory.nextId)
@@ -843,16 +857,18 @@ trait Actor extends TransactionManagement {
       val id = registerSupervisorAsRemoteActor
       if (id.isDefined) requestBuilder.setSupervisorUuid(id.get)
       val future = RemoteClient.clientFor(_remoteAddress.get).send(requestBuilder.build, senderFuture)
+      clearTransactionSet
       if (future.isDefined) future.get
       else throw new IllegalStateException("Expected a future from remote call to actor " + toString)
     } else {
       val future = if (senderFuture.isDefined) senderFuture.get
                    else new DefaultCompletableFuture(timeout)
-      val invocation = new MessageInvocation(this, message, Some(future), None, currentTransaction.get)
+      val invocation = new MessageInvocation(this, message, Some(future), None, transactionSet.get)
       if (_isEventBased) {
         _mailbox.add(invocation)
         invocation.send
       } else invocation.send
+      clearTransactionSet
       future
     }
   }
@@ -872,7 +888,7 @@ trait Actor extends TransactionManagement {
   }
 
   private def dispatch[T](messageHandle: MessageInvocation) = {
-    setTransaction(messageHandle.tx)
+    setTransactionSet(messageHandle.transactionSet)
 
     val message = messageHandle.message //serializeMessage(messageHandle.message)
     senderFuture = messageHandle.future
@@ -894,43 +910,55 @@ trait Actor extends TransactionManagement {
   }
 
   private def transactionalDispatch[T](messageHandle: MessageInvocation) = {
-    setTransaction(messageHandle.tx)
+    var topLevelTransaction = false
+    val txSet: Option[CountDownCommitBarrier] =
+      if (messageHandle.transactionSet.isDefined) messageHandle.transactionSet
+      else {
+        topLevelTransaction = true // FIXME create a new internal atomic block that can wait for X seconds if top level tx
+        if (isTransactionRequiresNew) {
+          log.trace("Creating a new transaction set (top-level transaction) \nfor actor %s \nwith message %s", toString, messageHandle)
+          Some(createNewTransactionSet)
+        } else None
+      }
+    setTransactionSet(txSet)
 
     val message = messageHandle.message //serializeMessage(messageHandle.message)
     senderFuture = messageHandle.future
     sender = messageHandle.sender
 
+    def clearTx = {
+      clearTransactionSet
+      clearTransaction
+    }
+
     def proceed = {
-      try {
-        incrementTransaction
-        if (base.isDefinedAt(message)) base(message) // invoke user actor's receive partial function
-        else throw new IllegalArgumentException(
-          "Actor " + toString + " could not process message [" + message + "]" +
-           "\n\tsince no matching 'case' clause in its 'receive' method could be found")
-      } finally {
-        decrementTransaction
-      }
+      if (base.isDefinedAt(message)) base(message) // invoke user actor's receive partial function
+      else throw new IllegalArgumentException(
+        toString + " could not process message [" + message + "]" +
+        "\n\tsince no matching 'case' clause in its 'receive' method could be found")
+      setTransactionSet(txSet) // restore transaction set to allow atomic block to do commit
     }
 
     try {
-      if (isTransactionRequiresNew && !isTransactionInScope) {
-        if (senderFuture.isEmpty) throw new StmException(
-          "Can't continue transaction in a one-way fire-forget message send" +
-          "\n\tE.g. using Actor '!' method or Active Object 'void' method" +
-          "\n\tPlease use the Actor '!!' method or Active Object method with non-void return type")
+      if (isTransactionRequiresNew) {
         atomic {
           proceed
         }
       } else proceed
     } catch {
+      case e: IllegalStateException => {}
       case e =>
+        // abort transaction set
+        if (isTransactionSetInScope) try { getTransactionSetInScope.abort } catch { case e: IllegalStateException => {} }
         Actor.log.error(e, "Exception when invoking \n\tactor [%s] \n\twith message [%s]", this, message)
+
         if (senderFuture.isDefined) senderFuture.get.completeWithException(this, e)
-        clearTransaction // need to clear currentTransaction before call to supervisor
+        clearTx  // need to clear currentTransaction before call to supervisor
+
         // FIXME to fix supervisor restart of remote actor for oneway calls, inject a supervisor proxy that can send notification back to client
         if (_supervisor.isDefined) _supervisor.get ! Exit(this, e)
     } finally {
-      clearTransaction
+      clearTx
     }
   }
 
@@ -1042,6 +1070,5 @@ trait Actor extends TransactionManagement {
     that.asInstanceOf[Actor]._uuid == _uuid
   }
 
-  override def toString(): String = "Actor[" + id + ":" + uuid + "]"
-
+  override def toString = "Actor[" + id + ":" + uuid + "]"
 }
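
For illustration, a minimal sketch of the renamed factory method in use: a hypothetical echo actor, following the shape of the updated scaladoc example above.

    import se.scalablesolutions.akka.actor.Actor
    import se.scalablesolutions.akka.actor.Actor.Sender.Self // implicit 'no sender' for '!'

    // The actor is created and started immediately: 'init' runs the
    // initialization block once, 'receive' supplies the message handler.
    val echo = Actor.init {
      // init stuff
    } receive {
      case msg => println("echo: " + msg)
    }

    echo ! "ping" // fire-and-forget send
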
diff --git a/akka-core/src/main/scala/actor/ActorRegistry.scala b/akka-core/src/main/scala/actor/ActorRegistry.scala
index 9e0b1cba08..6db4d0375a 100644
--- a/akka-core/src/main/scala/actor/ActorRegistry.scala
+++ b/akka-core/src/main/scala/actor/ActorRegistry.scala
@@ -8,8 +8,7 @@ import se.scalablesolutions.akka.util.Logging
 
 import scala.collection.mutable.ListBuffer
 import scala.reflect.Manifest
-
-import java.util.concurrent.ConcurrentHashMap
+import java.util.concurrent.{CopyOnWriteArrayList, ConcurrentHashMap}
 
 /**
  * Registry holding all Actor instances in the whole system.
@@ -23,9 +22,10 @@ import java.util.concurrent.ConcurrentHashMap
  * @author Jonas Bonér
  */
 object ActorRegistry extends Logging {
-  private val actorsByUUID =      new ConcurrentHashMap[String, Actor]
-  private val actorsById =        new ConcurrentHashMap[String, List[Actor]]
-  private val actorsByClassName = new ConcurrentHashMap[String, List[Actor]]
+  private val actorsByUUID =          new ConcurrentHashMap[String, Actor]
+  private val actorsById =            new ConcurrentHashMap[String, List[Actor]]
+  private val actorsByClassName =     new ConcurrentHashMap[String, List[Actor]]
+  private val registrationListeners = new CopyOnWriteArrayList[Actor]
 
   /**
    * Returns all actors in the system.
@@ -103,6 +103,9 @@ object ActorRegistry extends Logging {
     if (actorsByClassName.containsKey(className)) {
       actorsByClassName.put(className, actor :: actorsByClassName.get(className))
     } else actorsByClassName.put(className, actor :: Nil)
+
+    // notify listeners
+    foreachListener(_.!(ActorRegistered(actor))(None))
   }
 
   /**
@@ -112,6 +115,8 @@ object ActorRegistry extends Logging {
     actorsByUUID remove actor.uuid
     actorsById remove actor.getId
     actorsByClassName remove actor.getClass.getName
+    // notify listeners
+    foreachListener(_.!(ActorUnregistered(actor))(None))
   }
 
   /**
@@ -125,4 +130,26 @@ object ActorRegistry extends Logging {
     actorsByClassName.clear
     log.info("All actors have been shut down and unregistered from ActorRegistry")
   }
+
+  /**
+   * Adds the registration listener to this registry's listener list.
+   */
+  def addRegistrationListener(listener: Actor) = {
+    registrationListeners.add(listener)
+  }
+
+  /**
+   * Removes the registration listener from this registry's listener list.
+   */
+  def removeRegistrationListener(listener: Actor) = {
+    registrationListeners.remove(listener)
+  }
+
+  private def foreachListener(f: (Actor) => Unit) {
+    val iterator = registrationListeners.iterator
+    while (iterator.hasNext) f(iterator.next)
+  }
 }
+
+case class ActorRegistered(actor: Actor)
+case class ActorUnregistered(actor: Actor)
\ No newline at end of file
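For illustration, a minimal sketch of the new registration-listener hook (the listener actor and its name are hypothetical):

    import se.scalablesolutions.akka.actor.{Actor, ActorRegistry, ActorRegistered, ActorUnregistered}

    // Listener actor: receives a message for every registration change.
    class RegistryListener extends Actor {
      def receive = {
        case ActorRegistered(actor)   => println("registered:   " + actor)
        case ActorUnregistered(actor) => println("unregistered: " + actor)
      }
    }

    val listener = new RegistryListener
    listener.start
    ActorRegistry.addRegistrationListener(listener)
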
diff --git a/akka-core/src/main/scala/actor/BootableActorLoaderService.scala b/akka-core/src/main/scala/actor/BootableActorLoaderService.scala
index 1bacbf6f59..5c80620d80 100644
--- a/akka-core/src/main/scala/actor/BootableActorLoaderService.scala
+++ b/akka-core/src/main/scala/actor/BootableActorLoaderService.scala
@@ -7,8 +7,8 @@ package se.scalablesolutions.akka.actor
 import java.io.File
 import java.net.URLClassLoader
 
-import se.scalablesolutions.akka.util.{Bootable,Logging}
-import se.scalablesolutions.akka.Config._
+import se.scalablesolutions.akka.util.{Bootable, Logging}
+import se.scalablesolutions.akka.config.Config._
 
 /**
  * Handles all modules in the deploy directory (load and unload)
@@ -30,12 +30,8 @@ trait BootableActorLoaderService extends Bootable with Logging {
       }
       val toDeploy = for (f <- DEPLOY_DIR.listFiles().toArray.toList.asInstanceOf[List[File]]) yield f.toURL
       log.info("Deploying applications from [%s]: [%s]", DEPLOY, toDeploy.toArray.toList)
-      new URLClassLoader(toDeploy.toArray, ClassLoader.getSystemClassLoader)
-    } else if (getClass.getClassLoader.getResourceAsStream("akka.conf") ne null) {
-      getClass.getClassLoader
-    } else throw new IllegalStateException(
-      "AKKA_HOME is not defined and no 'akka.conf' can be found on the classpath, aborting")
-    )
+      new URLClassLoader(toDeploy.toArray, getClass.getClassLoader)
+    } else getClass.getClassLoader)
   }
 
   abstract override def onLoad = {
@@ -47,4 +43,4 @@ trait BootableActorLoaderService extends Bootable with Logging {
   }
   
   abstract override def onUnload = ActorRegistry.shutdownAll
-}
\ No newline at end of file
+}
diff --git a/akka-core/src/main/scala/actor/Scheduler.scala b/akka-core/src/main/scala/actor/Scheduler.scala
index 8205db5843..be23149b61 100644
--- a/akka-core/src/main/scala/actor/Scheduler.scala
+++ b/akka-core/src/main/scala/actor/Scheduler.scala
@@ -17,7 +17,7 @@ import java.util.concurrent._
 
 import se.scalablesolutions.akka.config.ScalaConfig._
 import se.scalablesolutions.akka.config.{AllForOneStrategy, OneForOneStrategy, FaultHandlingStrategy}
-import se.scalablesolutions.akka.util.{Logging}
+import se.scalablesolutions.akka.util.Logging
 
 import org.scala_tools.javautils.Imports._
 
diff --git a/akka-core/src/main/scala/config/Config.scala b/akka-core/src/main/scala/config/Config.scala
index e993573972..ecbdf33d81 100644
--- a/akka-core/src/main/scala/config/Config.scala
+++ b/akka-core/src/main/scala/config/Config.scala
@@ -4,231 +4,71 @@
 
 package se.scalablesolutions.akka.config
 
-import se.scalablesolutions.akka.actor.Actor
-import se.scalablesolutions.akka.dispatch.MessageDispatcher
+import se.scalablesolutions.akka.util.Logging
 
-sealed abstract class FaultHandlingStrategy
-case class AllForOneStrategy(maxNrOfRetries: Int, withinTimeRange: Int) extends FaultHandlingStrategy
-case class OneForOneStrategy(maxNrOfRetries: Int, withinTimeRange: Int) extends FaultHandlingStrategy
-
-/**
- * Configuration classes - not to be used as messages.
- *
- * @author Jonas Bonér
- */
-object ScalaConfig {
-  sealed abstract class ConfigElement
-
-  abstract class Server extends ConfigElement
-  abstract class FailOverScheme extends ConfigElement
-  abstract class Scope extends ConfigElement
-
-  case class SupervisorConfig(restartStrategy: RestartStrategy, worker: List[Server]) extends Server
-  
-  class Supervise(val actor: Actor, val lifeCycle: LifeCycle, _remoteAddress: RemoteAddress) extends Server {
-    val remoteAddress: Option[RemoteAddress] = if (_remoteAddress eq null) None else Some(_remoteAddress)
-  }
-  object Supervise {
-    def apply(actor: Actor, lifeCycle: LifeCycle, remoteAddress: RemoteAddress) = new Supervise(actor, lifeCycle, remoteAddress)
-    def apply(actor: Actor, lifeCycle: LifeCycle) = new Supervise(actor, lifeCycle, null)
-    def unapply(supervise: Supervise) = Some((supervise.actor, supervise.lifeCycle, supervise.remoteAddress))
-  }
-
-  case class RestartStrategy(
-      scheme: FailOverScheme,
-      maxNrOfRetries: Int,
-      withinTimeRange: Int,
-      trapExceptions: List[Class[_ <: Throwable]]) extends ConfigElement
-
-  case object AllForOne extends FailOverScheme
-  case object OneForOne extends FailOverScheme
-
-  case class LifeCycle(scope: Scope, callbacks: Option[RestartCallbacks]) extends ConfigElement
-  object LifeCycle {
-    def apply(scope: Scope) = new LifeCycle(scope, None)
-  }
-  case class RestartCallbacks(preRestart: String, postRestart: String) {
-    if ((preRestart eq null) || (postRestart eq null)) throw new IllegalArgumentException("Restart callback methods can't be null")
-  }
-
-  case object Permanent extends Scope
-  case object Temporary extends Scope
-
-  case class RemoteAddress(val hostname: String, val port: Int) extends ConfigElement
-
-  class Component(_intf: Class[_],
-                  val target: Class[_],
-                  val lifeCycle: LifeCycle,
-                  val timeout: Int,
-                  val transactionRequired: Boolean,
-                  _dispatcher: MessageDispatcher, // optional
-                  _remoteAddress: RemoteAddress   // optional
-          ) extends Server {
-    val intf: Option[Class[_]] = if (_intf eq null) None else Some(_intf)
-    val dispatcher: Option[MessageDispatcher] = if (_dispatcher eq null) None else Some(_dispatcher)
-    val remoteAddress: Option[RemoteAddress] = if (_remoteAddress eq null) None else Some(_remoteAddress)
-  }
-  object Component {
-    def apply(intf: Class[_], target: Class[_], lifeCycle: LifeCycle, timeout: Int) =
-      new Component(intf, target, lifeCycle, timeout, false, null, null)
-
-    def apply(target: Class[_], lifeCycle: LifeCycle, timeout: Int) =
-      new Component(null, target, lifeCycle, timeout, false, null, null)
-
-    def apply(intf: Class[_], target: Class[_], lifeCycle: LifeCycle, timeout: Int, dispatcher: MessageDispatcher) =
-      new Component(intf, target, lifeCycle, timeout, false, dispatcher, null)
-
-    def apply(target: Class[_], lifeCycle: LifeCycle, timeout: Int, dispatcher: MessageDispatcher) =
-      new Component(null, target, lifeCycle, timeout, false, dispatcher, null)
-
-    def apply(intf: Class[_], target: Class[_], lifeCycle: LifeCycle, timeout: Int, remoteAddress: RemoteAddress) =
-      new Component(intf, target, lifeCycle, timeout, false, null, remoteAddress)
-
-    def apply(target: Class[_], lifeCycle: LifeCycle, timeout: Int, remoteAddress: RemoteAddress) =
-      new Component(null, target, lifeCycle, timeout, false, null, remoteAddress)
-
-    def apply(intf: Class[_], target: Class[_], lifeCycle: LifeCycle, timeout: Int, dispatcher: MessageDispatcher, remoteAddress: RemoteAddress) =
-      new Component(intf, target, lifeCycle, timeout, false, dispatcher, remoteAddress)
-
-    def apply(target: Class[_], lifeCycle: LifeCycle, timeout: Int, dispatcher: MessageDispatcher, remoteAddress: RemoteAddress) =
-      new Component(null, target, lifeCycle, timeout, false, dispatcher, remoteAddress)
-
-    def apply(intf: Class[_], target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean) =
-      new Component(intf, target, lifeCycle, timeout, transactionRequired, null, null)
-
-    def apply(target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean) =
-      new Component(null, target, lifeCycle, timeout, transactionRequired, null, null)
-
-    def apply(intf: Class[_], target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean, dispatcher: MessageDispatcher) =
-      new Component(intf, target, lifeCycle, timeout, transactionRequired, dispatcher, null)
-
-    def apply(target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean, dispatcher: MessageDispatcher) =
-      new Component(null, target, lifeCycle, timeout, transactionRequired, dispatcher, null)
-
-    def apply(intf: Class[_], target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean, remoteAddress: RemoteAddress) =
-      new Component(intf, target, lifeCycle, timeout, transactionRequired, null, remoteAddress)
-
-    def apply(target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean, remoteAddress: RemoteAddress) =
-      new Component(null, target, lifeCycle, timeout, transactionRequired, null, remoteAddress)
-
-    def apply(intf: Class[_], target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean, dispatcher: MessageDispatcher, remoteAddress: RemoteAddress) =
-      new Component(intf, target, lifeCycle, timeout, transactionRequired, dispatcher, remoteAddress)
-
-    def apply(target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean, dispatcher: MessageDispatcher, remoteAddress: RemoteAddress) =
-      new Component(null, target, lifeCycle, timeout, transactionRequired, dispatcher, remoteAddress)
-  }
-}
+import net.lag.configgy.{Configgy, ParseException}
 
 /**
  * @author Jonas Bonér
  */
-object JavaConfig {
-  import scala.reflect.BeanProperty
+object Config extends Logging {
+  val VERSION = "0.7-SNAPSHOT"
 
-  sealed abstract class ConfigElement
+  // Set Multiverse options for max speed
+  System.setProperty("org.multiverse.MuliverseConstants.sanityChecks", "false")
+  System.setProperty("org.multiverse.api.GlobalStmInstance.factorymethod", "org.multiverse.stms.alpha.AlphaStm.createFast")
 
-  class RestartStrategy(
-      @BeanProperty val scheme: FailOverScheme,
-      @BeanProperty val maxNrOfRetries: Int,
-      @BeanProperty val withinTimeRange: Int,
-      @BeanProperty val trapExceptions: Array[Class[_ <: Throwable]]) extends ConfigElement {
-    def transform = se.scalablesolutions.akka.config.ScalaConfig.RestartStrategy(
-      scheme.transform, maxNrOfRetries, withinTimeRange, trapExceptions.toList)
+  val HOME = {
+    val systemHome = System.getenv("AKKA_HOME")
+    if (systemHome == null || systemHome.length == 0 || systemHome == ".") {
+      val optionHome = System.getProperty("akka.home", "")
+      if (optionHome.length != 0) Some(optionHome)
+      else None
+    } else Some(systemHome)
   }
-  
-  class LifeCycle(@BeanProperty val scope: Scope, @BeanProperty val callbacks: RestartCallbacks) extends ConfigElement {
-    def this(scope: Scope) = this(scope, null)
-    def transform = {
-      val callbackOption = if (callbacks eq null) None else Some(callbacks.transform)
-      se.scalablesolutions.akka.config.ScalaConfig.LifeCycle(scope.transform, callbackOption)
+
+  val config = {
+    if (HOME.isDefined) {
+      try {
+        val configFile = HOME.get + "/config/akka.conf"
+        Configgy.configure(configFile)
+        log.info("AKKA_HOME is defined to [%s], config loaded from [%s].", HOME.get, configFile)
+      } catch {
+        case e: ParseException => throw new IllegalStateException(
+          "'akka.conf' config file can not be found in [" + HOME + "/config/akka.conf] aborting." +
+          "\n\tEither add it in the 'config' directory or add it to the classpath.")
+      }
+    } else if (System.getProperty("akka.config", "") != "") {
+      val configFile = System.getProperty("akka.config", "")
+      try {
+        Configgy.configure(configFile)
+        log.info("Config loaded from -Dakka.config=%s", configFile)
+      } catch {
+        case e: ParseException => throw new IllegalStateException(
+          "Config could not be loaded from -Dakka.config=" + configFile)
+      }
+    } else {
+      try {
+        Configgy.configureFromResource("akka.conf", getClass.getClassLoader)
+        log.info("Config loaded from the application classpath.")
+      } catch {
+        case e: ParseException => throw new IllegalStateException(
+          "\nCan't find 'akka.conf' configuration file." + 
+          "\nOne of the three ways of locating the 'akka.conf' file needs to be defined:" +
+          "\n\t1. Define 'AKKA_HOME' environment variable to the root of the Akka distribution." +
+          "\n\t2. Define the '-Dakka.config=...' system property option." +
+          "\n\t3. Put the 'akka.conf' file on the classpath." +
+          "\nI have no way of finding the 'akka.conf' configuration file." +
+          "\nAborting.")
+      }
     }
+    Configgy.config
   }
 
-  class RestartCallbacks(@BeanProperty val preRestart: String, @BeanProperty val postRestart: String) {
-    def transform = se.scalablesolutions.akka.config.ScalaConfig.RestartCallbacks(preRestart, postRestart)
-  }
+  val CONFIG_VERSION = config.getString("akka.version", "0")
+  if (VERSION != CONFIG_VERSION) throw new IllegalStateException(
+    "Akka JAR version [" + VERSION + "] is different than the provided config ('akka.conf') version [" + CONFIG_VERSION + "]")
+  val startTime = System.currentTimeMillis
 
-  abstract class Scope extends ConfigElement {
-    def transform: se.scalablesolutions.akka.config.ScalaConfig.Scope
-  }
-  class Permanent extends Scope {
-    override def transform = se.scalablesolutions.akka.config.ScalaConfig.Permanent
-  }
-  class Temporary extends Scope {
-    override def transform = se.scalablesolutions.akka.config.ScalaConfig.Temporary
-  }
-
-  abstract class FailOverScheme extends ConfigElement {
-    def transform: se.scalablesolutions.akka.config.ScalaConfig.FailOverScheme
-  }
-  class AllForOne extends FailOverScheme {
-    override def transform = se.scalablesolutions.akka.config.ScalaConfig.AllForOne
-  }
-  class OneForOne extends FailOverScheme {
-    override def transform = se.scalablesolutions.akka.config.ScalaConfig.OneForOne
-  }
-
-  class RemoteAddress(@BeanProperty val hostname: String, @BeanProperty val port: Int)
-
-  abstract class Server extends ConfigElement
-  class Component(@BeanProperty val intf: Class[_],
-                  @BeanProperty val target: Class[_],
-                  @BeanProperty val lifeCycle: LifeCycle,
-                  @BeanProperty val timeout: Int,
-                  @BeanProperty val transactionRequired: Boolean,  // optional
-                  @BeanProperty val dispatcher: MessageDispatcher, // optional
-                  @BeanProperty val remoteAddress: RemoteAddress   // optional
-          ) extends Server {
-
-    def this(intf: Class[_], target: Class[_], lifeCycle: LifeCycle, timeout: Int) =
-      this(intf, target, lifeCycle, timeout, false, null, null)
-
-    def this(target: Class[_], lifeCycle: LifeCycle, timeout: Int) =
-      this(null, target, lifeCycle, timeout, false, null, null)
-
-    def this(intf: Class[_], target: Class[_], lifeCycle: LifeCycle, timeout: Int, remoteAddress: RemoteAddress) =
-      this(intf, target, lifeCycle, timeout, false, null, remoteAddress)
-
-    def this(target: Class[_], lifeCycle: LifeCycle, timeout: Int, remoteAddress: RemoteAddress) =
-      this(null, target, lifeCycle, timeout, false, null, remoteAddress)
-
-    def this(intf: Class[_], target: Class[_], lifeCycle: LifeCycle, timeout: Int, dispatcher: MessageDispatcher) =
-      this(intf, target, lifeCycle, timeout, false, dispatcher, null)
-
-    def this(target: Class[_], lifeCycle: LifeCycle, timeout: Int, dispatcher: MessageDispatcher) =
-      this(null, target, lifeCycle, timeout, false, dispatcher, null)
-
-    def this(target: Class[_], lifeCycle: LifeCycle, timeout: Int, dispatcher: MessageDispatcher, remoteAddress: RemoteAddress) =
-      this(null, target, lifeCycle, timeout, false, dispatcher, remoteAddress)
-
-    def this(intf: Class[_], target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean) =
-      this(intf, target, lifeCycle, timeout, transactionRequired, null, null)
-
-    def this(target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean) =
-      this(null, target, lifeCycle, timeout, transactionRequired, null, null)
-
-    def this(intf: Class[_], target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean, remoteAddress: RemoteAddress) =
-      this(intf, target, lifeCycle, timeout, transactionRequired, null, remoteAddress)
-
-    def this(target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean, remoteAddress: RemoteAddress) =
-      this(null, target, lifeCycle, timeout, transactionRequired, null, remoteAddress)
-
-    def this(intf: Class[_], target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean, dispatcher: MessageDispatcher) =
-      this(intf, target, lifeCycle, timeout, transactionRequired, dispatcher, null)
-
-    def this(target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean, dispatcher: MessageDispatcher) =
-      this(null, target, lifeCycle, timeout, transactionRequired, dispatcher, null)
-
-    def this(target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean, dispatcher: MessageDispatcher, remoteAddress: RemoteAddress) =
-      this(null, target, lifeCycle, timeout, transactionRequired, dispatcher, remoteAddress)
-
-    def transform =
-      se.scalablesolutions.akka.config.ScalaConfig.Component(
-        intf, target, lifeCycle.transform, timeout, transactionRequired, dispatcher,
-        if (remoteAddress ne null) se.scalablesolutions.akka.config.ScalaConfig.RemoteAddress(remoteAddress.hostname, remoteAddress.port) else null)
-
-    def newSupervised(actor: Actor) =
-      se.scalablesolutions.akka.config.ScalaConfig.Supervise(actor, lifeCycle.transform)
-  }
-  
-}
\ No newline at end of file
+  def uptime = (System.currentTimeMillis - startTime) / 1000
+}
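
For illustration, a minimal sketch of reading values through the relocated Config object; the keys and defaults below are taken from usages elsewhere in this commit.

    import se.scalablesolutions.akka.config.Config.config

    // Typed lookups with defaults; 'akka.conf' is resolved from AKKA_HOME,
    // -Dakka.config, or the classpath, as implemented above.
    val hostname = config.getString("akka.remote.server.hostname", "localhost")
    val restPort = config.getInt("akka.rest.port", 9998)
    val useRest  = config.getBool("akka.rest.service", true)
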
diff --git a/akka-core/src/main/scala/config/ConfiguratorRepository.scala b/akka-core/src/main/scala/config/ConfiguratorRepository.scala
index 9c12bf4b32..097259164b 100644
--- a/akka-core/src/main/scala/config/ConfiguratorRepository.scala
+++ b/akka-core/src/main/scala/config/ConfiguratorRepository.scala
@@ -6,7 +6,7 @@ package se.scalablesolutions.akka.config
 
 import scala.collection.mutable.HashSet
 
-import util.Logging
+import se.scalablesolutions.akka.util.Logging
 
 object ConfiguratorRepository extends Logging {
 
diff --git a/akka-core/src/main/scala/config/SupervisionConfig.scala b/akka-core/src/main/scala/config/SupervisionConfig.scala
new file mode 100644
index 0000000000..e993573972
--- /dev/null
+++ b/akka-core/src/main/scala/config/SupervisionConfig.scala
@@ -0,0 +1,234 @@
+/**
+ * Copyright (C) 2009-2010 Scalable Solutions AB 
+ */
+
+package se.scalablesolutions.akka.config
+
+import se.scalablesolutions.akka.actor.Actor
+import se.scalablesolutions.akka.dispatch.MessageDispatcher
+
+sealed abstract class FaultHandlingStrategy
+case class AllForOneStrategy(maxNrOfRetries: Int, withinTimeRange: Int) extends FaultHandlingStrategy
+case class OneForOneStrategy(maxNrOfRetries: Int, withinTimeRange: Int) extends FaultHandlingStrategy
+
+/**
+ * Configuration classes - not to be used as messages.
+ *
+ * @author Jonas Bonér
+ */
+object ScalaConfig {
+  sealed abstract class ConfigElement
+
+  abstract class Server extends ConfigElement
+  abstract class FailOverScheme extends ConfigElement
+  abstract class Scope extends ConfigElement
+
+  case class SupervisorConfig(restartStrategy: RestartStrategy, worker: List[Server]) extends Server
+  
+  class Supervise(val actor: Actor, val lifeCycle: LifeCycle, _remoteAddress: RemoteAddress) extends Server {
+    val remoteAddress: Option[RemoteAddress] = if (_remoteAddress eq null) None else Some(_remoteAddress)
+  }
+  object Supervise {
+    def apply(actor: Actor, lifeCycle: LifeCycle, remoteAddress: RemoteAddress) = new Supervise(actor, lifeCycle, remoteAddress)
+    def apply(actor: Actor, lifeCycle: LifeCycle) = new Supervise(actor, lifeCycle, null)
+    def unapply(supervise: Supervise) = Some((supervise.actor, supervise.lifeCycle, supervise.remoteAddress))
+  }
+
+  case class RestartStrategy(
+      scheme: FailOverScheme,
+      maxNrOfRetries: Int,
+      withinTimeRange: Int,
+      trapExceptions: List[Class[_ <: Throwable]]) extends ConfigElement
+
+  case object AllForOne extends FailOverScheme
+  case object OneForOne extends FailOverScheme
+
+  case class LifeCycle(scope: Scope, callbacks: Option[RestartCallbacks]) extends ConfigElement
+  object LifeCycle {
+    def apply(scope: Scope) = new LifeCycle(scope, None)
+  }
+  case class RestartCallbacks(preRestart: String, postRestart: String) {
+    if ((preRestart eq null) || (postRestart eq null)) throw new IllegalArgumentException("Restart callback methods can't be null")
+  }
+
+  case object Permanent extends Scope
+  case object Temporary extends Scope
+
+  case class RemoteAddress(val hostname: String, val port: Int) extends ConfigElement
+
+  class Component(_intf: Class[_],
+                  val target: Class[_],
+                  val lifeCycle: LifeCycle,
+                  val timeout: Int,
+                  val transactionRequired: Boolean,
+                  _dispatcher: MessageDispatcher, // optional
+                  _remoteAddress: RemoteAddress   // optional
+          ) extends Server {
+    val intf: Option[Class[_]] = if (_intf eq null) None else Some(_intf)
+    val dispatcher: Option[MessageDispatcher] = if (_dispatcher eq null) None else Some(_dispatcher)
+    val remoteAddress: Option[RemoteAddress] = if (_remoteAddress eq null) None else Some(_remoteAddress)
+  }
+  object Component {
+    def apply(intf: Class[_], target: Class[_], lifeCycle: LifeCycle, timeout: Int) =
+      new Component(intf, target, lifeCycle, timeout, false, null, null)
+
+    def apply(target: Class[_], lifeCycle: LifeCycle, timeout: Int) =
+      new Component(null, target, lifeCycle, timeout, false, null, null)
+
+    def apply(intf: Class[_], target: Class[_], lifeCycle: LifeCycle, timeout: Int, dispatcher: MessageDispatcher) =
+      new Component(intf, target, lifeCycle, timeout, false, dispatcher, null)
+
+    def apply(target: Class[_], lifeCycle: LifeCycle, timeout: Int, dispatcher: MessageDispatcher) =
+      new Component(null, target, lifeCycle, timeout, false, dispatcher, null)
+
+    def apply(intf: Class[_], target: Class[_], lifeCycle: LifeCycle, timeout: Int, remoteAddress: RemoteAddress) =
+      new Component(intf, target, lifeCycle, timeout, false, null, remoteAddress)
+
+    def apply(target: Class[_], lifeCycle: LifeCycle, timeout: Int, remoteAddress: RemoteAddress) =
+      new Component(null, target, lifeCycle, timeout, false, null, remoteAddress)
+
+    def apply(intf: Class[_], target: Class[_], lifeCycle: LifeCycle, timeout: Int, dispatcher: MessageDispatcher, remoteAddress: RemoteAddress) =
+      new Component(intf, target, lifeCycle, timeout, false, dispatcher, remoteAddress)
+
+    def apply(target: Class[_], lifeCycle: LifeCycle, timeout: Int, dispatcher: MessageDispatcher, remoteAddress: RemoteAddress) =
+      new Component(null, target, lifeCycle, timeout, false, dispatcher, remoteAddress)
+
+    def apply(intf: Class[_], target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean) =
+      new Component(intf, target, lifeCycle, timeout, transactionRequired, null, null)
+
+    def apply(target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean) =
+      new Component(null, target, lifeCycle, timeout, transactionRequired, null, null)
+
+    def apply(intf: Class[_], target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean, dispatcher: MessageDispatcher) =
+      new Component(intf, target, lifeCycle, timeout, transactionRequired, dispatcher, null)
+
+    def apply(target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean, dispatcher: MessageDispatcher) =
+      new Component(null, target, lifeCycle, timeout, transactionRequired, dispatcher, null)
+
+    def apply(intf: Class[_], target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean, remoteAddress: RemoteAddress) =
+      new Component(intf, target, lifeCycle, timeout, transactionRequired, null, remoteAddress)
+
+    def apply(target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean, remoteAddress: RemoteAddress) =
+      new Component(null, target, lifeCycle, timeout, transactionRequired, null, remoteAddress)
+
+    def apply(intf: Class[_], target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean, dispatcher: MessageDispatcher, remoteAddress: RemoteAddress) =
+      new Component(intf, target, lifeCycle, timeout, transactionRequired, dispatcher, remoteAddress)
+
+    def apply(target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean, dispatcher: MessageDispatcher, remoteAddress: RemoteAddress) =
+      new Component(null, target, lifeCycle, timeout, transactionRequired, dispatcher, remoteAddress)
+  }
+}
+
+/**
+ * @author Jonas Bonér
+ */
+object JavaConfig {
+  import scala.reflect.BeanProperty
+
+  sealed abstract class ConfigElement
+
+  class RestartStrategy(
+      @BeanProperty val scheme: FailOverScheme,
+      @BeanProperty val maxNrOfRetries: Int,
+      @BeanProperty val withinTimeRange: Int,
+      @BeanProperty val trapExceptions: Array[Class[_ <: Throwable]]) extends ConfigElement {
+    def transform = se.scalablesolutions.akka.config.ScalaConfig.RestartStrategy(
+      scheme.transform, maxNrOfRetries, withinTimeRange, trapExceptions.toList)
+  }
+  
+  class LifeCycle(@BeanProperty val scope: Scope, @BeanProperty val callbacks: RestartCallbacks) extends ConfigElement {
+    def this(scope: Scope) = this(scope, null)
+    def transform = {
+      val callbackOption = if (callbacks eq null) None else Some(callbacks.transform)
+      se.scalablesolutions.akka.config.ScalaConfig.LifeCycle(scope.transform, callbackOption)
+    }
+  }
+
+  class RestartCallbacks(@BeanProperty val preRestart: String, @BeanProperty val postRestart: String) {
+    def transform = se.scalablesolutions.akka.config.ScalaConfig.RestartCallbacks(preRestart, postRestart)
+  }
+
+  abstract class Scope extends ConfigElement {
+    def transform: se.scalablesolutions.akka.config.ScalaConfig.Scope
+  }
+  class Permanent extends Scope {
+    override def transform = se.scalablesolutions.akka.config.ScalaConfig.Permanent
+  }
+  class Temporary extends Scope {
+    override def transform = se.scalablesolutions.akka.config.ScalaConfig.Temporary
+  }
+
+  abstract class FailOverScheme extends ConfigElement {
+    def transform: se.scalablesolutions.akka.config.ScalaConfig.FailOverScheme
+  }
+  class AllForOne extends FailOverScheme {
+    override def transform = se.scalablesolutions.akka.config.ScalaConfig.AllForOne
+  }
+  class OneForOne extends FailOverScheme {
+    override def transform = se.scalablesolutions.akka.config.ScalaConfig.OneForOne
+  }
+
+  class RemoteAddress(@BeanProperty val hostname: String, @BeanProperty val port: Int)
+
+  abstract class Server extends ConfigElement
+  class Component(@BeanProperty val intf: Class[_],
+                  @BeanProperty val target: Class[_],
+                  @BeanProperty val lifeCycle: LifeCycle,
+                  @BeanProperty val timeout: Int,
+                  @BeanProperty val transactionRequired: Boolean,  // optional
+                  @BeanProperty val dispatcher: MessageDispatcher, // optional
+                  @BeanProperty val remoteAddress: RemoteAddress   // optional
+          ) extends Server {
+
+    def this(intf: Class[_], target: Class[_], lifeCycle: LifeCycle, timeout: Int) =
+      this(intf, target, lifeCycle, timeout, false, null, null)
+
+    def this(target: Class[_], lifeCycle: LifeCycle, timeout: Int) =
+      this(null, target, lifeCycle, timeout, false, null, null)
+
+    def this(intf: Class[_], target: Class[_], lifeCycle: LifeCycle, timeout: Int, remoteAddress: RemoteAddress) =
+      this(intf, target, lifeCycle, timeout, false, null, remoteAddress)
+
+    def this(target: Class[_], lifeCycle: LifeCycle, timeout: Int, remoteAddress: RemoteAddress) =
+      this(null, target, lifeCycle, timeout, false, null, remoteAddress)
+
+    def this(intf: Class[_], target: Class[_], lifeCycle: LifeCycle, timeout: Int, dispatcher: MessageDispatcher) =
+      this(intf, target, lifeCycle, timeout, false, dispatcher, null)
+
+    def this(target: Class[_], lifeCycle: LifeCycle, timeout: Int, dispatcher: MessageDispatcher) =
+      this(null, target, lifeCycle, timeout, false, dispatcher, null)
+
+    def this(target: Class[_], lifeCycle: LifeCycle, timeout: Int, dispatcher: MessageDispatcher, remoteAddress: RemoteAddress) =
+      this(null, target, lifeCycle, timeout, false, dispatcher, remoteAddress)
+
+    def this(intf: Class[_], target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean) =
+      this(intf, target, lifeCycle, timeout, transactionRequired, null, null)
+
+    def this(target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean) =
+      this(null, target, lifeCycle, timeout, transactionRequired, null, null)
+
+    def this(intf: Class[_], target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean, remoteAddress: RemoteAddress) =
+      this(intf, target, lifeCycle, timeout, transactionRequired, null, remoteAddress)
+
+    def this(target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean, remoteAddress: RemoteAddress) =
+      this(null, target, lifeCycle, timeout, transactionRequired, null, remoteAddress)
+
+    def this(intf: Class[_], target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean, dispatcher: MessageDispatcher) =
+      this(intf, target, lifeCycle, timeout, transactionRequired, dispatcher, null)
+
+    def this(target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean, dispatcher: MessageDispatcher) =
+      this(null, target, lifeCycle, timeout, transactionRequired, dispatcher, null)
+
+    def this(target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean, dispatcher: MessageDispatcher, remoteAddress: RemoteAddress) =
+      this(null, target, lifeCycle, timeout, transactionRequired, dispatcher, remoteAddress)
+
+    def transform =
+      se.scalablesolutions.akka.config.ScalaConfig.Component(
+        intf, target, lifeCycle.transform, timeout, transactionRequired, dispatcher,
+        if (remoteAddress ne null) se.scalablesolutions.akka.config.ScalaConfig.RemoteAddress(remoteAddress.hostname, remoteAddress.port) else null)
+
+    def newSupervised(actor: Actor) =
+      se.scalablesolutions.akka.config.ScalaConfig.Supervise(actor, lifeCycle.transform)
+  }
+  
+}
\ No newline at end of file
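For illustration, a minimal sketch wiring the relocated ScalaConfig classes together ('worker' is a hypothetical Actor instance):

    import se.scalablesolutions.akka.config.ScalaConfig._

    // Restart 'worker' (hypothetical) up to 3 times within 1000 ms under the
    // AllForOne scheme, trapping plain Exceptions; the actor is kept permanently.
    val supervisorConfig = SupervisorConfig(
      RestartStrategy(AllForOne, 3, 1000, List(classOf[Exception])),
      Supervise(worker, LifeCycle(Permanent)) :: Nil)
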
diff --git a/akka-core/src/main/scala/dispatch/ExecutorBasedEventDrivenDispatcher.scala b/akka-core/src/main/scala/dispatch/ExecutorBasedEventDrivenDispatcher.scala
index e115800d4b..b48e7717cf 100644
--- a/akka-core/src/main/scala/dispatch/ExecutorBasedEventDrivenDispatcher.scala
+++ b/akka-core/src/main/scala/dispatch/ExecutorBasedEventDrivenDispatcher.scala
@@ -57,18 +57,29 @@ class ExecutorBasedEventDrivenDispatcher(_name: String) extends MessageDispatche
   @volatile private var active: Boolean = false
   
   val name: String = "event-driven:executor:dispatcher:" + _name
-  init 
-    
+  init
+
   def dispatch(invocation: MessageInvocation) = if (active) {
     executor.execute(new Runnable() {
       def run = {
-        invocation.receiver.synchronized {
-          var messageInvocation = invocation.receiver._mailbox.poll
-          while (messageInvocation != null) {
-            messageInvocation.invoke
-            messageInvocation = invocation.receiver._mailbox.poll
+        var lockAcquiredOnce = false
+        // this do-while loop is required to prevent missing new messages between the end of the inner while
+        // loop and releasing the lock
+        do {
+          if (invocation.receiver._dispatcherLock.tryLock) {
+            lockAcquiredOnce = true
+            try {
+              // Only dispatch if we got the lock. Otherwise another thread is already dispatching.
+              var messageInvocation = invocation.receiver._mailbox.poll
+              while (messageInvocation != null) {
+                messageInvocation.invoke
+                messageInvocation = invocation.receiver._mailbox.poll
+              }
+            } finally {
+              invocation.receiver._dispatcherLock.unlock
+            }
           }
-        }
+        } while (lockAcquiredOnce && !invocation.receiver._mailbox.isEmpty)
       }
     })
   } else throw new IllegalStateException("Can't submit invocations to dispatcher since it's not started")
@@ -88,4 +99,4 @@ class ExecutorBasedEventDrivenDispatcher(_name: String) extends MessageDispatche
     "Can't build a new thread pool for a dispatcher that is already up and running")
 
   private[akka] def init = withNewThreadPoolWithLinkedBlockingQueueWithUnboundedCapacity.buildThreadPool
-}
\ No newline at end of file
+}
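
For illustration, a standalone sketch of the tryLock/do-while drain pattern introduced above (names are hypothetical; it shows why the outer loop must re-check the mailbox after the lock is released):

    import java.util.concurrent.ConcurrentLinkedQueue
    import java.util.concurrent.locks.ReentrantLock

    val lock    = new ReentrantLock
    val mailbox = new ConcurrentLinkedQueue[String]

    def drain(handle: String => Unit): Unit = {
      var lockAcquiredOnce = false
      do {
        if (lock.tryLock) { // only the winning thread drains the mailbox
          lockAcquiredOnce = true
          try {
            var msg = mailbox.poll
            while (msg != null) { handle(msg); msg = mailbox.poll }
          } finally lock.unlock
        }
        // a message enqueued between the inner loop ending and the unlock
        // would otherwise be stranded until the next dispatch
      } while (lockAcquiredOnce && !mailbox.isEmpty)
    }
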
diff --git a/akka-core/src/main/scala/dispatch/Future.scala b/akka-core/src/main/scala/dispatch/Future.scala
index 0dcc0f850c..0bf9723e31 100644
--- a/akka-core/src/main/scala/dispatch/Future.scala
+++ b/akka-core/src/main/scala/dispatch/Future.scala
@@ -13,6 +13,7 @@ class FutureTimeoutException(message: String) extends RuntimeException(message)
 object Futures {
 
   /**
+   * FIXME document
    * 
    * val future = Futures.future(1000) {
    *  ... // do stuff
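For illustration, the call shape the scaladoc skeleton above describes (a minimal sketch; the body and timeout value are placeholders):

    import se.scalablesolutions.akka.dispatch.Futures

    // Run a block asynchronously with a 1000 ms timeout and get a future back.
    val future = Futures.future(1000) {
      41 + 1 // do stuff
    }
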
diff --git a/akka-core/src/main/scala/dispatch/Reactor.scala b/akka-core/src/main/scala/dispatch/Reactor.scala
index bf8254c64a..627d27aeac 100644
--- a/akka-core/src/main/scala/dispatch/Reactor.scala
+++ b/akka-core/src/main/scala/dispatch/Reactor.scala
@@ -7,16 +7,17 @@ package se.scalablesolutions.akka.dispatch
 import java.util.List
 
 import se.scalablesolutions.akka.util.{HashCode, Logging}
-import se.scalablesolutions.akka.stm.Transaction
 import se.scalablesolutions.akka.actor.Actor
 
 import java.util.concurrent.ConcurrentHashMap
 
+import org.multiverse.commitbarriers.CountDownCommitBarrier
+
 final class MessageInvocation(val receiver: Actor,
                               val message: Any,
                               val future: Option[CompletableFuture],
                               val sender: Option[Actor],
-                              val tx: Option[Transaction]) {
+                              val transactionSet: Option[CountDownCommitBarrier]) {
   if (receiver eq null) throw new IllegalArgumentException("receiver is null")
 
   def invoke = receiver.invoke(this)
@@ -37,13 +38,13 @@ final class MessageInvocation(val receiver: Actor,
     that.asInstanceOf[MessageInvocation].message == message
   }
 
-  override def toString(): String = synchronized {
+  override def toString = synchronized {
     "MessageInvocation[" +
      "\n\tmessage = " + message +
      "\n\treceiver = " + receiver +
      "\n\tsender = " + sender +
      "\n\tfuture = " + future +
-     "\n\ttx = " + tx +
+     "\n\ttransactionSet = " + transactionSet +
      "\n]"
   }
 }
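
For illustration, a minimal sketch of constructing a MessageInvocation against the new signature ('receiver' is a hypothetical Actor instance, and the one-party Multiverse barrier is an assumption):

    import org.multiverse.commitbarriers.CountDownCommitBarrier

    val barrier = new CountDownCommitBarrier(1) // assumed: one party joins the commit
    val invocation = new MessageInvocation(receiver, "msg", None, None, Some(barrier))
    invocation.invoke
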
diff --git a/akka-core/src/main/scala/remote/BootableRemoteActorService.scala b/akka-core/src/main/scala/remote/BootableRemoteActorService.scala
index 1c31c3025c..8aaec0661b 100644
--- a/akka-core/src/main/scala/remote/BootableRemoteActorService.scala
+++ b/akka-core/src/main/scala/remote/BootableRemoteActorService.scala
@@ -5,8 +5,8 @@
 package se.scalablesolutions.akka.remote
 
 import se.scalablesolutions.akka.actor.BootableActorLoaderService
-import se.scalablesolutions.akka.util.{Bootable,Logging}
-import se.scalablesolutions.akka.Config.config
+import se.scalablesolutions.akka.util.{Bootable, Logging}
+import se.scalablesolutions.akka.config.Config.config
 
 /**
  * This bundle/service is responsible for booting up and shutting down the remote actors facility
@@ -23,22 +23,19 @@ trait BootableRemoteActorService extends Bootable with Logging {
   def startRemoteService = remoteServerThread.start
   
   abstract override def onLoad   = {
+    super.onLoad //Initialize BootableActorLoaderService before remote service
     if(config.getBool("akka.remote.server.service", true)){
-      log.info("Starting up Cluster Service")
-      Cluster.start
-      super.onLoad //Initialize BootableActorLoaderService before remote service
+      
+      if(config.getBool("akka.remote.cluster.service", true))
+        Cluster.start(self.applicationLoader)
+     
       log.info("Initializing Remote Actors Service...")
       startRemoteService
       log.info("Remote Actors Service initialized!")
     }
-    else
-      super.onLoad
-
   }
 
   abstract override def onUnload = {
-    super.onUnload
-
     log.info("Shutting down Remote Actors Service")
 
     RemoteNode.shutdown
@@ -50,6 +47,8 @@ trait BootableRemoteActorService extends Bootable with Logging {
     Cluster.shutdown
 
     log.info("Remote Actors Service has been shut down")
+
+    super.onUnload
   }
 
-}
\ No newline at end of file
+}
diff --git a/akka-core/src/main/scala/remote/Cluster.scala b/akka-core/src/main/scala/remote/Cluster.scala
index 4313cfe98c..4a1d6012a7 100644
--- a/akka-core/src/main/scala/remote/Cluster.scala
+++ b/akka-core/src/main/scala/remote/Cluster.scala
@@ -4,7 +4,7 @@
 
 package se.scalablesolutions.akka.remote
 
-import se.scalablesolutions.akka.Config.config
+import se.scalablesolutions.akka.config.Config.config
 import se.scalablesolutions.akka.config.ScalaConfig._
 import se.scalablesolutions.akka.serialization.Serializer
 import se.scalablesolutions.akka.actor.{Supervisor, SupervisorFactory, Actor, ActorRegistry}
@@ -17,17 +17,43 @@ import scala.collection.immutable.{Map, HashMap}
  * @author Viktor Klang
  */
 trait Cluster {
+
+  /**
+   * Specifies the cluster name
+   */
   def name: String
 
+  /**
+   * Adds the specified hostname + port as a local node
+   * This information will be propagated to other nodes in the cluster
+   * and will be available at the other nodes through lookup and foreach
+   */
   def registerLocalNode(hostname: String, port: Int): Unit
 
+  /**
+   * Removes the specified hostname + port from the local node
+   * This information will be propagated to other nodes in the cluster
+   * and will no longer be available at the other nodes through lookup and foreach
+   */
   def deregisterLocalNode(hostname: String, port: Int): Unit
 
+  /**
+   * Sends the message to all Actors of the specified type on all other nodes in the cluster
+   */
   def relayMessage(to: Class[_ <: Actor], msg: AnyRef): Unit
 
+  /**
+   * Traverses all known remote addresses available at all other nodes in the cluster
+   * and applies the given PartialFunction to the first address at which it is defined.
+   * The order of application is undefined and may vary.
+   */
   def lookup[T](pf: PartialFunction[RemoteAddress, T]): Option[T]
-  
-  def foreach(f : (RemoteAddress) => Unit) : Unit
+
+  /**
+   * Applies the specified function to all known remote addresses on all other nodes in the cluster.
+   * The order of application is undefined and may vary.
+   */
+  def foreach(f: (RemoteAddress) => Unit): Unit
 }
 
 /**
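For illustration, a minimal sketch of the Cluster operations documented above ('cluster' is a hypothetical Cluster instance):

    import se.scalablesolutions.akka.config.ScalaConfig.RemoteAddress

    cluster.registerLocalNode("localhost", 9999)
    // Find the hostname of any node that registered port 9999; order is undefined.
    val host: Option[String] = cluster.lookup { case RemoteAddress(hostname, 9999) => hostname }
    cluster.foreach(address => println(address))
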
@@ -37,6 +63,10 @@ trait Cluster {
  */
 trait ClusterActor extends Actor with Cluster {
   val name = config.getString("akka.remote.cluster.name") getOrElse "default"
+  
+  @volatile protected var serializer: Serializer = _
+
+  private[remote] def setSerializer(s: Serializer): Unit = serializer = s
 }
 
 /**
@@ -44,20 +74,20 @@ trait ClusterActor extends Actor with Cluster {
  *
  * @author Viktor Klang
  */
-private[remote] object ClusterActor {
+private[akka] object ClusterActor {
   sealed trait ClusterMessage
 
-  private[remote] case class RelayedMessage(actorClassFQN: String, msg: AnyRef) extends ClusterMessage
-  private[remote] case class Message[ADDR_T](sender : ADDR_T,msg : Array[Byte])
-  private[remote] case object PapersPlease extends ClusterMessage
-  private[remote] case class Papers(addresses: List[RemoteAddress]) extends ClusterMessage
-  private[remote] case object Block extends ClusterMessage
-  private[remote] case object Unblock extends ClusterMessage
-  private[remote] case class View[ADDR_T](othersPresent : Set[ADDR_T]) extends ClusterMessage
-  private[remote] case class Zombie[ADDR_T](address: ADDR_T) extends ClusterMessage
-  private[remote] case class RegisterLocalNode(server: RemoteAddress) extends ClusterMessage
-  private[remote] case class DeregisterLocalNode(server: RemoteAddress) extends ClusterMessage
-  private[remote] case class Node(endpoints: List[RemoteAddress])
+  private[akka] case class RelayedMessage(actorClassFQN: String, msg: AnyRef) extends ClusterMessage
+  private[akka] case class Message[ADDR_T](sender: ADDR_T, msg: Array[Byte])
+  private[akka] case object PapersPlease extends ClusterMessage
+  private[akka] case class Papers(addresses: List[RemoteAddress]) extends ClusterMessage
+  private[akka] case object Block extends ClusterMessage
+  private[akka] case object Unblock extends ClusterMessage
+  private[akka] case class View[ADDR_T](othersPresent: Set[ADDR_T]) extends ClusterMessage
+  private[akka] case class Zombie[ADDR_T](address: ADDR_T) extends ClusterMessage
+  private[akka] case class RegisterLocalNode(server: RemoteAddress) extends ClusterMessage
+  private[akka] case class DeregisterLocalNode(server: RemoteAddress) extends ClusterMessage
+  private[akka] case class Node(endpoints: List[RemoteAddress])
 }
 
 /**
@@ -67,72 +97,70 @@ private[remote] object ClusterActor {
  */
 abstract class BasicClusterActor extends ClusterActor {
   import ClusterActor._
-
   type ADDR_T
 
-
   @volatile private var local: Node = Node(Nil)
   @volatile private var remotes: Map[ADDR_T, Node] = Map()
 
   override def init = {
-      remotes = new HashMap[ADDR_T, Node]
+    remotes = new HashMap[ADDR_T, Node]
   }
 
   override def shutdown = {
-      remotes = Map()
+    remotes = Map()
   }
 
   def receive = {
-    case v : View[ADDR_T] => {
+    case v: View[ADDR_T] => {
       // Not present in the cluster anymore = presumably zombies
       // Nodes we have no prior knowledge existed = unknowns
       val zombies = Set[ADDR_T]() ++ remotes.keySet -- v.othersPresent
       val unknown = v.othersPresent -- remotes.keySet
 
       log debug ("Updating view")
-      log debug ("Other memebers: [%s]",v.othersPresent)
-      log debug ("Zombies: [%s]",zombies)
-      log debug ("Unknowns: [%s]",unknown)
+      log debug ("Other memebers: [%s]", v.othersPresent)
+      log debug ("Zombies: [%s]", zombies)
+      log debug ("Unknowns: [%s]", unknown)
 
       // Tell the zombies and unknowns to provide papers and prematurely treat the zombies as dead
       broadcast(zombies ++ unknown, PapersPlease)
       remotes = remotes -- zombies
     }
 
-    case z : Zombie[ADDR_T] => { //Ask the presumed zombie for papers and prematurely treat it as dead
+    case z: Zombie[ADDR_T] => { //Ask the presumed zombie for papers and prematurely treat it as dead
       log debug ("Killing Zombie Node: %s", z.address)
       broadcast(z.address :: Nil, PapersPlease)
       remotes = remotes - z.address
     }
 
-    case rm @ RelayedMessage(_, _) => {
+    case rm@RelayedMessage(_, _) => {
       log debug ("Relaying message: %s", rm)
       broadcast(rm)
     }
 
-    case m : Message[ADDR_T] => {
-        val (src,msg) = (m.sender,m.msg)
-        (Cluster.serializer in (msg, None)) match {
+    case m: Message[ADDR_T] => {
+      val (src, msg) = (m.sender, m.msg)
+      (serializer in (msg, None)) match {
 
-          case PapersPlease => {
-            log debug ("Asked for papers by %s", src)
-            broadcast(src :: Nil, Papers(local.endpoints))
+        case PapersPlease => {
+          log debug ("Asked for papers by %s", src)
+          broadcast(src :: Nil, Papers(local.endpoints))
 
-            if (remotes.get(src).isEmpty) // If we were asked for papers from someone we don't know, ask them!
-              broadcast(src :: Nil, PapersPlease)
-          }
-
-          case Papers(x) => remotes = remotes + (src -> Node(x))
-
-          case RelayedMessage(c, m) => ActorRegistry.actorsFor(c).foreach(_ send m)
-
-          case unknown => log debug ("Unknown message: %s", unknown.toString)
+          if (remotes.get(src).isEmpty) // If we were asked for papers from someone we don't know, ask them!
+            broadcast(src :: Nil, PapersPlease)
         }
+
+        case Papers(x) => remotes = remotes + (src -> Node(x))
+
+        case RelayedMessage(c, m) => ActorRegistry.actorsFor(c).foreach(_ send m)
+
+        case unknown => log debug ("Unknown message: %s", unknown.toString)
+      }
     }
 
     case RegisterLocalNode(s) => {
       log debug ("RegisterLocalNode: %s", s)
-      local = Node(local.endpoints + s)
+      local = Node(s :: local.endpoints)
       broadcast(Papers(local.endpoints))
     }
 
@@ -146,20 +174,20 @@ abstract class BasicClusterActor extends ClusterActor {
   /**
    * Implement this in a subclass to add node-to-node messaging
    */
-  protected def toOneNode(dest : ADDR_T, msg : Array[Byte]) : Unit
+  protected def toOneNode(dest: ADDR_T, msg: Array[Byte]): Unit
 
   /**
    *  Implement this in a subclass to add node-to-many-nodes messaging
    */
-  protected def toAllNodes(msg : Array[Byte]) : Unit
+  protected def toAllNodes(msg: Array[Byte]): Unit
 
   /**
    * Sends the specified message to the given recipients using the serializer
    * that's been set in the akka-conf
    */
   protected def broadcast[T <: AnyRef](recipients: Iterable[ADDR_T], msg: T): Unit = {
-    lazy val m = Cluster.serializer out msg
-    for (r <- recipients) toOneNode(r,m)
+    lazy val m = serializer out msg
+    for (r <- recipients) toOneNode(r, m)
   }
 
   /**
@@ -167,18 +195,18 @@ abstract class BasicClusterActor extends ClusterActor {
    * that's been set in the akka-conf
    */
   protected def broadcast[T <: AnyRef](msg: T): Unit =
-    if (!remotes.isEmpty) toAllNodes(Cluster.serializer out msg)
+    if (!remotes.isEmpty) toAllNodes(serializer out msg)
 
   /**
    * Applies the given PartialFunction to all known RemoteAddresses
    */
   def lookup[T](handleRemoteAddress: PartialFunction[RemoteAddress, T]): Option[T] =
     remotes.values.toList.flatMap(_.endpoints).find(handleRemoteAddress isDefinedAt _).map(handleRemoteAddress)
-  
+
   /**
    * Applies the given function to all remote addresses known
    */
-  def foreach(f : (RemoteAddress) => Unit) : Unit = remotes.values.toList.flatMap(_.endpoints).foreach(f)
+  def foreach(f: (RemoteAddress) => Unit): Unit = remotes.values.toList.flatMap(_.endpoints).foreach(f)
 
   /**
    * Registers a local endpoint
@@ -205,28 +233,31 @@ abstract class BasicClusterActor extends ClusterActor {
  * Loads a specified ClusterActor and delegates to that instance.
  */
 object Cluster extends Cluster with Logging {
-  @volatile private[remote] var clusterActor: Option[ClusterActor] = None
-  @volatile private[remote] var supervisor:   Option[Supervisor] = None
-  
-  private[remote] lazy val serializer: Serializer = {
-    val className = config.getString("akka.remote.cluster.serializer", Serializer.Java.getClass.getName)
-    Class.forName(className).newInstance.asInstanceOf[Serializer]
-  }
+  lazy val DEFAULT_SERIALIZER_CLASS_NAME = Serializer.Java.getClass.getName
 
-  private[remote] def createClusterActor : Option[ClusterActor] = {
+  @volatile private[remote] var clusterActor: Option[ClusterActor] = None 
+
+  private[remote] def createClusterActor(loader : ClassLoader): Option[ClusterActor] = {
     val name = config.getString("akka.remote.cluster.actor")
-
+    if (name.isEmpty) throw new IllegalArgumentException(
+      "Can't start cluster since the 'akka.remote.cluster.actor' configuration option is not defined")
+      
+    val serializer = Class.forName(config.getString("akka.remote.cluster.serializer", DEFAULT_SERIALIZER_CLASS_NAME)).newInstance.asInstanceOf[Serializer]
+    serializer.classLoader = Some(loader)
     try {
-      name map { fqn =>
-        Class.forName(fqn).newInstance.asInstanceOf[ClusterActor]
+      name map {
+        fqn =>
+          val a = Class.forName(fqn).newInstance.asInstanceOf[ClusterActor]
+          a setSerializer serializer
+          a
       }
     }
     catch {
-      case e => log.error(e,"Couldn't load Cluster provider: [%s]",name.getOrElse("Not specified")); None
+      case e => log.error(e, "Couldn't load Cluster provider: [%s]", name.getOrElse("Not specified")); None
     }
   }
 
-  private[remote] def createSupervisor(actor : ClusterActor) : Option[Supervisor] = {
+  private[akka] def createSupervisor(actor: ClusterActor): Option[Supervisor] = {
     val sup = SupervisorFactory(
       SupervisorConfig(
         RestartStrategy(OneForOne, 5, 1000, List(classOf[Exception])),
@@ -245,23 +276,28 @@ object Cluster extends Cluster with Logging {
   def deregisterLocalNode(hostname: String, port: Int): Unit = clusterActor.foreach(_.deregisterLocalNode(hostname, port))
 
   def relayMessage(to: Class[_ <: Actor], msg: AnyRef): Unit = clusterActor.foreach(_.relayMessage(to, msg))
-  
-  def foreach(f : (RemoteAddress) => Unit) : Unit = clusterActor.foreach(_.foreach(f))
 
-  def start : Unit = synchronized {
-    if(supervisor.isEmpty) {
-      for(actor <- createClusterActor;
-          sup   <- createSupervisor(actor)) {
-          clusterActor = Some(actor)
-          supervisor   = Some(sup)
-          sup.start
+  def foreach(f: (RemoteAddress) => Unit): Unit = clusterActor.foreach(_.foreach(f))
+
+  def start: Unit = start(None)
+
+  def start(serializerClassLoader : Option[ClassLoader]): Unit = synchronized {
+    log.info("Starting up Cluster Service...")
+    if (clusterActor.isEmpty) {
+      for { actor <- createClusterActor(serializerClassLoader getOrElse getClass.getClassLoader)
+             sup <- createSupervisor(actor) } {
+        clusterActor = Some(actor)
+        sup.start
       }
     }
   }
 
-  def shutdown : Unit = synchronized {
-    supervisor.foreach(_.stop)
-    supervisor = None
+  def shutdown: Unit = synchronized {
+    log.info("Shutting down Cluster Service...")
+    for {
+      c <- clusterActor
+      s <- c._supervisor
+    } s.stop
     clusterActor = None
   }
 }
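
Taken together, the reworked Cluster object loads the configured ClusterActor reflectively, hands it a serializer wired to a caller-supplied class loader, and supervises it. Typical use of the facade, based only on the signatures shown above (host name and port are illustrative):

import se.scalablesolutions.akka.remote.Cluster

// Start the cluster service; the class loader is handed to the cluster serializer
Cluster.start(Some(getClass.getClassLoader))

// Advertise a locally running remote server to the rest of the cluster
Cluster.registerLocalNode("localhost", 9999)

// Apply a PartialFunction to the first known remote address it is defined at...
val first = Cluster.lookup { case address => address }

// ...or visit every remote address known to the other nodes
Cluster.foreach(address => println("Remote node: " + address))

Cluster.shutdown
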
diff --git a/akka-core/src/main/scala/remote/RemoteClient.scala b/akka-core/src/main/scala/remote/RemoteClient.scala
index 0887ebcd82..ec3d837c01 100644
--- a/akka-core/src/main/scala/remote/RemoteClient.scala
+++ b/akka-core/src/main/scala/remote/RemoteClient.scala
@@ -8,7 +8,7 @@ import se.scalablesolutions.akka.remote.protobuf.RemoteProtocol.{RemoteRequest,
 import se.scalablesolutions.akka.actor.{Exit, Actor}
 import se.scalablesolutions.akka.dispatch.{DefaultCompletableFuture, CompletableFuture}
 import se.scalablesolutions.akka.util.{UUID, Logging}
-import se.scalablesolutions.akka.Config.config
+import se.scalablesolutions.akka.config.Config.config
 
 import org.jboss.netty.channel._
 import group.DefaultChannelGroup
diff --git a/akka-core/src/main/scala/remote/RemoteProtocolBuilder.scala b/akka-core/src/main/scala/remote/RemoteProtocolBuilder.scala
index 287168140a..bfeec1c34e 100644
--- a/akka-core/src/main/scala/remote/RemoteProtocolBuilder.scala
+++ b/akka-core/src/main/scala/remote/RemoteProtocolBuilder.scala
@@ -18,19 +18,17 @@ object RemoteProtocolBuilder {
   private var SERIALIZER_PROTOBUF: Serializer.Protobuf = Serializer.Protobuf
 
 
-  def setClassLoader(classLoader: ClassLoader) = {
-    SERIALIZER_JAVA = new Serializer.Java
-    SERIALIZER_JAVA_JSON = new Serializer.JavaJSON
-    SERIALIZER_SCALA_JSON = new Serializer.ScalaJSON
-    SERIALIZER_JAVA.setClassLoader(classLoader)
-    SERIALIZER_JAVA_JSON.setClassLoader(classLoader)
-    SERIALIZER_SCALA_JSON.setClassLoader(classLoader)
+  def setClassLoader(cl: ClassLoader) = {
+    SERIALIZER_JAVA.classLoader = Some(cl)
+    SERIALIZER_JAVA_JSON.classLoader = Some(cl)
+    SERIALIZER_SCALA_JSON.classLoader = Some(cl)
   }
   
   def getMessage(request: RemoteRequest): Any = {
     request.getProtocol match {
       case SerializationProtocol.SBINARY =>
-        val renderer = Class.forName(new String(request.getMessageManifest.toByteArray)).newInstance.asInstanceOf[SBinary[_ <: AnyRef]]
+        val renderer = Class.forName(
+          new String(request.getMessageManifest.toByteArray)).newInstance.asInstanceOf[SBinary[_ <: AnyRef]]
         renderer.fromBytes(request.getMessage.toByteArray)
       case SerializationProtocol.SCALA_JSON =>
         val manifest = SERIALIZER_JAVA.in(request.getMessageManifest.toByteArray, None).asInstanceOf[String]
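
RemoteProtocolBuilder no longer rebuilds the serializer singletons when a class loader is supplied; it simply assigns the classLoader field that the reworked Serializer trait now exposes (see the Serializer.scala changes further down). A small sketch of the same idiom, using only the field introduced in this patch:

import se.scalablesolutions.akka.serialization.Serializer

// Inject the application class loader once; deserialization then resolves
// classes through it instead of the system class loader.
Serializer.Java.classLoader = Some(Thread.currentThread.getContextClassLoader)

// Round-trip a (serializable) value through the Java serializer
val bytes  = Serializer.Java.out("some payload")
val result = Serializer.Java.in(bytes, None).asInstanceOf[String]
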
diff --git a/akka-core/src/main/scala/remote/RemoteServer.scala b/akka-core/src/main/scala/remote/RemoteServer.scala
index 02cf98bcd2..8a40049fea 100644
--- a/akka-core/src/main/scala/remote/RemoteServer.scala
+++ b/akka-core/src/main/scala/remote/RemoteServer.scala
@@ -12,7 +12,7 @@ import java.util.{Map => JMap}
 import se.scalablesolutions.akka.actor._
 import se.scalablesolutions.akka.util._
 import se.scalablesolutions.akka.remote.protobuf.RemoteProtocol.{RemoteReply, RemoteRequest}
-import se.scalablesolutions.akka.Config.config
+import se.scalablesolutions.akka.config.Config.config
 
 import org.jboss.netty.bootstrap.ServerBootstrap
 import org.jboss.netty.channel._
@@ -58,7 +58,7 @@ object RemoteNode extends RemoteServer
  */
 object RemoteServer {
   val HOSTNAME = config.getString("akka.remote.server.hostname", "localhost")
-  val PORT = config.getInt("akka.remote.server.port", 9966)
+  val PORT = config.getInt("akka.remote.server.port", 9999)
 
   val CONNECTION_TIMEOUT_MILLIS = config.getInt("akka.remote.server.connection-timeout", 1000)
 
diff --git a/akka-core/src/main/scala/serialization/Serializable.scala b/akka-core/src/main/scala/serialization/Serializable.scala
index b9a3cf5927..b5998cfb2e 100644
--- a/akka-core/src/main/scala/serialization/Serializable.scala
+++ b/akka-core/src/main/scala/serialization/Serializable.scala
@@ -5,10 +5,15 @@
 package se.scalablesolutions.akka.serialization
 
 import org.codehaus.jackson.map.ObjectMapper
+
 import com.google.protobuf.Message
-import reflect.Manifest
+
+import scala.reflect.Manifest
+
 import sbinary.DefaultProtocol
+
 import java.io.{StringWriter, ByteArrayOutputStream, ObjectOutputStream}
+
 import sjson.json.{Serializer=>SJSONSerializer}
 
 object SerializationProtocol {
diff --git a/akka-core/src/main/scala/serialization/Serializer.scala b/akka-core/src/main/scala/serialization/Serializer.scala
index 3eb9315126..c878548711 100644
--- a/akka-core/src/main/scala/serialization/Serializer.scala
+++ b/akka-core/src/main/scala/serialization/Serializer.scala
@@ -18,8 +18,12 @@ import sjson.json.{Serializer => SJSONSerializer}
  * @author Jonas Bonér
  */
 trait Serializer {
+  var classLoader: Option[ClassLoader] = None
+
   def deepClone(obj: AnyRef): AnyRef
+
   def out(obj: AnyRef): Array[Byte]
+
   def in(bytes: Array[Byte], clazz: Option[Class[_]]): AnyRef
 }
 
@@ -51,11 +55,7 @@ object Serializer {
    * @author Jonas Bonér
    */
   object Java extends Java
-  class Java extends Serializer {
-    private var classLoader: Option[ClassLoader] = None
-
-    def setClassLoader(cl: ClassLoader) = classLoader = Some(cl)
-
+  trait Java extends Serializer {
     def deepClone(obj: AnyRef): AnyRef = in(out(obj), None)
 
     def out(obj: AnyRef): Array[Byte] = {
@@ -67,8 +67,9 @@ object Serializer {
     }
 
     def in(bytes: Array[Byte], clazz: Option[Class[_]]): AnyRef = {
-      val in = if (classLoader.isDefined) new ClassLoaderObjectInputStream(classLoader.get, new ByteArrayInputStream(bytes))
-               else new ObjectInputStream(new ByteArrayInputStream(bytes))
+      val in = 
+        if (classLoader.isDefined) new ClassLoaderObjectInputStream(classLoader.get, new ByteArrayInputStream(bytes))
+        else new ObjectInputStream(new ByteArrayInputStream(bytes))
       val obj = in.readObject
       in.close
       obj
@@ -79,18 +80,21 @@ object Serializer {
    * @author Jonas Bonér
    */
   object Protobuf extends Protobuf
-  class Protobuf extends Serializer {
+  trait Protobuf extends Serializer {
     def deepClone(obj: AnyRef): AnyRef = in(out(obj), Some(obj.getClass))
 
     def out(obj: AnyRef): Array[Byte] = {
-      if (!obj.isInstanceOf[Message]) throw new IllegalArgumentException("Can't serialize a non-protobuf message using protobuf [" + obj + "]")
+      if (!obj.isInstanceOf[Message]) throw new IllegalArgumentException(
+        "Can't serialize a non-protobuf message using protobuf [" + obj + "]")
       obj.asInstanceOf[Message].toByteArray
     }
     
     def in(bytes: Array[Byte], clazz: Option[Class[_]]): AnyRef = {
-      if (!clazz.isDefined) throw new IllegalArgumentException("Need a protobuf message class to be able to serialize bytes using protobuf") 
+      if (!clazz.isDefined) throw new IllegalArgumentException(
+        "Need a protobuf message class to be able to serialize bytes using protobuf") 
       // TODO: should we cache this method lookup?
-      val message = clazz.get.getDeclaredMethod("getDefaultInstance", EMPTY_CLASS_ARRAY: _*).invoke(null, EMPTY_ANY_REF_ARRAY: _*).asInstanceOf[Message]
+      val message = clazz.get.getDeclaredMethod(
+        "getDefaultInstance", EMPTY_CLASS_ARRAY: _*).invoke(null, EMPTY_ANY_REF_ARRAY: _*).asInstanceOf[Message]
       message.toBuilder().mergeFrom(bytes).build                                                                                  
     }
 
@@ -104,13 +108,9 @@ object Serializer {
    * @author Jonas Bonér
    */
   object JavaJSON extends JavaJSON
-  class JavaJSON extends Serializer {
+  trait JavaJSON extends Serializer {
     private val mapper = new ObjectMapper
 
-    private var classLoader: Option[ClassLoader] = None
-
-    def setClassLoader(cl: ClassLoader) = classLoader = Some(cl)
-
     def deepClone(obj: AnyRef): AnyRef = in(out(obj), Some(obj.getClass))
 
     def out(obj: AnyRef): Array[Byte] = {
@@ -122,9 +122,11 @@ object Serializer {
     }
 
     def in(bytes: Array[Byte], clazz: Option[Class[_]]): AnyRef = {
-      if (!clazz.isDefined) throw new IllegalArgumentException("Can't deserialize JSON to instance if no class is provided")
-      val in = if (classLoader.isDefined) new ClassLoaderObjectInputStream(classLoader.get, new ByteArrayInputStream(bytes))
-               else new ObjectInputStream(new ByteArrayInputStream(bytes))
+      if (!clazz.isDefined) throw new IllegalArgumentException(
+        "Can't deserialize JSON to instance if no class is provided")
+      val in = 
+        if (classLoader.isDefined) new ClassLoaderObjectInputStream(classLoader.get, new ByteArrayInputStream(bytes))
+        else new ObjectInputStream(new ByteArrayInputStream(bytes))
       val obj = mapper.readValue(in, clazz.get).asInstanceOf[AnyRef]
       in.close
       obj
@@ -140,13 +142,9 @@ object Serializer {
    * @author Jonas Bonér
    */
   object ScalaJSON extends ScalaJSON
-  class ScalaJSON extends Serializer {
+  trait ScalaJSON extends Serializer {
     def deepClone(obj: AnyRef): AnyRef = in(out(obj), None)
 
-    private var classLoader: Option[ClassLoader] = None
-
-    def setClassLoader(cl: ClassLoader) = classLoader = Some(cl)
-
     def out(obj: AnyRef): Array[Byte] = SJSONSerializer.SJSON.out(obj)
 
     // FIXME set ClassLoader on SJSONSerializer.SJSON
@@ -166,7 +164,7 @@ object Serializer {
    * @author Jonas Bonér
    */
   object SBinary extends SBinary
-  class SBinary {
+  trait SBinary {
     import sbinary.DefaultProtocol._
     
     def deepClone[T <: AnyRef](obj: T)(implicit w : Writes[T], r : Reads[T]): T = in[T](out[T](obj), None)
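
Turning the serializer classes into traits (with the companion objects kept as default instances) means the behavior can now be mixed into other singletons, which is what the new cluster code relies on when it instantiates a serializer reflectively and injects a class loader. An illustrative specialized singleton (not part of the patch):

import se.scalablesolutions.akka.serialization.Serializer

// A custom serializer singleton that presets the class loader; possible only
// now that Serializer.Java is a trait rather than a class.
object MyJavaSerializer extends Serializer.Java {
  classLoader = Some(getClass.getClassLoader)
}

val copy = MyJavaSerializer.deepClone("payload")
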
diff --git a/akka-core/src/main/scala/stm/DataFlowVariable.scala b/akka-core/src/main/scala/stm/DataFlowVariable.scala
index daed4ec55f..cb1b828db1 100644
--- a/akka-core/src/main/scala/stm/DataFlowVariable.scala
+++ b/akka-core/src/main/scala/stm/DataFlowVariable.scala
@@ -2,7 +2,7 @@
  * Copyright (C) 2009-2010 Scalable Solutions AB 
  */
 
-package se.scalablesolutions.akka.state
+package se.scalablesolutions.akka.stm
 
 import java.util.concurrent.atomic.AtomicReference
 import java.util.concurrent.{ConcurrentLinkedQueue, LinkedBlockingQueue}
diff --git a/akka-core/src/main/scala/stm/HashTrie.scala b/akka-core/src/main/scala/stm/HashTrie.scala
index 02b7ad2145..fcb35baff3 100644
--- a/akka-core/src/main/scala/stm/HashTrie.scala
+++ b/akka-core/src/main/scala/stm/HashTrie.scala
@@ -32,7 +32,7 @@
  POSSIBILITY OF SUCH DAMAGE.
  **/
 
-package se.scalablesolutions.akka.collection
+package se.scalablesolutions.akka.stm
 
 trait PersistentDataStructure
 
@@ -77,7 +77,7 @@ object HashTrie {
 // nodes
 
 @serializable
-private[collection] sealed trait Node[K, +V] {
+private[stm] sealed trait Node[K, +V] {
   val size: Int
   
   def apply(key: K, hash: Int): Option[V]
@@ -90,7 +90,7 @@ private[collection] sealed trait Node[K, +V] {
 }
 
 @serializable
-private[collection] class EmptyNode[K] extends Node[K, Nothing] {
+private[stm] class EmptyNode[K] extends Node[K, Nothing] {
   val size = 0
   
   def apply(key: K, hash: Int) = None
@@ -106,12 +106,12 @@ private[collection] class EmptyNode[K] extends Node[K, Nothing] {
   }
 }
 
-private[collection] abstract class SingleNode[K, +V] extends Node[K, V] {
+private[stm] abstract class SingleNode[K, +V] extends Node[K, V] {
   val hash: Int
 }
 
 
-private[collection] class LeafNode[K, +V](key: K, val hash: Int, value: V) extends SingleNode[K, V] {
+private[stm] class LeafNode[K, +V](key: K, val hash: Int, value: V) extends SingleNode[K, V] {
   val size = 1
   
   def apply(key: K, hash: Int) = if (this.key == key) Some(value) else None
@@ -141,7 +141,7 @@ private[collection] class LeafNode[K, +V](key: K, val hash: Int, value: V) exten
 }
 
 
-private[collection] class CollisionNode[K, +V](val hash: Int, bucket: List[(K, V)]) extends SingleNode[K, V] {
+private[stm] class CollisionNode[K, +V](val hash: Int, bucket: List[(K, V)]) extends SingleNode[K, V] {
   lazy val size = bucket.length
   
   def this(hash: Int, pairs: (K, V)*) = this(hash, pairs.toList)
@@ -185,7 +185,7 @@ private[collection] class CollisionNode[K, +V](val hash: Int, bucket: List[(K, V
   override def toString = "CollisionNode(" + bucket.toString + ")"
 }
 
-private[collection] class BitmappedNode[K, +V](shift: Int)(table: Array[Node[K, V]], bits: Int) extends Node[K, V] {
+private[stm] class BitmappedNode[K, +V](shift: Int)(table: Array[Node[K, V]], bits: Int) extends Node[K, V] {
   lazy val size = {
     val sizes = for {
       n <- table
@@ -284,7 +284,7 @@ private[collection] class BitmappedNode[K, +V](shift: Int)(table: Array[Node[K,
 }
 
 
-private[collection] object BitmappedNode {
+private[stm] object BitmappedNode {
   def apply[K, V](shift: Int)(node: SingleNode[K, V], key: K, hash: Int, value: V) = {
     val table = new Array[Node[K, V]](Math.max((hash >>> shift) & 0x01f, (node.hash >>> shift) & 0x01f) + 1)
     
@@ -312,7 +312,7 @@ private[collection] object BitmappedNode {
 }
 
 
-private[collection] class FullNode[K, +V](shift: Int)(table: Array[Node[K, V]]) extends Node[K, V] {
+private[stm] class FullNode[K, +V](shift: Int)(table: Array[Node[K, V]]) extends Node[K, V] {
   lazy val size = table.foldLeft(0) { _ + _.size }
   
   def apply(key: K, hash: Int) = table((hash >>> shift) & 0x01f)(key, hash)
diff --git a/akka-core/src/main/scala/stm/ResultOrFailure.scala b/akka-core/src/main/scala/stm/ResultOrFailure.scala
index 51ce6ddf68..ced5572104 100644
--- a/akka-core/src/main/scala/stm/ResultOrFailure.scala
+++ b/akka-core/src/main/scala/stm/ResultOrFailure.scala
@@ -2,9 +2,7 @@
  * Copyright (C) 2009-2010 Scalable Solutions AB 
  */
 
-package se.scalablesolutions.akka.util
-
-import stm.Transaction
+package se.scalablesolutions.akka.stm
 
 /**
  * Reference that can hold either a typed value or an exception.
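
A reference with these semantics, holding either a typed value or an exception and rethrowing the exception when read, can be sketched with Either. This is a hypothetical illustration only, not the actual ResultOrFailure API:

// Hypothetical sketch, not the akka class.
class ResultHolder[T] {
  private var result: Either[Throwable, T] = Left(new IllegalStateException("no result"))
  def update(value: T): Unit = result = Right(value)
  def fail(cause: Throwable): Unit = result = Left(cause)
  def apply(): T = result.fold(cause => throw cause, value => value)
}
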
diff --git a/akka-core/src/main/scala/stm/Transaction.scala b/akka-core/src/main/scala/stm/Transaction.scala
index 1637b4c906..a7184e969d 100644
--- a/akka-core/src/main/scala/stm/Transaction.scala
+++ b/akka-core/src/main/scala/stm/Transaction.scala
@@ -6,16 +6,18 @@ package se.scalablesolutions.akka.stm
 
 import java.util.concurrent.atomic.AtomicLong
 import java.util.concurrent.atomic.AtomicInteger
+import java.util.concurrent.TimeUnit
+
+import scala.collection.mutable.HashMap
 
-import se.scalablesolutions.akka.state.Committable
 import se.scalablesolutions.akka.util.Logging
 
 import org.multiverse.api.{Transaction => MultiverseTransaction}
 import org.multiverse.api.GlobalStmInstance.getGlobalStmInstance
 import org.multiverse.api.ThreadLocalTransaction._
-import org.multiverse.templates.OrElseTemplate
-
-import scala.collection.mutable.HashMap
+import org.multiverse.templates.{TransactionTemplate, OrElseTemplate}
+import org.multiverse.utils.backoff.ExponentialBackoffPolicy
+import org.multiverse.stms.alpha.AlphaStm
 
 class NoTransactionInScopeException extends RuntimeException
 class TransactionRetryException(message: String) extends RuntimeException(message)
@@ -30,8 +32,8 @@ class TransactionRetryException(message: String) extends RuntimeException(messag
  * Here are some examples (assuming implicit transaction family name in scope): 
  * 
  * import se.scalablesolutions.akka.stm.Transaction._
- * 
- * atomic {
+ *
+ * atomic  {
  *   .. // do something within a transaction
  * }
  * 
@@ -39,8 +41,8 @@ class TransactionRetryException(message: String) extends RuntimeException(messag
 * Example of atomic transaction management using atomic block with retry count:
 * 
  * import se.scalablesolutions.akka.stm.Transaction._
- * 
- * atomic(maxNrOfRetries) {
+ *
+ * atomic(maxNrOfRetries)  {
  *   .. // do something within a transaction
  * }
  * 
@@ -49,10 +51,10 @@ class TransactionRetryException(message: String) extends RuntimeException(messag
 * Which is a good way to reduce contention and transaction collisions.
 * 
  * import se.scalablesolutions.akka.stm.Transaction._
- * 
- * atomically {
+ *
+ * atomically  {
  *   .. // try to do something
- * } orElse {
+ * } orElse  {
  *   .. // if transaction clashes try do do something else to minimize contention
  * }
  * 
@@ -61,11 +63,11 @@ class TransactionRetryException(message: String) extends RuntimeException(messag
 * 
 * 
  * import se.scalablesolutions.akka.stm.Transaction._
- * for (tx <- Transaction) {
+ * for (tx <- Transaction)  {
  *   ... // do transactional stuff
  * }
  *
- * val result = for (tx <- Transaction) yield {
+ * val result = for (tx <- Transaction) yield  {
  *   ... // do transactional stuff yielding a result
  * }
  * 
@@ -78,17 +80,17 @@ class TransactionRetryException(message: String) extends RuntimeException(messag
 * 
 * // You can use them together with Transaction in a for comprehension since
 * // TransactionalRef is also monadic
- * for {
+ * for  {
 *   tx <- Transaction
 *   ref <- refs
 * } {
 *   ... // use the ref inside a transaction
 * }
 * 
- * val result = for {
+ * val result = for  {
 *   tx <- Transaction
 *   ref <- refs
- * } yield {
+ * } yield  {
 *   ... // use the ref inside a transaction, yield a result
 * }
 * 
@@ -97,101 +99,87 @@ class TransactionRetryException(message: String) extends RuntimeException(messag
 */
 object Transaction extends TransactionManagement {
   val idFactory = new AtomicLong(-1L)
+/*
+  import AlphaStm._
+  private val defaultTxBuilder = new AlphaTransactionFactoryBuilder
+  defaultTxBuilder.setReadonly(false)
+  defaultTxBuilder.setInterruptible(INTERRUPTIBLE)
+  defaultTxBuilder.setMaxRetryCount(MAX_NR_OF_RETRIES)
+  defaultTxBuilder.setPreventWriteSkew(PREVENT_WRITE_SKEW)
+  defaultTxBuilder.setAutomaticReadTracking(AUTOMATIC_READ_TRACKING)
+  defaultTxBuilder.setSmartTxLengthSelector(SMART_TX_LENGTH_SELECTOR)
+  defaultTxBuilder.setBackoffPolicy(new ExponentialBackoffPolicy)
+  private val readOnlyTxBuilder = new AlphaStm.AlphaTransactionFactoryBuilder
+  readOnlyTxBuilder.setReadonly(true)
+  readOnlyTxBuilder.setInterruptible(INTERRUPTIBLE)
+  readOnlyTxBuilder.setMaxRetryCount(MAX_NR_OF_RETRIES)
+  readOnlyTxBuilder.setPreventWriteSkew(PREVENT_WRITE_SKEW)
+  readOnlyTxBuilder.setAutomaticReadTracking(AUTOMATIC_READ_TRACKING)
+  readOnlyTxBuilder.setSmartTxLengthSelector(SMART_TX_LENGTH_SELECTOR)
+  readOnlyTxBuilder.setBackoffPolicy(new ExponentialBackoffPolicy)
+*/
+
+  /**
+   * See ScalaDoc on class.
+   */
+  def map[T](f: => T)(implicit transactionFamilyName: String): T =
+    atomic {f}
 
   /**
    * See ScalaDoc on class.
    */
-  def map[T](f: Transaction => T)(implicit transactionFamilyName: String): T = atomic { f(getTransactionInScope) }
+  def flatMap[T](f: => T)(implicit transactionFamilyName: String): T =
+    atomic {f}
 
   /**
    * See ScalaDoc on class.
    */
-  def flatMap[T](f: Transaction => T)(implicit transactionFamilyName: String): T = atomic { f(getTransactionInScope) }
-
-  /**
-   * See ScalaDoc on class.
-   */
-  def foreach(f: Transaction => Unit)(implicit transactionFamilyName: String): Unit = atomic { f(getTransactionInScope) }
+  def foreach(f: => Unit)(implicit transactionFamilyName: String): Unit =
+    atomic {f}
 
   /**
    * Creates a "pure" STM atomic transaction and by-passes all transactions hooks
    * such as persistence etc.
    * Only for internal usage.
    */
-  private[akka] def pureAtomic[T](body: => T): T = new AtomicTemplate[T](
-    getGlobalStmInstance, "internal", false, false, TransactionManagement.MAX_NR_OF_RETRIES) {
+  private[akka] def pureAtomic[T](body: => T): T = new TransactionTemplate[T]() {
     def execute(mtx: MultiverseTransaction): T = body
   }.execute()
 
   /**
    * See ScalaDoc on class.
    */
-  def atomic[T](body: => T)(implicit transactionFamilyName: String): T = new AtomicTemplate[T](
-    getGlobalStmInstance, transactionFamilyName, false, false, TransactionManagement.MAX_NR_OF_RETRIES) {
-    def execute(mtx: MultiverseTransaction): T = body
-    override def postStart(mtx: MultiverseTransaction) = {
-      val tx = new Transaction
-      tx.transaction = Some(mtx)
-      setTransaction(Some(tx))
-    }
-    override def postCommit = {
-      if (isTransactionInScope) getTransactionInScope.commit
-      else throw new IllegalStateException("No transaction in scope")
-    }
-  }.execute()
+  def atomic[T](body: => T)(implicit transactionFamilyName: String): T = {
+    // defaultTxBuilder.setFamilyName(transactionFamilyName)
+    // new TransactionTemplate[T](defaultTxBuilder.build) {
+    new TransactionTemplate[T]() { // FIXME take factory
+      def execute(mtx: MultiverseTransaction): T = {
+        val result = body
 
-  /**
-   * See ScalaDoc on class.
-   */
-  def atomic[T](retryCount: Int)(body: => T)(implicit transactionFamilyName: String): T = {
-    new AtomicTemplate[T](getGlobalStmInstance, transactionFamilyName, false, false, retryCount) {
-      def execute(mtx: MultiverseTransaction): T = body
-      override def postStart(mtx: MultiverseTransaction) = {
+        log.trace("Committing transaction [%s] \nwith family name [%s] \nby joining transaction set")
+        getTransactionSetInScope.joinCommit(mtx)
+
+        // FIXME tryJoinCommit(mtx, TransactionManagement.TRANSACTION_TIMEOUT, TimeUnit.MILLISECONDS)
+        //getTransactionSetInScope.tryJoinCommit(mtx, TransactionManagement.TRANSACTION_TIMEOUT, TimeUnit.MILLISECONDS)
+
+        clearTransaction
+        result
+      }
+
+      override def onStart(mtx: MultiverseTransaction) = {
+        val txSet = if (!isTransactionSetInScope) createNewTransactionSet
+                    else getTransactionSetInScope
         val tx = new Transaction
         tx.transaction = Some(mtx)
         setTransaction(Some(tx))
-      }
-      override def postCommit = {
-        if (isTransactionInScope) getTransactionInScope.commit
-        else throw new IllegalStateException("No transaction in scope")
-      }
-    }.execute
-  }
-
-  /**
-   * See ScalaDoc on class.
-   */
-  def atomicReadOnly[T](retryCount: Int)(body: => T)(implicit transactionFamilyName: String): T = {
-    new AtomicTemplate[T](getGlobalStmInstance, transactionFamilyName, false, true, retryCount) {
-      def execute(mtx: MultiverseTransaction): T = body
-      override def postStart(mtx: MultiverseTransaction) = {
-        val tx = new Transaction
-        tx.transaction = Some(mtx)
-        setTransaction(Some(tx))
+        txSet.registerOnCommitTask(new Runnable() {
+          def run = tx.commit
+        })
+        txSet.registerOnAbortTask(new Runnable() {
+          def run = tx.abort
+        })
       }
-      override def postCommit = {
-        if (isTransactionInScope) getTransactionInScope.commit
-        else throw new IllegalStateException("No transaction in scope")
-      }
-    }.execute
-  }
-
-  /**
-   * See ScalaDoc on class.
-   */
-  def atomicReadOnly[T](body: => T): T = {
-    new AtomicTemplate[T](true) {
-      def execute(mtx: MultiverseTransaction): T = body
-      override def postStart(mtx: MultiverseTransaction) = {
-        val tx = new Transaction
-        tx.transaction = Some(mtx)
-        setTransaction(Some(tx))
-      }
-      override def postCommit = {
-        if (isTransactionInScope) getTransactionInScope.commit
-        else throw new IllegalStateException("No transaction in scope")
-      }
-    }.execute
+    }.execute()
   }
 
   /**
@@ -216,23 +204,28 @@ object Transaction extends TransactionManagement {
  */
 @serializable class Transaction extends Logging {
   import Transaction._
-  
+
+  log.trace("Creating %s", toString)
   val id = Transaction.idFactory.incrementAndGet
   @volatile private[this] var status: TransactionStatus = TransactionStatus.New
   private[akka] var transaction: Option[MultiverseTransaction] = None
   private[this] val persistentStateMap = new HashMap[String, Committable]
   private[akka] val depth = new AtomicInteger(0)
-  
+
   // --- public methods ---------
 
   def commit = synchronized {
+    log.trace("Committing transaction %s", toString)
     pureAtomic {
       persistentStateMap.values.foreach(_.commit)
-      TransactionManagement.clearTransaction
     }
     status = TransactionStatus.Completed
   }
 
+  def abort = synchronized {
+    log.trace("Aborting transaction %s", toString)
+  }
+
   def isNew = synchronized { status == TransactionStatus.New }
 
   def isActive = synchronized { status == TransactionStatus.Active }
@@ -259,13 +252,13 @@ object Transaction extends TransactionManagement {
   private def ensureIsActiveOrAborted =
     if (!(status == TransactionStatus.Active || status == TransactionStatus.Aborted))
-    throw new IllegalStateException(
-      "Expected ACTIVE or ABORTED transaction - current status [" + status + "]: " + toString)
+      throw new IllegalStateException(
+        "Expected ACTIVE or ABORTED transaction - current status [" + status + "]: " + toString)
 
   private def ensureIsActiveOrNew =
     if (!(status == TransactionStatus.Active || status == TransactionStatus.New))
-    throw new IllegalStateException(
-      "Expected ACTIVE or NEW transaction - current status [" + status + "]: " + toString)
+      throw new IllegalStateException(
+        "Expected ACTIVE or NEW transaction - current status [" + status + "]: " + toString)
 
   // For reinitialize transaction after sending it over the wire
   private[akka] def reinit = synchronized {
@@ -277,14 +270,14 @@ object Transaction extends TransactionManagement {
   }
 
   override def equals(that: Any): Boolean = synchronized {
-    that != null &&
-    that.isInstanceOf[Transaction] &&
-    that.asInstanceOf[Transaction].id == this.id
+    that != null &&
+      that.isInstanceOf[Transaction] &&
+      that.asInstanceOf[Transaction].id == this.id
   }
-  
+
   override def hashCode(): Int = synchronized { id.toInt }
-  
-  override def toString(): String = synchronized { "Transaction[" + id + ", " + status + "]" }
+
+  override def toString = synchronized { "Transaction[" + id + ", " + status + "]" }
 }
 
 /**
diff --git a/akka-core/src/main/scala/stm/TransactionManagement.scala b/akka-core/src/main/scala/stm/TransactionManagement.scala
index 2dd7ed9c79..96742b9363 100644
--- a/akka-core/src/main/scala/stm/TransactionManagement.scala
+++ b/akka-core/src/main/scala/stm/TransactionManagement.scala
@@ -9,51 +9,80 @@ import java.util.concurrent.atomic.AtomicBoolean
 import se.scalablesolutions.akka.util.Logging
 
 import org.multiverse.api.ThreadLocalTransaction._
+import org.multiverse.commitbarriers.CountDownCommitBarrier
 
 class StmException(msg: String) extends RuntimeException(msg)
 
-class TransactionAwareWrapperException(
-  val cause: Throwable, val tx: Option[Transaction]) extends RuntimeException(cause) {
-  override def toString(): String = "TransactionAwareWrapperException[" + cause + ", " + tx + "]"
+class TransactionAwareWrapperException(val cause: Throwable, val tx: Option[Transaction]) extends RuntimeException(cause) {
+  override def toString = "TransactionAwareWrapperException[" + cause + ", " + tx + "]"
 }
 
 object TransactionManagement extends TransactionManagement {
-  import se.scalablesolutions.akka.Config._
-
-  val MAX_NR_OF_RETRIES = config.getInt("akka.stm.max-nr-of-retries", 100)
-  val TRANSACTION_ENABLED = new AtomicBoolean(config.getBool("akka.stm.service", false))
+  import se.scalablesolutions.akka.config.Config._
 
+  val TRANSACTION_ENABLED = new AtomicBoolean(config.getBool("akka.stm.service", false))
+  val FAIR_TRANSACTIONS = config.getBool("akka.stm.fair", true)
+  val INTERRUPTIBLE = config.getBool("akka.stm.interruptible", true)
+  val MAX_NR_OF_RETRIES = config.getInt("akka.stm.max-nr-of-retries", 1000)
+  val TRANSACTION_TIMEOUT = config.getInt("akka.stm.timeout", 10000)
+  val SMART_TX_LENGTH_SELECTOR = config.getBool("akka.stm.smart-tx-length-selector", true)
 
   def isTransactionalityEnabled = TRANSACTION_ENABLED.get
+
   def disableTransactions = TRANSACTION_ENABLED.set(false)
 
-  private[akka] val currentTransaction: ThreadLocal[Option[Transaction]] = new ThreadLocal[Option[Transaction]]() {
+  private[akka] val transactionSet = new ThreadLocal[Option[CountDownCommitBarrier]]() {
+    override protected def initialValue: Option[CountDownCommitBarrier] = None
+  }
+
+  private[akka] val transaction = new ThreadLocal[Option[Transaction]]() {
     override protected def initialValue: Option[Transaction] = None
   }
+
+  private[akka] def getTransactionSet: CountDownCommitBarrier = {
+    val option = transactionSet.get
+    if ((option eq null) || option.isEmpty) throw new IllegalStateException("No TransactionSet in scope")
+    option.get
+  }
+
+  private[akka] def getTransaction: Transaction = {
+    val option = transaction.get
+    if ((option eq null) || option.isEmpty) throw new IllegalStateException("No Transaction in scope")
+    option.get
+  }
 }
 
 trait TransactionManagement extends Logging {
-  import TransactionManagement.currentTransaction
 
-  private[akka] def createNewTransaction = currentTransaction.set(Some(new Transaction))
-
-  private[akka] def setTransaction(transaction: Option[Transaction]) = if (transaction.isDefined) {
-    val tx = transaction.get
-    currentTransaction.set(transaction)
-    if (tx.transaction.isDefined) setThreadLocalTransaction(tx.transaction.get)
-    else throw new IllegalStateException("No transaction defined")
+  private[akka] def createNewTransactionSet: CountDownCommitBarrier = {
+    val txSet = new CountDownCommitBarrier(1, TransactionManagement.FAIR_TRANSACTIONS)
+    TransactionManagement.transactionSet.set(Some(txSet))
+    txSet
   }
 
+  private[akka] def setTransactionSet(txSet: Option[CountDownCommitBarrier]) =
+    if (txSet.isDefined) TransactionManagement.transactionSet.set(txSet)
+
+  private[akka] def setTransaction(tx: Option[Transaction]) =
+    if (tx.isDefined) TransactionManagement.transaction.set(tx)
+
+  private[akka] def clearTransactionSet = TransactionManagement.transactionSet.set(None)
+
   private[akka] def clearTransaction = {
-    currentTransaction.set(None)
+    TransactionManagement.transaction.set(None)
     setThreadLocalTransaction(null)
   }
 
-  private[akka] def getTransactionInScope = currentTransaction.get.get
-
-  private[akka] def isTransactionInScope = currentTransaction.get.isDefined
+  private[akka] def getTransactionSetInScope = TransactionManagement.getTransactionSet
 
-  private[akka] def incrementTransaction = if (isTransactionInScope) getTransactionInScope.increment
+  private[akka] def getTransactionInScope = TransactionManagement.getTransaction
 
-  private[akka] def decrementTransaction = if (isTransactionInScope) getTransactionInScope.decrement
-}
+  private[akka] def isTransactionSetInScope = {
+    val option = TransactionManagement.transactionSet.get
+    (option ne null) && option.isDefined
+  }
 
+  private[akka] def isTransactionInScope = {
+    val option = TransactionManagement.transaction.get
+    (option ne null) && option.isDefined
+  }
+}
\ No newline at end of file
diff --git a/akka-core/src/main/scala/stm/TransactionalState.scala b/akka-core/src/main/scala/stm/TransactionalState.scala
index 6003a89f89..4f35f1199e 100644
--- a/akka-core/src/main/scala/stm/TransactionalState.scala
+++ b/akka-core/src/main/scala/stm/TransactionalState.scala
@@ -2,14 +2,12 @@
  * Copyright (C) 2009-2010 Scalable Solutions AB
  */
 
-package se.scalablesolutions.akka.state
+package se.scalablesolutions.akka.stm
 
 import se.scalablesolutions.akka.stm.Transaction.atomic
-import se.scalablesolutions.akka.stm.NoTransactionInScopeException
-import se.scalablesolutions.akka.collection._
 import se.scalablesolutions.akka.util.UUID
 
-import org.multiverse.datastructures.refs.manual.Ref;
+import org.multiverse.stms.alpha.AlphaRef
 
 /**
  * Example Scala usage:
@@ -55,6 +53,17 @@ trait Committable {
 }
 
 /**
+ * Alias to TransactionalRef.
+ *
+ * @author Jonas Bonér
+ */
+object Ref {
+  def apply[T]() = new Ref[T]
+}
+
+/**
+ * Alias to Ref.
+ *
  * @author Jonas Bonér
 */
 object TransactionalRef {
@@ -67,8 +76,17 @@ object TransactionalRef {
   def apply[T]() = new TransactionalRef[T]
 }
 
+/**
+ * Implements a transactional managed reference.
+ * Alias to TransactionalRef.
+ *
+ * @author Jonas Bonér
+ */
+class Ref[T] extends TransactionalRef[T]
+
 /**
  * Implements a transactional managed reference.
+ * Alias to Ref.
  *
  * @author Jonas Bonér
  */
@@ -78,7 +96,7 @@ class TransactionalRef[T] extends Transactional {
   implicit val txInitName = "TransactionalRef:Init"
   val uuid = UUID.newUuid.toString
 
-  private[this] val ref: Ref[T] = atomic { new Ref }
+  private[this] lazy val ref: AlphaRef[T] = new AlphaRef
 
   def swap(elem: T) = {
     ensureIsInTransaction
diff --git a/akka-core/src/main/scala/stm/Vector.scala b/akka-core/src/main/scala/stm/Vector.scala
index e341875990..a9667d2521 100644
--- a/akka-core/src/main/scala/stm/Vector.scala
+++ b/akka-core/src/main/scala/stm/Vector.scala
@@ -32,7 +32,7 @@
 POSSIBILITY OF SUCH DAMAGE.
 **/
 
-package se.scalablesolutions.akka.collection
+package se.scalablesolutions.akka.stm
 
 import Vector._
 
@@ -54,7 +54,7 @@ class Vector[+T] private (val length: Int, shift: Int, root: Array[AnyRef], tail
 * (somewhat dynamically-typed) implementation in place.
 */
-  private[collection] def this() = this(0, 5, EmptyArray, EmptyArray)
+  private[stm] def this() = this(0, 5, EmptyArray, EmptyArray)
 
   def apply(i: Int): T = {
     if (i >= 0 && i < length) {
@@ -317,14 +317,14 @@ class Vector[+T] private (val length: Int, shift: Int, root: Array[AnyRef], tail
 }
 
 object Vector {
-  private[collection] val EmptyArray = new Array[AnyRef](0)
+  private[stm] val EmptyArray = new Array[AnyRef](0)
 
   def apply[T](elems: T*) = elems.foldLeft(EmptyVector:Vector[T]) { _ + _ }
 
   def unapplySeq[T](vec: Vector[T]): Option[Seq[T]] = Some(vec)
 
   @inline
-  private[collection] def array(elems: AnyRef*) = {
+  private[stm] def array(elems: AnyRef*) = {
     val back = new Array[AnyRef](elems.length)
     Array.copy(elems, 0, back, 0, back.length)
 
@@ -334,7 +334,7 @@ object Vector {
 
 object EmptyVector extends Vector[Nothing]
 
-private[collection] abstract class VectorProjection[+T] extends Vector[T] {
+private[stm] abstract class VectorProjection[+T] extends Vector[T] {
   override val length: Int
 
   override def apply(i: Int): T
diff --git a/akka-patterns/src/test/scala/AgentTest.scala b/akka-core/src/test/scala/AgentTest.scala
similarity index 72%
rename from akka-patterns/src/test/scala/AgentTest.scala
rename to akka-core/src/test/scala/AgentTest.scala
index a415d9c218..013cd13ada 100644
--- a/akka-patterns/src/test/scala/AgentTest.scala
+++ b/akka-core/src/test/scala/AgentTest.scala
@@ -7,18 +7,23 @@ import org.scalatest.junit.JUnitRunner
 import org.scalatest.matchers.MustMatchers
 import org.junit.{Test}
 
+/*
 @RunWith(classOf[JUnitRunner])
 class AgentTest extends junit.framework.TestCase with Suite with MustMatchers with ActorTestUtil with Logging {
 
-  @Test def testAgent = verify(new TestActor {
-    def test = {
-      val t = Agent(5)
-      handle(t){
-        t.update( _ + 1 )
-        t.update( _ * 2 )
-        val r = t()
-        r must be (12)
-      }
-    }
+  @Test def testAgent = verify(new TestActor {
+    def test = {
+      atomic {
+        val t = Agent(5)
+        handle(t) {
+          t.update(_ + 1)
+          t.update(_ * 2)
+
+          val r = t()
+          r must be(12)
+        }
+      }
+    }
   })
 }
+*/
diff --git a/akka-core/src/test/scala/ClientInitiatedRemoteActorTest.scala b/akka-core/src/test/scala/ClientInitiatedRemoteActorTest.scala
index 81fb4780da..ff2843efe8 100644
--- a/akka-core/src/test/scala/ClientInitiatedRemoteActorTest.scala
+++ b/akka-core/src/test/scala/ClientInitiatedRemoteActorTest.scala
@@ -49,7 +49,7 @@ class RemoteActorSpecActorAsyncSender extends Actor {
 class ClientInitiatedRemoteActorTest extends JUnitSuite {
   import Actor.Sender.Self
 
-  akka.Config.config
+  akka.config.Config.config
 
   val HOSTNAME = "localhost"
   val PORT1 = 9990
diff --git a/akka-core/src/test/scala/ExecutorBasedEventDrivenDispatcherActorsTest.scala b/akka-core/src/test/scala/ExecutorBasedEventDrivenDispatcherActorsTest.scala
new file mode 100644
index 0000000000..b3e04f3244
--- /dev/null
+++ b/akka-core/src/test/scala/ExecutorBasedEventDrivenDispatcherActorsTest.scala
@@ -0,0 +1,85 @@
+package se.scalablesolutions.akka.actor
+
+import org.scalatest.junit.JUnitSuite
+import org.junit.Test
+import se.scalablesolutions.akka.dispatch.Dispatchers
+import org.scalatest.matchers.MustMatchers
+import java.util.concurrent.CountDownLatch
+
+/**
+ * Tests the behaviour of the executor based event driven dispatcher when multiple actors are being dispatched on it.
+ *
+ * @author Jan Van Besien
+ */
+class ExecutorBasedEventDrivenDispatcherActorsTest extends JUnitSuite with MustMatchers with ActorTestUtil {
+  class SlowActor(finishedCounter: CountDownLatch) extends Actor {
+    messageDispatcher = Dispatchers.globalExecutorBasedEventDrivenDispatcher
+    id = "SlowActor"
+
+    def receive = {
+      case x: Int => {
+        Thread.sleep(50) // slow actor
+        finishedCounter.countDown
+      }
+    }
+  }
+
+  class FastActor(finishedCounter: CountDownLatch) extends Actor {
+    messageDispatcher = Dispatchers.globalExecutorBasedEventDrivenDispatcher
+    id = "FastActor"
+
+    def receive = {
+      case x: Int => {
+        finishedCounter.countDown
+      }
+    }
+  }
+
+  @Test def slowActorShouldntBlockFastActor = verify(new TestActor {
+    def test = {
+      val sFinished = new CountDownLatch(50)
+      val fFinished = new CountDownLatch(10)
+      val s = new SlowActor(sFinished)
+      val f = new FastActor(fFinished)
+
+      handle(s, f) {
+        // send a lot of stuff to s
+        for (i <- 1 to 50) {
+          s ! i
+        }
+
+        // send some messages to f
+        for (i <- 1 to 10) {
+          f ! i
+        }
+
+        // now assert that f is finished while s is still busy
+        fFinished.await
+        assert(sFinished.getCount > 0)
+      }
+    }
+  })
+
+}
+
+trait ActorTestUtil {
+  def handle[T](actors: Actor*)(test: => T): T = {
+    for (a <- actors) a.start
+    try {
+      test
+    }
+    finally {
+      for (a <- actors) a.stop
+    }
+  }
+
+  def verify(actor: TestActor): Unit = handle(actor) {
+    actor.test
+  }
+}
+
+abstract class TestActor extends Actor with ActorTestUtil {
+  def test: Unit
+
+  def receive = {case _ =>}
+}
diff --git a/akka-core/src/test/scala/InMemoryActorTest.scala b/akka-core/src/test/scala/InMemoryActorTest.scala
index cd06b80d0a..5692d7b01f 100644
--- a/akka-core/src/test/scala/InMemoryActorTest.scala
+++ b/akka-core/src/test/scala/InMemoryActorTest.scala
@@ -3,7 +3,7 @@ package se.scalablesolutions.akka.actor
 
 import org.scalatest.junit.JUnitSuite
 import org.junit.Test
-import se.scalablesolutions.akka.state.{TransactionalState, TransactionalMap, TransactionalRef, TransactionalVector}
+import se.scalablesolutions.akka.stm.{TransactionalState, TransactionalMap, TransactionalRef, TransactionalVector}
 
 case class GetMapState(key: String)
 case object GetVectorState
@@ -23,7 +23,7 @@ case class SuccessOneWay(key: String, value: String)
 case class FailureOneWay(key: String, value: String, failer: Actor)
 
 class InMemStatefulActor extends Actor {
-  timeout = 100000
+  timeout = 5000
   makeTransactionRequired
 
   private lazy val mapState = TransactionalState.newMap[String, String]
@@ -86,8 +86,8 @@ class InMemFailerActor extends Actor {
 }
 
 class InMemoryActorTest extends JUnitSuite {
+  import Actor.Sender.Self
 
-  /*
   @Test
   def shouldOneWayMapShouldNotRollbackStateForStatefulServerInCaseOfSuccess = {
     val stateful = new InMemStatefulActor
@@ -98,7 +98,7 @@ class InMemoryActorTest extends JUnitSuite {
     Thread.sleep(1000)
     assert("new state" === (stateful !! GetMapState("testShouldNotRollbackStateForStatefulServerInCaseOfSuccess")).get)
   }
-  */
+
   @Test
   def shouldMapShouldNotRollbackStateForStatefulServerInCaseOfSuccess = {
     val stateful = new InMemStatefulActor
@@ -107,7 +107,7 @@ class InMemoryActorTest extends JUnitSuite {
     stateful !! Success("testShouldNotRollbackStateForStatefulServerInCaseOfSuccess", "new state") // transactionrequired
     assert("new state" === (stateful !! GetMapState("testShouldNotRollbackStateForStatefulServerInCaseOfSuccess")).get)
   }
-  /*
+
   @Test
   def shouldOneWayMapShouldRollbackStateForStatefulServerInCaseOfFailure = {
     val stateful = new InMemStatefulActor
@@ -120,7 +120,7 @@ class InMemoryActorTest extends JUnitSuite {
     Thread.sleep(1000)
     assert("init" === (stateful !! GetMapState("testShouldRollbackStateForStatefulServerInCaseOfFailure")).get) // check that state is == init state
   }
-  */
+
   @Test
   def shouldMapShouldRollbackStateForStatefulServerInCaseOfFailure = {
     val stateful = new InMemStatefulActor
@@ -134,7 +134,7 @@ class InMemoryActorTest extends JUnitSuite {
     } catch {case e: RuntimeException => {}}
     assert("init" === (stateful !! GetMapState("testShouldRollbackStateForStatefulServerInCaseOfFailure")).get) // check that state is == init state
   }
-  /*
+
   @Test
   def shouldOneWayVectorShouldNotRollbackStateForStatefulServerInCaseOfSuccess = {
     val stateful = new InMemStatefulActor
@@ -145,7 +145,7 @@ class InMemoryActorTest extends JUnitSuite {
     Thread.sleep(1000)
     assert(2 === (stateful !! GetVectorSize).get)
   }
-  */
+
   @Test
   def shouldVectorShouldNotRollbackStateForStatefulServerInCaseOfSuccess = {
     val stateful = new InMemStatefulActor
@@ -154,7 +154,7 @@ class InMemoryActorTest extends JUnitSuite {
     stateful !! Success("testShouldNotRollbackStateForStatefulServerInCaseOfSuccess", "new state") // transactionrequired
     assert(2 === (stateful !! GetVectorSize).get)
   }
-  /*
+
   @Test
   def shouldOneWayVectorShouldRollbackStateForStatefulServerInCaseOfFailure = {
     val stateful = new InMemStatefulActor
@@ -167,7 +167,7 @@ class InMemoryActorTest extends JUnitSuite {
     Thread.sleep(1000)
     assert(1 === (stateful !! GetVectorSize).get)
   }
-  */
+
   @Test
   def shouldVectorShouldRollbackStateForStatefulServerInCaseOfFailure = {
     val stateful = new InMemStatefulActor
@@ -181,7 +181,7 @@ class InMemoryActorTest extends JUnitSuite {
     } catch {case e: RuntimeException => {}}
     assert(1 === (stateful !! GetVectorSize).get)
   }
-  /*
+
   @Test
   def shouldOneWayRefShouldNotRollbackStateForStatefulServerInCaseOfSuccess = {
     val stateful = new InMemStatefulActor
@@ -192,7 +192,7 @@ class InMemoryActorTest extends JUnitSuite {
     Thread.sleep(1000)
     assert("new state" === (stateful !! GetRefState).get)
   }
-  */
+
   @Test
   def shouldRefShouldNotRollbackStateForStatefulServerInCaseOfSuccess = {
     val stateful = new InMemStatefulActor
@@ -201,7 +201,7 @@ class InMemoryActorTest extends JUnitSuite {
     stateful !! Success("testShouldNotRollbackStateForStatefulServerInCaseOfSuccess", "new state") // transactionrequired
     assert("new state" === (stateful !! GetRefState).get)
   }
-  /*
+
   @Test
   def shouldOneWayRefShouldRollbackStateForStatefulServerInCaseOfFailure = {
     val stateful = new InMemStatefulActor
@@ -212,9 +212,9 @@ class InMemoryActorTest extends JUnitSuite {
     failer.start
     stateful ! FailureOneWay("testShouldRollbackStateForStatefulServerInCaseOfFailure", "new state", failer) // call failing transactionrequired method
     Thread.sleep(1000)
-    assert("init" === (stateful !! GetRefState).get) // check that state is == init state
+    assert("init" === (stateful !! (GetRefState, 1000000)).get) // check that state is == init state
   }
-  */
+
   @Test
   def shouldRefShouldRollbackStateForStatefulServerInCaseOfFailure = {
     val stateful = new InMemStatefulActor
diff --git a/akka-core/src/test/scala/MemoryTest.scala b/akka-core/src/test/scala/MemoryTest.scala
index 2a56d61465..5684496c02 100644
--- a/akka-core/src/test/scala/MemoryTest.scala
+++ b/akka-core/src/test/scala/MemoryTest.scala
@@ -22,9 +22,11 @@ class MemoryFootprintTest extends JUnitSuite {
     // Actors are put in AspectRegistry when created so they won't be GCd here
 
     val totalMem = Runtime.getRuntime.totalMemory - Runtime.getRuntime.freeMemory
+    println("Memory before " + totalMem)
     (1 until NR_OF_ACTORS).foreach(i => new Mem)
 
     val newTotalMem = Runtime.getRuntime.totalMemory - Runtime.getRuntime.freeMemory
+    println("Memory after " + newTotalMem)
     val memPerActor = (newTotalMem - totalMem) / NR_OF_ACTORS
 
     println("Memory footprint per actor is : " + memPerActor)
diff --git a/akka-core/src/test/scala/PerformanceTest.scala b/akka-core/src/test/scala/PerformanceTest.scala
index d58d075202..43a4d46650 100644
--- a/akka-core/src/test/scala/PerformanceTest.scala
+++ b/akka-core/src/test/scala/PerformanceTest.scala
@@ -1,4 +1,4 @@
-package test
+package se.scalablesolutions.akka
 
 import org.scalatest.junit.JUnitSuite
 import org.junit.Test
@@ -31,12 +31,12 @@ class PerformanceTest extends JUnitSuite {
 
     case class Meet(from: Actor, colour: Colour)
     case class Change(colour: Colour)
-    case class MeetingCount(count: int)
+    case class MeetingCount(count: Int)
     case class ExitActor(actor: Actor, reason: String)
 
     var totalTime = 0L
 
-    class Mall(var nrMeets: int, numChameneos: int) extends Actor {
+    class Mall(var nrMeets: Int, numChameneos: Int) extends Actor {
       var waitingChameneo: Option[Actor] = None
       var sumMeetings = 0
       var numFaded = 0
@@ -86,7 +86,7 @@ class PerformanceTest extends JUnitSuite {
       }
     }
 
-    case class Chameneo(var mall: Mall, var colour: Colour, cid: int) extends Actor {
+    case class Chameneo(var mall: Mall, var colour: Colour, cid: Int) extends Actor {
       var meetings = 0
 
       override def start = {
@@ -160,10 +160,10 @@ class PerformanceTest extends JUnitSuite {
 
     case class Meet(colour: Colour)
     case class Change(colour: Colour)
-    case class MeetingCount(count: int)
+    case class MeetingCount(count: Int)
 
-    class Mall(var n: int, numChameneos: int) extends Actor {
+    class Mall(var n: Int, numChameneos: Int) extends Actor {
       var waitingChameneo: Option[OutputChannel[Any]] = None
       var startTime: Long = 0L
 
@@ -218,7 +218,7 @@ class PerformanceTest extends JUnitSuite {
       }
     }
 
-    case class Chameneo(var mall: Mall, var colour: Colour, id: int) extends Actor {
+    case class Chameneo(var mall: Mall, var colour: Colour, id: Int) extends Actor {
      var meetings = 0
 
      def act() {
@@ -279,7 +279,7 @@ class PerformanceTest extends JUnitSuite {
     var nrOfMessages = 2000000
     var nrOfActors = 4
 
-    var akkaTime = stressTestAkkaActors(nrOfMessages, nrOfActors, 1000 * 20)
+    var akkaTime = stressTestAkkaActors(nrOfMessages, nrOfActors, 1000 * 30)
     var scalaTime = stressTestScalaActors(nrOfMessages, nrOfActors, 1000 * 40)
 
     var ratio: Double = scalaTime.toDouble / akkaTime.toDouble
diff --git a/akka-core/src/test/scala/RemoteClientShutdownTest.scala b/akka-core/src/test/scala/RemoteClientShutdownTest.scala
index f6fbea1bb9..d330dce5ce 100644
--- a/akka-core/src/test/scala/RemoteClientShutdownTest.scala
+++ b/akka-core/src/test/scala/RemoteClientShutdownTest.scala
@@ -6,7 +6,7 @@ import Actor.Sender.Self
 import org.scalatest.junit.JUnitSuite
 import org.junit.Test
 
-
+/*
 class RemoteClientShutdownTest extends JUnitSuite {
   @Test def shouldShutdownRemoteClient = {
     RemoteNode.start("localhost", 9999)
@@ -28,3 +28,4 @@ class TravelingActor extends RemoteActor("localhost", 9999) {
     case _ => log.info("message received")
   }
 }
+*/
\ No newline at end of file
diff --git a/akka-core/src/test/scala/RemoteSupervisorTest.scala b/akka-core/src/test/scala/RemoteSupervisorTest.scala
index 57f01a6dda..7222e68b87 100644
--- a/akka-core/src/test/scala/RemoteSupervisorTest.scala
+++ b/akka-core/src/test/scala/RemoteSupervisorTest.scala
@@ -74,7 +74,7 @@ object Log {
 class RemoteSupervisorTest extends JUnitSuite {
   import Actor.Sender.Self
 
-  akka.Config.config
+  akka.config.Config.config
 
   new Thread(new Runnable() {
     def run = {
       RemoteNode.start
diff --git a/akka-core/src/test/scala/ServerInitiatedRemoteActorTest.scala b/akka-core/src/test/scala/ServerInitiatedRemoteActorTest.scala
index 2f1ef161c8..22ea078b1e 100644
--- a/akka-core/src/test/scala/ServerInitiatedRemoteActorTest.scala
+++ b/akka-core/src/test/scala/ServerInitiatedRemoteActorTest.scala
@@ -60,7 +60,7 @@ class ServerInitiatedRemoteActorTest extends JUnitSuite {
   import ServerInitiatedRemoteActorTest._
   import Actor.Sender.Self
 
-  akka.Config.config
+  akka.config.Config.config
 
   private val unit = TimeUnit.MILLISECONDS
 
diff --git a/akka-core/src/test/scala/ShutdownSpec.scala b/akka-core/src/test/scala/ShutdownSpec.scala
index ba03fbe902..20927bbfb1 100644
--- a/akka-core/src/test/scala/ShutdownSpec.scala
+++ b/akka-core/src/test/scala/ShutdownSpec.scala
@@ -2,9 +2,8 @@ package se.scalablesolutions.akka.remote
 
 import se.scalablesolutions.akka.actor.Actor
 
-object ActorShutdownSpec {
+object ActorShutdownRunner {
   def main(args: Array[String]) {
-
     class MyActor extends Actor {
       def receive = {
         case "test" => println("received test")
@@ -22,7 +21,7 @@ object ActorShutdownSpec {
 
 // case 2
 
-object RemoteServerAndClusterShutdownSpec {
+object RemoteServerAndClusterShutdownRunner {
   def main(args: Array[String]) {
     val s1 = new RemoteServer
     val s2 = new RemoteServer
diff --git a/akka-core/src/test/scala/ThreadBasedDispatcherTest.scala b/akka-core/src/test/scala/ThreadBasedDispatcherTest.scala
index b9663352c7..c848c56991 100644
--- a/akka-core/src/test/scala/ThreadBasedDispatcherTest.scala
+++ b/akka-core/src/test/scala/ThreadBasedDispatcherTest.scala
@@ -78,7 +78,7 @@ class ThreadBasedDispatcherTest extends JUnitSuite {
     })
     dispatcher.start
     for (i <- 0 until 100) {
-      dispatcher.dispatch(new MessageInvocation(key1, new Integer(i), None, None, None))
+      dispatcher.dispatch(new MessageInvocation(key1, i, None, None, None))
     }
     assert(handleLatch.await(5, TimeUnit.SECONDS))
     assert(!threadingIssueDetected.get)
diff --git a/akka-fun-test-java/pom.xml b/akka-fun-test-java/pom.xml
old mode 100644
new mode 100755
index beb19f25c5..5c55cf0be5
--- a/akka-fun-test-java/pom.xml
+++ b/akka-fun-test-java/pom.xml
@@ -5,31 +5,43 @@
   Akka Functional Tests in Java
   akka-fun-test-java
-
+  se.scalablesolutions.akka
+  0.7-SNAPSHOT
   jar
-
-  akka
-  se.scalablesolutions.akka
-  0.7-SNAPSHOT
-
+
+  2.7.7
+  0.5.2
+  1.1.5
+  1.9.18-i
+
   akka-kernel
-  ${project.groupId}
-  ${project.version}
+  se.scalablesolutions.akka
+  0.7-SNAPSHOT
 
-  ${project.groupId}
+  se.scalablesolutions.akka
   akka-persistence-cassandra
-  ${project.version}
+  0.7-SNAPSHOT
 
   com.google.protobuf
   protobuf-java
   2.2.0
+
+  org.codehaus.jackson
+  jackson-core-asl
+  1.2.1
+
+
+  org.codehaus.jackson
+  jackson-mapper-asl
+  1.2.1
+
   com.sun.grizzly
   grizzly-servlet-webserver
@@ -94,7 +106,6 @@
   maven-surefire-plugin
-  **/InMemNestedStateTest*
   **/*Persistent*
diff --git a/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/ActiveObjectGuiceConfiguratorTest.java b/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/ActiveObjectGuiceConfiguratorTest.java
index d328f2452d..69f74ec537 100644
--- a/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/ActiveObjectGuiceConfiguratorTest.java
+++ b/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/ActiveObjectGuiceConfiguratorTest.java
@@ -9,7 +9,7 @@ import com.google.inject.Scopes;
 
 import junit.framework.TestCase;
 
-import se.scalablesolutions.akka.Config;
+import se.scalablesolutions.akka.config.Config;
 import se.scalablesolutions.akka.config.ActiveObjectConfigurator;
 import static se.scalablesolutions.akka.config.JavaConfig.*;
 import se.scalablesolutions.akka.dispatch.*;
diff --git a/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/Bar.java b/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/Bar.java
index 9a3ff80aca..3d85d89a17 100644
--- a/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/Bar.java
+++ b/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/Bar.java
@@ -1,6 +1,6 @@
 package se.scalablesolutions.akka.api;
 
-import se.scalablesolutions.akka.annotation.oneway;
+import se.scalablesolutions.akka.actor.annotation.oneway;
 
 public interface Bar {
   @oneway
diff --git a/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/Foo.java b/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/Foo.java
index bb9cfd83d4..962f0b9424 100644
--- a/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/Foo.java
+++ b/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/Foo.java
@@ -1,7 +1,7 @@
 package se.scalablesolutions.akka.api;
 
 import com.google.inject.Inject;
-import se.scalablesolutions.akka.annotation.oneway;
+import se.scalablesolutions.akka.actor.annotation.oneway;
 
 public class Foo extends se.scalablesolutions.akka.serialization.Serializable.JavaJSON {
   @Inject
diff --git a/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/InMemNestedStateTest.java b/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/InMemNestedStateTest.java
index 8a51feed6b..992c188fa1 100644
--- a/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/InMemNestedStateTest.java
+++ b/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/InMemNestedStateTest.java
@@ -4,14 +4,14 @@
 
 package se.scalablesolutions.akka.api;
 
-import se.scalablesolutions.akka.Config;
 import se.scalablesolutions.akka.config.*;
+import se.scalablesolutions.akka.config.Config;
 import se.scalablesolutions.akka.config.ActiveObjectConfigurator;
 import static se.scalablesolutions.akka.config.JavaConfig.*;
 import se.scalablesolutions.akka.actor.*;
-import se.scalablesolutions.akka.Kernel;
+import se.scalablesolutions.akka.kernel.Kernel;
 import junit.framework.TestCase;
-/*
+
 public class InMemNestedStateTest extends TestCase {
   static String messageLog = "";
 
@@ -133,4 +133,3 @@ public class InMemNestedStateTest extends TestCase {
     assertEquals("init", nested.getRefState()); // check that state is == init state
   }
 }
-*/
\ No newline at end of file
diff --git a/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/InMemStateful.java b/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/InMemStateful.java
index 60b2008716..afe2f2e232 100644
--- a/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/InMemStateful.java
+++ b/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/InMemStateful.java
@@ -1,10 +1,10 @@
 package se.scalablesolutions.akka.api;
 
-import se.scalablesolutions.akka.annotation.transactionrequired;
-import se.scalablesolutions.akka.annotation.prerestart;
-import se.scalablesolutions.akka.annotation.postrestart;
-import se.scalablesolutions.akka.annotation.inittransactionalstate;
-import se.scalablesolutions.akka.state.*;
+import se.scalablesolutions.akka.actor.annotation.transactionrequired;
+import se.scalablesolutions.akka.actor.annotation.prerestart;
+import se.scalablesolutions.akka.actor.annotation.postrestart;
+import se.scalablesolutions.akka.actor.annotation.inittransactionalstate;
+import se.scalablesolutions.akka.stm.*;
 
 @transactionrequired
 public class InMemStateful {
diff --git a/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/InMemStatefulNested.java b/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/InMemStatefulNested.java
index abaedf8ae9..932dc2c162 100644
--- a/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/InMemStatefulNested.java
+++ b/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/InMemStatefulNested.java
@@ -1,8 +1,8 @@
 package se.scalablesolutions.akka.api;
 
-import se.scalablesolutions.akka.annotation.transactionrequired;
-import se.scalablesolutions.akka.annotation.inittransactionalstate;
-import se.scalablesolutions.akka.state.*;
+import se.scalablesolutions.akka.actor.annotation.transactionrequired;
+import se.scalablesolutions.akka.actor.annotation.inittransactionalstate;
+import se.scalablesolutions.akka.stm.*;
 
 @transactionrequired
 public class InMemStatefulNested {
diff --git a/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/InMemoryStateTest.java b/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/InMemoryStateTest.java
index aa2704685f..740bfd892c 100644
--- a/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/InMemoryStateTest.java
+++ b/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/InMemoryStateTest.java
@@ -6,14 +6,14 @@ package se.scalablesolutions.akka.api;
 
 import junit.framework.TestCase;
 
-import se.scalablesolutions.akka.Config;
+import se.scalablesolutions.akka.config.Config;
 import se.scalablesolutions.akka.config.*;
 import se.scalablesolutions.akka.config.ActiveObjectConfigurator;
 import static se.scalablesolutions.akka.config.JavaConfig.*;
 import se.scalablesolutions.akka.actor.*;
-import se.scalablesolutions.akka.Kernel;
+import se.scalablesolutions.akka.kernel.Kernel;
 
 public class InMemoryStateTest extends TestCase {
   static String messageLog = "";
diff --git a/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/PersistenceManager.java b/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/PersistenceManager.java
index cd856b64df..080c1cbd0b 100644
--- a/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/PersistenceManager.java
+++ b/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/PersistenceManager.java
@@ -4,7 +4,7 @@ public class PersistenceManager {
   private static volatile boolean isRunning = false;
   public static void init() {
     if (!isRunning) {
-      se.scalablesolutions.akka.Kernel$.MODULE$.startRemoteService();
+      se.scalablesolutions.akka.kernel.Kernel$.MODULE$.startRemoteService();
       isRunning = true;
     }
   }
diff --git 
a/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/PersistentClasher.java b/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/PersistentClasher.java index d5360da3bc..d5c1bdf00c 100644 --- a/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/PersistentClasher.java +++ b/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/PersistentClasher.java @@ -1,7 +1,8 @@ package se.scalablesolutions.akka.api; -import se.scalablesolutions.akka.state.*; -import se.scalablesolutions.akka.annotation.inittransactionalstate; +import se.scalablesolutions.akka.persistence.common.*; +import se.scalablesolutions.akka.persistence.cassandra.*; +import se.scalablesolutions.akka.actor.annotation.inittransactionalstate; public class PersistentClasher { private PersistentMap state; diff --git a/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/PersistentNestedStateTest.java b/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/PersistentNestedStateTest.java index 7fd3a65dfb..796d3d913a 100644 --- a/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/PersistentNestedStateTest.java +++ b/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/PersistentNestedStateTest.java @@ -8,7 +8,7 @@ import se.scalablesolutions.akka.config.*; import se.scalablesolutions.akka.config.ActiveObjectConfigurator; import static se.scalablesolutions.akka.config.JavaConfig.*; import se.scalablesolutions.akka.actor.*; - import se.scalablesolutions.akka.Kernel; +import se.scalablesolutions.akka.kernel.Kernel; import junit.framework.TestCase; diff --git a/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/PersistentStateful.java b/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/PersistentStateful.java index 3cac0ae062..6a8d3353b7 100644 --- a/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/PersistentStateful.java +++ b/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/PersistentStateful.java @@ -1,8 +1,9 @@ package se.scalablesolutions.akka.api; -import se.scalablesolutions.akka.annotation.inittransactionalstate; -import se.scalablesolutions.akka.annotation.transactionrequired; -import se.scalablesolutions.akka.state.*; +import se.scalablesolutions.akka.actor.annotation.inittransactionalstate; +import se.scalablesolutions.akka.actor.annotation.transactionrequired; +import se.scalablesolutions.akka.persistence.common.*; +import se.scalablesolutions.akka.persistence.cassandra.*; @transactionrequired public class PersistentStateful { diff --git a/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/PersistentStatefulNested.java b/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/PersistentStatefulNested.java index 50e9b7ae1d..bd931ef108 100644 --- a/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/PersistentStatefulNested.java +++ b/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/PersistentStatefulNested.java @@ -1,8 +1,9 @@ package se.scalablesolutions.akka.api; -import se.scalablesolutions.akka.annotation.inittransactionalstate; -import se.scalablesolutions.akka.annotation.transactionrequired; -import se.scalablesolutions.akka.state.*; +import se.scalablesolutions.akka.actor.annotation.inittransactionalstate; +import se.scalablesolutions.akka.actor.annotation.transactionrequired; +import se.scalablesolutions.akka.persistence.common.*; +import se.scalablesolutions.akka.persistence.cassandra.*; @transactionrequired public class 
PersistentStatefulNested { diff --git a/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/RemoteInMemoryStateTest.java b/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/RemoteInMemoryStateTest.java index d2f67e4bc7..d0c22470e2 100644 --- a/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/RemoteInMemoryStateTest.java +++ b/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/RemoteInMemoryStateTest.java @@ -4,7 +4,7 @@ package se.scalablesolutions.akka.api; -import se.scalablesolutions.akka.Config; +import se.scalablesolutions.akka.config.Config; import se.scalablesolutions.akka.actor.ActiveObject; import se.scalablesolutions.akka.config.ActiveObjectConfigurator; import se.scalablesolutions.akka.remote.RemoteNode; diff --git a/akka-kernel/pom.xml b/akka-kernel/pom.xml deleted file mode 100644 index 4b1d114d45..0000000000 --- a/akka-kernel/pom.xml +++ /dev/null @@ -1,136 +0,0 @@ - - 4.0.0 - - akka-kernel - Akka Kernel Module - - jar - - - akka - se.scalablesolutions.akka - 0.7-SNAPSHOT - - - - - - akka-rest - ${project.groupId} - ${project.version} - - - akka-amqp - ${project.groupId} - ${project.version} - - - akka-security - ${project.groupId} - ${project.version} - - - akka-persistence-cassandra - ${project.groupId} - ${project.version} - - - akka-persistence-mongo - ${project.groupId} - ${project.version} - - - akka-persistence-redis - ${project.groupId} - ${project.version} - - - akka-comet - ${project.groupId} - ${project.version} - - - akka-cluster-jgroups - ${project.groupId} - ${project.version} - - - - - - com.sun.jersey - jersey-server - ${jersey.version} - - - org.atmosphere - atmosphere-annotations - ${atmosphere.version} - - - org.atmosphere - atmosphere-jersey - ${atmosphere.version} - - - org.atmosphere - atmosphere-runtime - ${atmosphere.version} - - - - - - - org.apache.maven.plugins - maven-shade-plugin - 1.2.1 - - - install - - shade - - - - - junit:junit - - - - - - - se.scalablesolutions.akka.Main - - - - - - - - maven-antrun-plugin - - - install - - - - - - - run - - - - - - - diff --git a/akka-kernel/src/main/scala/Kernel.scala b/akka-kernel/src/main/scala/Kernel.scala index f63a50a0a7..6c0cd87058 100644 --- a/akka-kernel/src/main/scala/Kernel.scala +++ b/akka-kernel/src/main/scala/Kernel.scala @@ -2,11 +2,14 @@ * Copyright (C) 2009-2010 Scalable Solutions AB */ -package se.scalablesolutions.akka +package se.scalablesolutions.akka.kernel import se.scalablesolutions.akka.remote.BootableRemoteActorService +import se.scalablesolutions.akka.comet.BootableCometActorService import se.scalablesolutions.akka.actor.BootableActorLoaderService -import se.scalablesolutions.akka.util.{Logging,Bootable} +import se.scalablesolutions.akka.camel.service.CamelService +import se.scalablesolutions.akka.config.Config +import se.scalablesolutions.akka.util.{Logging, Bootable} import javax.servlet.{ServletContextListener, ServletContextEvent} @@ -27,12 +30,16 @@ object Kernel extends Logging { /** * Holds a reference to the services that has been booted */ - @volatile private var bundles : Option[Bootable] = None + @volatile private var bundles: Option[Bootable] = None /** - * Boots up the Kernel with default bootables + * Boots up the Kernel with default bootables */ - def boot : Unit = boot(true, new BootableActorLoaderService with BootableRemoteActorService with BootableCometActorService) + def boot: Unit = boot(true, + new BootableActorLoaderService + with BootableRemoteActorService + with BootableCometActorService + with CamelService) 
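The rewritten boot above stacks the default services as trait mixins on a single object. A minimal sketch of that stacking pattern, assuming a simplified stand-in for Bootable and two hypothetical service traits (the real trait lives in se.scalablesolutions.akka.util and this diff does not show its members):

// Sketch only: Bootable is a simplified stand-in here, and the two service
// traits are hypothetical; the real mixins are BootableActorLoaderService etc.
trait Bootable {
  def onLoad(): Unit = ()
  def onUnload(): Unit = ()
}

trait LoaderService extends Bootable {
  override def onLoad(): Unit = { println("loader service up"); super.onLoad() }
}

trait RemoteService extends Bootable {
  override def onLoad(): Unit = { println("remote service up"); super.onLoad() }
}

object BootSketch {
  def main(args: Array[String]): Unit = {
    // Trait linearization dispatches to the rightmost mixin first, then chains
    // left through super.onLoad(), so one call starts every mixed-in service.
    val services = new LoaderService with RemoteService
    services.onLoad() // prints "remote service up", then "loader service up"
  }
}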
/** * Boots up the Kernel. @@ -63,8 +70,8 @@ object Kernel extends Logging { } //For testing purposes only - def startRemoteService : Unit = bundles.foreach( _ match { - case x : BootableRemoteActorService => x.startRemoteService + def startRemoteService: Unit = bundles.foreach( _ match { + case x: BootableRemoteActorService => x.startRemoteService case _ => }) @@ -79,16 +86,18 @@ object Kernel extends Logging { (____ /__|_ \__|_ \(____ / \/ \/ \/ \/ """) - log.info(" Running version %s", Config.VERSION) + log.info(" Running version %s", Config.VERSION) log.info("==============================") } } - /* - And this one can be added to web.xml mappings as a listener to boot and shutdown Akka - */ - + /** + * This class can be added to web.xml mappings as a listener to boot and shutdown Akka. + */ class Kernel extends ServletContextListener { - def contextDestroyed(e : ServletContextEvent) : Unit = Kernel.shutdown - def contextInitialized(e : ServletContextEvent) : Unit = Kernel.boot(true,new BootableActorLoaderService with BootableRemoteActorService) + def contextDestroyed(e: ServletContextEvent): Unit = + Kernel.shutdown + + def contextInitialized(e: ServletContextEvent): Unit = + Kernel.boot(true, new BootableActorLoaderService with BootableRemoteActorService) } \ No newline at end of file diff --git a/akka-patterns/pom.xml b/akka-patterns/pom.xml deleted file mode 100644 index 6d3f8e0b41..0000000000 --- a/akka-patterns/pom.xml +++ /dev/null @@ -1,39 +0,0 @@ - - 4.0.0 - - akka-patterns - Akka Patterns Module - - jar - - - akka - se.scalablesolutions.akka - 0.7-SNAPSHOT - - - - - - akka-core - ${project.groupId} - ${project.version} - - - - - org.scalatest - scalatest - 1.0 - test - - - junit - junit - 4.5 - test - - - diff --git a/akka-patterns/src/main/scala/Agent.scala b/akka-patterns/src/main/scala/Agent.scala deleted file mode 100644 index 4dd8640c32..0000000000 --- a/akka-patterns/src/main/scala/Agent.scala +++ /dev/null @@ -1,146 +0,0 @@ -// ScalaAgent -// -// Copyright © 2008-9 The original author or authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package se.scalablesolutions.akka.actor - -import se.scalablesolutions.akka.state.TransactionalState -import se.scalablesolutions.akka.stm.Transaction.atomic - -import java.util.concurrent.atomic.AtomicReference -import java.util.concurrent.{CountDownLatch} - -/** -* The Agent class was strongly inspired by the agent principle in Clojure. Essentially, an agent wraps a shared mutable state -* and hides it behind a message-passing interface. Agents accept messages and process them on behalf of the wrapped state. -* Typically agents accept functions / commands as messages and ensure the submitted commands are executed against the internal -* agent's state in a thread-safe manner (sequentially). -* The submitted functions / commands take the internal state as a parameter and their output becomes the new internal state value. 
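The Agent scaladoc above points to "examples of use" that are not part of this hunk; a minimal usage sketch, relying only on the API declared in this (now deleted) file -- Agent(initialValue), apply/update with a function to submit a state transition, and get for a blocking read:

// Sketch against the deleted Agent API. Both sends go through the agent's
// single mailbox, so they are applied to the state in order; get blocks on an
// internal CountDownLatch until the read request is processed.
val counter = Agent(0)        // wrap an initial value of 0
counter(_ + 1)                // asynchronously: state becomes state + 1
counter update (_ * 10)       // asynchronously: state becomes state * 10
println(counter.get)          // prints 10 (the read is queued after both updates)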
-* The code that is submitted to an agent doesn't need to pay attention to threading or synchronization, the agent will -* provide such guarantees by itself. -* See the examples of use for more details. -* -* @author Vaclav Pech -* Date: Oct 18, 2009 -* -* AKKA retrofit by -* @author Viktor Klang -* Date: Jan 24 2010 -*/ -sealed class Agent[T] private (initialValue: T) extends Actor { - import Agent._ - - private val value = TransactionalState.newRef[T] - - updateData(initialValue) - - /** - * Periodically handles incoming messages - */ - def receive = { - case FunctionHolder(fun: (T => T)) => atomic { updateData(fun(value.getOrWait)) } - - case ValueHolder(x: T) => updateData(x) - - case ProcedureHolder(fun: (T => Unit)) => atomic { fun(copyStrategy(value.getOrWait)) } - } - - /** - * Specifies how a copy of the value is made, defaults to using identity - */ - protected def copyStrategy(t : T) : T = t - - - /** - * Updates the internal state with the value provided as a by-name parameter - */ - private final def updateData(newData: => T) : Unit = atomic { value.swap(newData) } - - /** - * Submits a request to read the internal state. - * A copy of the internal state will be returned, depending on the underlying effective copyStrategy. - * Internally leverages the asynchronous getValue() method and then waits for its result on a CountDownLatch. - */ - final def get : T = { - val ref = new AtomicReference[T] - val latch = new CountDownLatch(1) - get((x: T) => {ref.set(x); latch.countDown}) - latch.await - ref.get - } - - /** - * Asynchronously submits a request to read the internal state. The supplied function will be executed on the returned internal state value. - * A copy of the internal state will be used, depending on the underlying effective copyStrategy. - */ - final def get(message: (T => Unit)) : Unit = this ! ProcedureHolder(message) - - /** - * Submits a request to read the internal state. - * A copy of the internal state will be returned, depending on the underlying effective copyStrategy. - * Internally leverages the asynchronous getValue() method and then waits for its result on a CountDownLatch. - */ - final def apply() : T = get - - /** - * Asynchronously submits a request to read the internal state. The supplied function will be executed on the returned internal state value. - * A copy of the internal state will be used, depending on the underlying effective copyStrategy. - */ -// final def apply(message: (T => Unit)) : Unit = get(message) - - /** - * Submits the provided function for execution against the internal agent's state - */ - final def apply(message: (T => T)) : Unit = this ! FunctionHolder(message) - - /** - * Submits a new value to be set as the new agent's internal state - */ - final def apply(message: T) : Unit = this ! ValueHolder(message) - - /** - * Submits the provided function for execution against the internal agent's state - */ - final def update(message: (T => T)) : Unit = this ! FunctionHolder(message) - - /** - * Submits a new value to be set as the new agent's internal state - */ - final def update(message: T) : Unit = this ! ValueHolder(message) -} - -/** -* Provides factory methods to create Agents. 
-*/ -object Agent { - /** - * The internal messages for passing around requests - */ - private case class ProcedureHolder[T](val fun: ((T) => Unit)) - private case class FunctionHolder[T](val fun: ((T) => T)) - private case class ValueHolder[T](val value: T) - - /** - * Creates a new Agent of type T with the initial value of value - */ - def apply[T](value:T) : Agent[T] = new Agent(value) - - /** - * Creates a new Agent of type T with the initial value of value and with the specified copy function - */ - def apply[T](value:T, newCopyStrategy: (T) => T) = new Agent(value) { - override def copyStrategy(t : T) = newCopyStrategy(t) - } -} diff --git a/akka-patterns/src/main/scala/Patterns.scala b/akka-patterns/src/main/scala/Patterns.scala index b967c07df7..3b7982148e 100644 --- a/akka-patterns/src/main/scala/Patterns.scala +++ b/akka-patterns/src/main/scala/Patterns.scala @@ -1,16 +1,16 @@ -package se.scalablesolutions.akka.actor.patterns +package se.scalablesolutions.akka.patterns import se.scalablesolutions.akka.actor.Actor object Patterns { - type PF[A,B] = PartialFunction[A,B] + type PF[A, B] = PartialFunction[A, B] /** * Creates a new PartialFunction whose isDefinedAt is a combination * of the two parameters, and whose apply is first to call filter.apply and then filtered.apply */ - def filter[A,B](filter : PF[A,Unit],filtered : PF[A,B]) : PF[A,B] = { - case a : A if filtered.isDefinedAt(a) && filter.isDefinedAt(a) => + def filter[A, B](filter: PF[A, Unit], filtered: PF[A, B]): PF[A, B] = { + case a: A if filtered.isDefinedAt(a) && filter.isDefinedAt(a) => filter(a) filtered(a) } @@ -18,61 +18,58 @@ object Patterns { /** * Interceptor is a filter(x,y) where x.isDefinedAt is considered to be always true */ - def intercept[A,B](interceptor : (A) => Unit, interceptee : PF[A,B]) : PF[A,B] = filter( - { case a if a.isInstanceOf[A] => interceptor(a) }, - interceptee - ) - + def intercept[A, B](interceptor: (A) => Unit, interceptee: PF[A, B]): PF[A, B] = + filter({case a if a.isInstanceOf[A] => interceptor(a)}, interceptee) + //FIXME 2.8, use default params with CyclicIterator - def loadBalancerActor(actors : => InfiniteIterator[Actor]) : Actor = new Actor with LoadBalancer { + def loadBalancerActor(actors: => InfiniteIterator[Actor]): Actor = new Actor with LoadBalancer { val seq = actors } - def dispatcherActor(routing : PF[Any,Actor], msgTransformer : (Any) => Any) : Actor = new Actor with Dispatcher { - override def transform(msg : Any) = msgTransformer(msg) + def dispatcherActor(routing: PF[Any, Actor], msgTransformer: (Any) => Any): Actor = + new Actor with Dispatcher { + override def transform(msg: Any) = msgTransformer(msg) def routes = routing } - - def dispatcherActor(routing : PF[Any,Actor]) : Actor = new Actor with Dispatcher { - def routes = routing + + def dispatcherActor(routing: PF[Any, Actor]): Actor = new Actor with Dispatcher { + def routes = routing } - def loggerActor(actorToLog : Actor, logger : (Any) => Unit) : Actor = dispatcherActor ( - { case _ => actorToLog }, - logger - ) + def loggerActor(actorToLog: Actor, logger: (Any) => Unit): Actor = + dispatcherActor({case _ => actorToLog}, logger) } -trait Dispatcher { self : Actor => +trait Dispatcher { self: Actor => - protected def transform(msg : Any) : Any = msg - protected def routes : PartialFunction[Any,Actor] - - protected def dispatch : PartialFunction[Any,Unit] = { - case a if routes.isDefinedAt(a) => { - if(self.sender.isDefined) - routes(a) forward transform(a) - else - routes(a) send transform(a) - } + protected 
def transform(msg: Any): Any = msg + + protected def routes: PartialFunction[Any, Actor] + + protected def dispatch: PartialFunction[Any, Unit] = { + case a if routes.isDefinedAt(a) => + if (self.sender.isDefined) routes(a) forward transform(a) + else routes(a) send transform(a) } def receive = dispatch } -trait LoadBalancer extends Dispatcher { self : Actor => - protected def seq : InfiniteIterator[Actor] +trait LoadBalancer extends Dispatcher { self: Actor => + protected def seq: InfiniteIterator[Actor] protected def routes = { case x if seq.hasNext => seq.next } } trait InfiniteIterator[T] extends Iterator[T] -class CyclicIterator[T](items : List[T]) extends InfiniteIterator[T] { - @volatile private[this] var current : List[T] = items +class CyclicIterator[T](items: List[T]) extends InfiniteIterator[T] { + @volatile private[this] var current: List[T] = items + def hasNext = items != Nil + def next = { - val nc = if(current == Nil) items else current + val nc = if (current == Nil) items else current current = nc.tail nc.head } diff --git a/akka-patterns/src/test/scala/ActorPatternsTest.scala b/akka-patterns/src/test/scala/ActorPatternsTest.scala index 11f2664640..3019af0436 100644 --- a/akka-patterns/src/test/scala/ActorPatternsTest.scala +++ b/akka-patterns/src/test/scala/ActorPatternsTest.scala @@ -1,11 +1,11 @@ -package se.scalablesolutions.akka.actor +package se.scalablesolutions.akka.patterns - -import config.ScalaConfig._ +import se.scalablesolutions.akka.config.ScalaConfig._ +import se.scalablesolutions.akka.actor.Actor +import se.scalablesolutions.akka.actor.Actor._ +import se.scalablesolutions.akka.util.Logging import org.scalatest.Suite -import patterns.Patterns -import se.scalablesolutions.akka.util.Logging import org.junit.runner.RunWith import org.scalatest.junit.JUnitRunner import org.scalatest.matchers.MustMatchers @@ -14,19 +14,18 @@ import scala.collection.mutable.HashSet @RunWith(classOf[JUnitRunner]) class ActorPatternsTest extends junit.framework.TestCase with Suite with MustMatchers with ActorTestUtil with Logging { - import Actor._ import Patterns._ @Test def testDispatcher = verify(new TestActor { def test = { val (testMsg1,testMsg2,testMsg3,testMsg4) = ("test1","test2","test3","test4") var targetOk = 0 - val t1 = actor() receive { + val t1: Actor = actor { case `testMsg1` => targetOk += 2 case `testMsg2` => targetOk += 4 } - val t2 = actor() receive { + val t2: Actor = actor { case `testMsg3` => targetOk += 8 } @@ -48,7 +47,7 @@ class ActorPatternsTest extends junit.framework.TestCase with Suite with MustMat @Test def testLogger = verify(new TestActor { def test = { val msgs = new HashSet[Any] - val t1 = actor() receive { + val t1: Actor = actor { case _ => } val l = loggerActor(t1,(x) => msgs += x) diff --git a/akka-persistence/akka-persistence-cassandra/pom.xml b/akka-persistence/akka-persistence-cassandra/pom.xml deleted file mode 100644 index d8490382d5..0000000000 --- a/akka-persistence/akka-persistence-cassandra/pom.xml +++ /dev/null @@ -1,86 +0,0 @@ - - 4.0.0 - - akka-persistence-cassandra - Akka Persistence Cassandra Module - - jar - - - akka-persistence-parent - se.scalablesolutions.akka - 0.7-SNAPSHOT - - - - - akka-persistence-common - ${project.groupId} - ${project.version} - - - com.google.code.google-collections - google-collect - - - - - - - org.apache.cassandra - cassandra - 0.5.0 - - - org.apache.cassandra - high-scale-lib - 0.5.0 - test - - - org.apache.cassandra - clhm-production - 0.5.0 - test - - - com.google.collections - google-collections - 
1.0-rc1 - test - - - commons-collections - commons-collections - 3.2.1 - test - - - commons-lang - commons-lang - 2.4 - test - - - org.slf4j - slf4j-api - 1.5.8 - test - - - org.slf4j - slf4j-log4j12 - 1.5.8 - test - - - - - log4j - log4j - 1.2.13 - - - - diff --git a/akka-persistence/akka-persistence-cassandra/src/main/scala/CassandraSession.scala b/akka-persistence/akka-persistence-cassandra/src/main/scala/CassandraSession.scala index 0b0c5ca43a..5141dc7cb2 100644 --- a/akka-persistence/akka-persistence-cassandra/src/main/scala/CassandraSession.scala +++ b/akka-persistence/akka-persistence-cassandra/src/main/scala/CassandraSession.scala @@ -2,14 +2,15 @@ * Copyright (C) 2009-2010 Scalable Solutions AB */ -package se.scalablesolutions.akka.state +package se.scalablesolutions.akka.persistence.cassandra import java.io.{Flushable, Closeable} +import se.scalablesolutions.akka.persistence.common._ import se.scalablesolutions.akka.util.Logging import se.scalablesolutions.akka.util.Helpers._ import se.scalablesolutions.akka.serialization.Serializer -import se.scalablesolutions.akka.Config.config +import se.scalablesolutions.akka.config.Config.config import scala.collection.mutable.Map diff --git a/akka-persistence/akka-persistence-cassandra/src/main/scala/CassandraStorage.scala b/akka-persistence/akka-persistence-cassandra/src/main/scala/CassandraStorage.scala index 59a27963f4..be5fc4f4c7 100644 --- a/akka-persistence/akka-persistence-cassandra/src/main/scala/CassandraStorage.scala +++ b/akka-persistence/akka-persistence-cassandra/src/main/scala/CassandraStorage.scala @@ -2,16 +2,18 @@ * Copyright (C) 2009-2010 Scalable Solutions AB */ -package se.scalablesolutions.akka.state +package se.scalablesolutions.akka.persistence.cassandra -import org.codehaus.aspectwerkz.proxy.Uuid +import se.scalablesolutions.akka.util.UUID +import se.scalablesolutions.akka.stm._ +import se.scalablesolutions.akka.persistence.common._ object CassandraStorage extends Storage { type ElementType = Array[Byte] - def newMap: PersistentMap[ElementType, ElementType] = newMap(Uuid.newUuid.toString) - def newVector: PersistentVector[ElementType] = newVector(Uuid.newUuid.toString) - def newRef: PersistentRef[ElementType] = newRef(Uuid.newUuid.toString) + def newMap: PersistentMap[ElementType, ElementType] = newMap(UUID.newUuid.toString) + def newVector: PersistentVector[ElementType] = newVector(UUID.newUuid.toString) + def newRef: PersistentRef[ElementType] = newRef(UUID.newUuid.toString) def getMap(id: String): PersistentMap[ElementType, ElementType] = newMap(id) def getVector(id: String): PersistentVector[ElementType] = newVector(id) diff --git a/akka-persistence/akka-persistence-cassandra/src/main/scala/CassandraStorageBackend.scala b/akka-persistence/akka-persistence-cassandra/src/main/scala/CassandraStorageBackend.scala index d3c011ef79..8e91753211 100644 --- a/akka-persistence/akka-persistence-cassandra/src/main/scala/CassandraStorageBackend.scala +++ b/akka-persistence/akka-persistence-cassandra/src/main/scala/CassandraStorageBackend.scala @@ -2,11 +2,13 @@ * Copyright (C) 2009-2010 Scalable Solutions AB */ -package se.scalablesolutions.akka.state +package se.scalablesolutions.akka.persistence.cassandra +import se.scalablesolutions.akka.stm._ +import se.scalablesolutions.akka.persistence.common._ import se.scalablesolutions.akka.util.Logging import se.scalablesolutions.akka.util.Helpers._ -import se.scalablesolutions.akka.Config.config +import se.scalablesolutions.akka.config.Config.config import 
org.apache.cassandra.service._ diff --git a/akka-persistence/akka-persistence-cassandra/src/test/scala/CassandraPersistentActorSpec.scala b/akka-persistence/akka-persistence-cassandra/src/test/scala/CassandraPersistentActorSpec.scala index 0e232f5ce9..46d1b48a2d 100644 --- a/akka-persistence/akka-persistence-cassandra/src/test/scala/CassandraPersistentActorSpec.scala +++ b/akka-persistence/akka-persistence-cassandra/src/test/scala/CassandraPersistentActorSpec.scala @@ -1,13 +1,9 @@ -package se.scalablesolutions.akka.state +package se.scalablesolutions.akka.persistence.cassandra -import se.scalablesolutions.akka.actor.Actor - -import junit.framework.TestCase +import se.scalablesolutions.akka.actor.{Actor, Transactor} import org.junit.Test import org.junit.Assert._ -import org.apache.cassandra.service.CassandraDaemon -import org.junit.BeforeClass import org.junit.Before import org.scalatest.junit.JUnitSuite @@ -28,9 +24,8 @@ case class SetRefStateOneWay(key: String) case class SuccessOneWay(key: String, value: String) case class FailureOneWay(key: String, value: String, failer: Actor) -class CassandraPersistentActor extends Actor { +class CassandraPersistentActor extends Transactor { timeout = 100000 - makeTransactionRequired private lazy val mapState = CassandraStorage.newMap private lazy val vectorState = CassandraStorage.newVector @@ -66,8 +61,7 @@ class CassandraPersistentActor extends Actor { } } -@serializable class PersistentFailerActor extends Actor { - makeTransactionRequired +@serializable class PersistentFailerActor extends Transactor { def receive = { case "Failure" => throw new RuntimeException("expected") @@ -76,8 +70,8 @@ class CassandraPersistentActor extends Actor { class CassandraPersistentActorSpec extends JUnitSuite { - @Before - def startCassandra = EmbeddedCassandraService.start + //@Before + //def startCassandra = EmbeddedCassandraService.start @Test def testMapShouldNotRollbackStateForStatefulServerInCaseOfSuccess = { diff --git a/akka-persistence/akka-persistence-common/pom.xml b/akka-persistence/akka-persistence-common/pom.xml deleted file mode 100644 index 623fbea571..0000000000 --- a/akka-persistence/akka-persistence-common/pom.xml +++ /dev/null @@ -1,29 +0,0 @@ - - 4.0.0 - - akka-persistence-common - Akka Persistence Common Module - - jar - - - akka-persistence-parent - se.scalablesolutions.akka - 0.7-SNAPSHOT - - - - - com.facebook - thrift - 1.0 - - - commons-pool - commons-pool - 1.5.1 - - - - diff --git a/akka-persistence/akka-persistence-common/src/main/scala/Pool.scala b/akka-persistence/akka-persistence-common/src/main/scala/Pool.scala index d290455cad..73b64f3dd5 100644 --- a/akka-persistence/akka-persistence-common/src/main/scala/Pool.scala +++ b/akka-persistence/akka-persistence-common/src/main/scala/Pool.scala @@ -2,7 +2,7 @@ * Copyright (C) 2009-2010 Scalable Solutions AB */ -package se.scalablesolutions.akka.state +package se.scalablesolutions.akka.persistence.common import org.apache.commons.pool._ import org.apache.commons.pool.impl._ diff --git a/akka-persistence/akka-persistence-common/src/main/scala/Storage.scala b/akka-persistence/akka-persistence-common/src/main/scala/Storage.scala index dc55e0eca1..0f0eeac912 100644 --- a/akka-persistence/akka-persistence-common/src/main/scala/Storage.scala +++ b/akka-persistence/akka-persistence-common/src/main/scala/Storage.scala @@ -2,16 +2,17 @@ * Copyright (C) 2009-2010 Scalable Solutions AB */ -package se.scalablesolutions.akka.state +package se.scalablesolutions.akka.persistence.common -import 
se.scalablesolutions.akka.stm.TransactionManagement.currentTransaction -import se.scalablesolutions.akka.collection._ +import se.scalablesolutions.akka.stm._ +import se.scalablesolutions.akka.stm.TransactionManagement.transaction import se.scalablesolutions.akka.util.Logging -import org.codehaus.aspectwerkz.proxy.Uuid - +// FIXME move to 'stm' package + add message with more info class NoTransactionInScopeException extends RuntimeException +class StorageException(message: String) extends RuntimeException(message) + /** * Example Scala usage. *
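The "Example Scala usage" block announced in this scaladoc does not survive in this excerpt; a minimal sketch of the intended workflow, assuming the RedisStorage object from this patch, a public uuid on the returned map, and the Transaction.atomic import seen in the deleted Agent file (all assumptions):

// Sketch, not the original example. Assumes RedisStorage from this patch and
// an enclosing transaction: register (below) throws
// NoTransactionInScopeException when no transaction is in scope.
import se.scalablesolutions.akka.persistence.redis.RedisStorage
import se.scalablesolutions.akka.stm.Transaction.atomic

val map = RedisStorage.newMap                    // PersistentMap[Array[Byte], Array[Byte]]
atomic {
  map("language".getBytes) = "scala".getBytes    // buffered; flushed to Redis on commit
}
val reattached = RedisStorage.getMap(map.uuid)   // bind a new view to the same stored data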

@@ -50,24 +51,26 @@ trait Storage { def newRef: PersistentRef[ElementType] def newQueue: PersistentQueue[ElementType] = // only implemented for redis throw new UnsupportedOperationException + def newSortedSet: PersistentSortedSet[ElementType] = // only implemented for redis + throw new UnsupportedOperationException def getMap(id: String): PersistentMap[ElementType, ElementType] def getVector(id: String): PersistentVector[ElementType] def getRef(id: String): PersistentRef[ElementType] def getQueue(id: String): PersistentQueue[ElementType] = // only implemented for redis throw new UnsupportedOperationException + def getSortedSet(id: String): PersistentSortedSet[ElementType] = // only implemented for redis + throw new UnsupportedOperationException def newMap(id: String): PersistentMap[ElementType, ElementType] def newVector(id: String): PersistentVector[ElementType] def newRef(id: String): PersistentRef[ElementType] def newQueue(id: String): PersistentQueue[ElementType] = // only implemented for redis throw new UnsupportedOperationException + def newSortedSet(id: String): PersistentSortedSet[ElementType] = // only implemented for redis + throw new UnsupportedOperationException } - - - - /** * Implementation of PersistentMap for every concrete * storage will have the same workflow. This abstracts the workflow. @@ -162,8 +165,8 @@ trait PersistentMap[K, V] extends scala.collection.mutable.Map[K, V] } private def register = { - if (currentTransaction.get.isEmpty) throw new NoTransactionInScopeException - currentTransaction.get.get.register(uuid, this) + if (transaction.get.isEmpty) throw new NoTransactionInScopeException + transaction.get.get.register(uuid, this) } } @@ -236,8 +239,8 @@ trait PersistentVector[T] extends RandomAccessSeq[T] with Transactional with Com def length: Int = storage.getVectorStorageSizeFor(uuid) + newElems.length private def register = { - if (currentTransaction.get.isEmpty) throw new NoTransactionInScopeException - currentTransaction.get.get.register(uuid, this) + if (transaction.get.isEmpty) throw new NoTransactionInScopeException + transaction.get.get.register(uuid, this) } } @@ -272,8 +275,8 @@ trait PersistentRef[T] extends Transactional with Committable { } private def register = { - if (currentTransaction.get.isEmpty) throw new NoTransactionInScopeException - currentTransaction.get.get.register(uuid, this) + if (transaction.get.isEmpty) throw new NoTransactionInScopeException + transaction.get.get.register(uuid, this) } } @@ -305,7 +308,7 @@ trait PersistentRef[T] extends Transactional with Committable { trait PersistentQueue[A] extends scala.collection.mutable.Queue[A] with Transactional with Committable with Logging { - abstract case class QueueOp + sealed trait QueueOp case object ENQ extends QueueOp case object DEQ extends QueueOp @@ -397,7 +400,123 @@ trait PersistentQueue[A] extends scala.collection.mutable.Queue[A] throw new UnsupportedOperationException("dequeueAll not supported") private def register = { - if (currentTransaction.get.isEmpty) throw new NoTransactionInScopeException - currentTransaction.get.get.register(uuid, this) + if (transaction.get.isEmpty) throw new NoTransactionInScopeException + transaction.get.get.register(uuid, this) + } +} + +/** + * Implements a template for a concrete persistent transactional sorted set based storage. + *

+ * Sorting is done based on a zscore, but the computation of the zscore is kept + * outside the abstraction. + *

+ * zscore can be implemented in a variety of ways by the calling class: + *

+ * trait ZScorable {
+ *   def toZScore: Float
+ * }
+ *
+ * class Foo extends ZScorable {
+ *   //.. implementation
+ * }
+ * 
+ * Alternatively, we can use views: + *
+ * class Foo {
+ *   //..
+ * }
+ * 
+ * implicit def Foo2Scorable(foo: Foo): ZScorable = new ZScorable {
+ *   def toZScore = {
+ *     //..
+ *   }
+ * }
+ * 
+ * + * and use foo.toZScore to compute the zscore and pass to the APIs. + * + * @author + */ +trait PersistentSortedSet[A] + extends Transactional + with Committable { + + protected val newElems = TransactionalState.newMap[A, Float] + protected val removedElems = TransactionalState.newVector[A] + + val storage: SortedSetStorageBackend[A] + + def commit = { + for ((element, score) <- newElems) storage.zadd(uuid, String.valueOf(score), element) + for (element <- removedElems) storage.zrem(uuid, element) + newElems.clear + removedElems.clear + } + + def +(elem: A, score: Float) = add(elem, score) + + def add(elem: A, score: Float) = { + register + newElems.put(elem, score) + } + + def -(elem: A) = remove(elem) + + def remove(elem: A) = { + register + removedElems.add(elem) + } + + private def inStorage(elem: A): Option[Float] = storage.zscore(uuid, elem) match { + case Some(s) => Some(s.toFloat) + case None => None + } + + def contains(elem: A): Boolean = { + if (newElems contains elem) true + else { + inStorage(elem) match { + case Some(f) => true + case None => false + } + } + } + + def size: Int = newElems.size + storage.zcard(uuid) - removedElems.size + + def zscore(elem: A): Float = { + if (newElems contains elem) newElems.get(elem).get + else inStorage(elem) match { + case Some(f) => f + case None => + throw new Predef.NoSuchElementException(elem + " not present") + } + } + + implicit def order(x: (A, Float)) = new Ordered[(A, Float)] { + def compare(that: (A, Float)) = x._2 compare that._2 + } + + def zrange(start: Int, end: Int): List[(A, Float)] = { + // need to operate on the whole range + // get all from the underlying storage + val fromStore = storage.zrangeWithScore(uuid, 0, -1) + val ts = scala.collection.immutable.TreeSet(fromStore: _*) ++ newElems.toList + val l = ts.size + + // -1 means the last element, -2 means the second last + val s = if (start < 0) start + l else start + val e = + if (end < 0) end + l + else if (end >= l) (l - 1) + else end + // slice is open at the end, we need a closed end range + ts.elements.slice(s, e + 1).toList + } + + private def register = { + if (transaction.get.isEmpty) throw new NoTransactionInScopeException + transaction.get.get.register(uuid, this) } } diff --git a/akka-persistence/akka-persistence-common/src/main/scala/StorageBackend.scala b/akka-persistence/akka-persistence-common/src/main/scala/StorageBackend.scala index 94233acd0a..ab0cfaf4d3 100644 --- a/akka-persistence/akka-persistence-common/src/main/scala/StorageBackend.scala +++ b/akka-persistence/akka-persistence-common/src/main/scala/StorageBackend.scala @@ -2,7 +2,7 @@ * Copyright (C) 2009-2010 Scalable Solutions AB */ -package se.scalablesolutions.akka.state +package se.scalablesolutions.akka.persistence.common // abstracts persistence storage trait StorageBackend @@ -33,6 +33,14 @@ trait VectorStorageBackend[T] extends StorageBackend { trait RefStorageBackend[T] extends StorageBackend { def insertRefStorageFor(name: String, element: T) def getRefStorageFor(name: String): Option[T] + def incrementAtomically(name: String): Option[Int] = + throw new UnsupportedOperationException // only for redis + def incrementByAtomically(name: String, by: Int): Option[Int] = + throw new UnsupportedOperationException // only for redis + def decrementAtomically(name: String): Option[Int] = + throw new UnsupportedOperationException // only for redis + def decrementByAtomically(name: String, by: Int): Option[Int] = + throw new UnsupportedOperationException // only for redis } // for Queue @@ -61,11
+69,15 @@ trait SortedSetStorageBackend[T] extends StorageBackend { // remove item from sorted set identified by name def zrem(name: String, item: T): Boolean - // cardinality of the set idnetified by name + // cardinality of the set identified by name def zcard(name: String): Int - def zscore(name: String, item: T): String + // zscore of the item from sorted set identified by name + def zscore(name: String, item: T): Option[Float] + // zrange from the sorted set identified by name def zrange(name: String, start: Int, end: Int): List[T] -} + // zrange with score from the sorted set identified by name + def zrangeWithScore(name: String, start: Int, end: Int): List[(T, Float)] +} diff --git a/akka-persistence/akka-persistence-mongo/pom.xml b/akka-persistence/akka-persistence-mongo/pom.xml deleted file mode 100644 index 616deb7492..0000000000 --- a/akka-persistence/akka-persistence-mongo/pom.xml +++ /dev/null @@ -1,31 +0,0 @@ - - 4.0.0 - - akka-persistence-mongo - Akka Persistence Mongo Module - - jar - - - akka-persistence-parent - se.scalablesolutions.akka - 0.7-SNAPSHOT - - - - - akka-persistence-common - ${project.groupId} - ${project.version} - - - - - org.mongodb - mongo-java-driver - 1.1 - - - - diff --git a/akka-persistence/akka-persistence-mongo/src/main/scala/MongoStorage.scala b/akka-persistence/akka-persistence-mongo/src/main/scala/MongoStorage.scala index 9aaf7a601d..70c7937eae 100644 --- a/akka-persistence/akka-persistence-mongo/src/main/scala/MongoStorage.scala +++ b/akka-persistence/akka-persistence-mongo/src/main/scala/MongoStorage.scala @@ -2,16 +2,18 @@ * Copyright (C) 2009-2010 Scalable Solutions AB */ -package se.scalablesolutions.akka.state +package se.scalablesolutions.akka.persistence.mongo -import org.codehaus.aspectwerkz.proxy.Uuid +import se.scalablesolutions.akka.stm._ +import se.scalablesolutions.akka.persistence.common._ +import se.scalablesolutions.akka.util.UUID object MongoStorage extends Storage { type ElementType = AnyRef - def newMap: PersistentMap[ElementType, ElementType] = newMap(Uuid.newUuid.toString) - def newVector: PersistentVector[ElementType] = newVector(Uuid.newUuid.toString) - def newRef: PersistentRef[ElementType] = newRef(Uuid.newUuid.toString) + def newMap: PersistentMap[ElementType, ElementType] = newMap(UUID.newUuid.toString) + def newVector: PersistentVector[ElementType] = newVector(UUID.newUuid.toString) + def newRef: PersistentRef[ElementType] = newRef(UUID.newUuid.toString) def getMap(id: String): PersistentMap[ElementType, ElementType] = newMap(id) def getVector(id: String): PersistentVector[ElementType] = newVector(id) diff --git a/akka-persistence/akka-persistence-mongo/src/main/scala/MongoStorageBackend.scala b/akka-persistence/akka-persistence-mongo/src/main/scala/MongoStorageBackend.scala index 0641b676e5..7cf3de21a4 100644 --- a/akka-persistence/akka-persistence-mongo/src/main/scala/MongoStorageBackend.scala +++ b/akka-persistence/akka-persistence-mongo/src/main/scala/MongoStorageBackend.scala @@ -2,10 +2,12 @@ * Copyright (C) 2009-2010 Scalable Solutions AB */ -package se.scalablesolutions.akka.state +package se.scalablesolutions.akka.persistence.mongo +import se.scalablesolutions.akka.stm._ +import se.scalablesolutions.akka.persistence.common._ import se.scalablesolutions.akka.util.Logging -import se.scalablesolutions.akka.Config.config +import se.scalablesolutions.akka.config.Config.config import sjson.json.Serializer._ diff --git a/akka-persistence/akka-persistence-mongo/src/test/scala/MongoPersistentActorSpec.scala 
b/akka-persistence/akka-persistence-mongo/src/test/scala/MongoPersistentActorSpec.scala index 8681ebadb9..93aa1862d1 100644 --- a/akka-persistence/akka-persistence-mongo/src/test/scala/MongoPersistentActorSpec.scala +++ b/akka-persistence/akka-persistence-mongo/src/test/scala/MongoPersistentActorSpec.scala @@ -1,4 +1,4 @@ -package se.scalablesolutions.akka.state +package se.scalablesolutions.akka.persistence.mongo import junit.framework.TestCase @@ -8,7 +8,7 @@ import org.junit.Assert._ import _root_.dispatch.json.{JsNumber, JsValue} import _root_.dispatch.json.Js._ -import se.scalablesolutions.akka.actor.Actor +import se.scalablesolutions.akka.actor.{Transactor, Actor} /** * A persistent actor based on MongoDB storage. @@ -29,10 +29,10 @@ case class MultiDebit(accountNo: String, amounts: List[BigInt], failer: Actor) case class Credit(accountNo: String, amount: BigInt) case object LogSize -class BankAccountActor extends Actor { - makeTransactionRequired - private val accountState = MongoStorage.newMap - private val txnLog = MongoStorage.newVector +class BankAccountActor extends Transactor { + + private lazy val accountState = MongoStorage.newMap + private lazy val txnLog = MongoStorage.newVector def receive: PartialFunction[Any, Unit] = { // check balance @@ -91,8 +91,7 @@ class BankAccountActor extends Actor { } } -@serializable class PersistentFailerActor extends Actor { - makeTransactionRequired +@serializable class PersistentFailerActor extends Transactor { def receive = { case "Failure" => throw new RuntimeException("expected") diff --git a/akka-persistence/akka-persistence-mongo/src/test/scala/MongoStorageSpec.scala b/akka-persistence/akka-persistence-mongo/src/test/scala/MongoStorageSpec.scala index fae6d7f00d..bf13c62390 100644 --- a/akka-persistence/akka-persistence-mongo/src/test/scala/MongoStorageSpec.scala +++ b/akka-persistence/akka-persistence-mongo/src/test/scala/MongoStorageSpec.scala @@ -1,4 +1,4 @@ -package se.scalablesolutions.akka.state +package se.scalablesolutions.akka.persistence.mongo import junit.framework.TestCase diff --git a/akka-persistence/akka-persistence-redis/pom.xml b/akka-persistence/akka-persistence-redis/pom.xml deleted file mode 100644 index 112d4764cb..0000000000 --- a/akka-persistence/akka-persistence-redis/pom.xml +++ /dev/null @@ -1,31 +0,0 @@ - - 4.0.0 - - akka-persistence-redis - Akka Persistence Redis Module - - jar - - - akka-persistence-parent - se.scalablesolutions.akka - 0.7-SNAPSHOT - - - - - akka-persistence-common - ${project.groupId} - ${project.version} - - - - - com.redis - redisclient - 1.1 - - - - diff --git a/akka-persistence/akka-persistence-redis/src/main/scala/RedisStorage.scala b/akka-persistence/akka-persistence-redis/src/main/scala/RedisStorage.scala index fffa0011e5..b8aada0572 100644 --- a/akka-persistence/akka-persistence-redis/src/main/scala/RedisStorage.scala +++ b/akka-persistence/akka-persistence-redis/src/main/scala/RedisStorage.scala @@ -2,27 +2,33 @@ * Copyright (C) 2009-2010 Scalable Solutions AB */ -package se.scalablesolutions.akka.state +package se.scalablesolutions.akka.persistence.redis -import org.codehaus.aspectwerkz.proxy.Uuid +import se.scalablesolutions.akka.util.UUID +import se.scalablesolutions.akka.stm._ +import se.scalablesolutions.akka.persistence.common._ object RedisStorage extends Storage { type ElementType = Array[Byte] - def newMap: PersistentMap[ElementType, ElementType] = newMap(Uuid.newUuid.toString) - def newVector: PersistentVector[ElementType] = newVector(Uuid.newUuid.toString) - def 
newRef: PersistentRef[ElementType] = newRef(Uuid.newUuid.toString) - override def newQueue: PersistentQueue[ElementType] = newQueue(Uuid.newUuid.toString) + def newMap: PersistentMap[ElementType, ElementType] = newMap(UUID.newUuid.toString) + def newVector: PersistentVector[ElementType] = newVector(UUID.newUuid.toString) + def newRef: PersistentRef[ElementType] = newRef(UUID.newUuid.toString) + override def newQueue: PersistentQueue[ElementType] = newQueue(UUID.newUuid.toString) + override def newSortedSet: PersistentSortedSet[ElementType] = newSortedSet(UUID.newUuid.toString) def getMap(id: String): PersistentMap[ElementType, ElementType] = newMap(id) def getVector(id: String): PersistentVector[ElementType] = newVector(id) def getRef(id: String): PersistentRef[ElementType] = newRef(id) override def getQueue(id: String): PersistentQueue[ElementType] = newQueue(id) + override def getSortedSet(id: String): PersistentSortedSet[ElementType] = newSortedSet(id) def newMap(id: String): PersistentMap[ElementType, ElementType] = new RedisPersistentMap(id) def newVector(id: String): PersistentVector[ElementType] = new RedisPersistentVector(id) def newRef(id: String): PersistentRef[ElementType] = new RedisPersistentRef(id) override def newQueue(id: String): PersistentQueue[ElementType] = new RedisPersistentQueue(id) + override def newSortedSet(id: String): PersistentSortedSet[ElementType] = + new RedisPersistentSortedSet(id) } /** @@ -61,3 +67,14 @@ class RedisPersistentQueue(id: String) extends PersistentQueue[Array[Byte]] { val uuid = id val storage = RedisStorageBackend } + +/** + * Implements a persistent transactional sorted set based on the Redis + * storage. + * + * @author Debasish Ghosh + */ +class RedisPersistentSortedSet(id: String) extends PersistentSortedSet[Array[Byte]] { + val uuid = id + val storage = RedisStorageBackend +} diff --git a/akka-persistence/akka-persistence-redis/src/main/scala/RedisStorageBackend.scala b/akka-persistence/akka-persistence-redis/src/main/scala/RedisStorageBackend.scala index 48945d6b8c..c48c84fa39 100644 --- a/akka-persistence/akka-persistence-redis/src/main/scala/RedisStorageBackend.scala +++ b/akka-persistence/akka-persistence-redis/src/main/scala/RedisStorageBackend.scala @@ -2,10 +2,12 @@ * Copyright (C) 2009-2010 Scalable Solutions AB */ -package se.scalablesolutions.akka.state +package se.scalablesolutions.akka.persistence.redis +import se.scalablesolutions.akka.stm._ +import se.scalablesolutions.akka.persistence.common._ import se.scalablesolutions.akka.util.Logging -import se.scalablesolutions.akka.Config.config +import se.scalablesolutions.akka.config.Config.config import com.redis._ @@ -72,11 +74,11 @@ private [akka] object RedisStorageBackend extends * base64(T1):base64("debasish.programming_language") -> "scala" * */ - def insertMapStorageEntryFor(name: String, key: Array[Byte], value: Array[Byte]) { + def insertMapStorageEntryFor(name: String, key: Array[Byte], value: Array[Byte]): Unit = withErrorHandling { insertMapStorageEntriesFor(name, List((key, value))) } - def insertMapStorageEntriesFor(name: String, entries: List[Tuple2[Array[Byte], Array[Byte]]]) { + def insertMapStorageEntriesFor(name: String, entries: List[Tuple2[Array[Byte], Array[Byte]]]): Unit = withErrorHandling { mset(entries.map(e => (makeRedisKey(name, e._1), new String(e._2)))) } @@ -89,22 +91,22 @@ private [akka] object RedisStorageBackend extends *
 * ':' is chosen as the separator since it cannot appear in the base64 encoding charset
 * both parts of the key need to be base64 encoded since there can be spaces within each of them
{ case None => throw new Predef.NoSuchElementException(name + " does not have element at " + index) @@ -208,7 +211,7 @@ private [akka] object RedisStorageBackend extends } } - def getVectorStorageRangeFor(name: String, start: Option[Int], finish: Option[Int], count: Int): List[Array[Byte]] = { + def getVectorStorageRangeFor(name: String, start: Option[Int], finish: Option[Int], count: Int): List[Array[Byte]] = withErrorHandling { /** * count is the max number of results to return. Start with * start or 0 (if start is not defined) and go until @@ -237,11 +240,11 @@ private [akka] object RedisStorageBackend extends } } - def insertRefStorageFor(name: String, element: Array[Byte]) { + def insertRefStorageFor(name: String, element: Array[Byte]): Unit = withErrorHandling { db.set(new String(encode(name.getBytes)), new String(element)) } - def getRefStorageFor(name: String): Option[Array[Byte]] = { + def getRefStorageFor(name: String): Option[Array[Byte]] = withErrorHandling { db.get(new String(encode(name.getBytes))) match { case None => throw new Predef.NoSuchElementException(name + " not present") @@ -249,13 +252,46 @@ private [akka] object RedisStorageBackend extends } } + override def incrementAtomically(name: String): Option[Int] = withErrorHandling { + db.incr(new String(encode(name.getBytes))) match { + case Some(i) => Some(i) + case None => + throw new Predef.IllegalArgumentException(name + " exception in incr") + } + } + + override def incrementByAtomically(name: String, by: Int): Option[Int] = withErrorHandling { + db.incrBy(new String(encode(name.getBytes)), by) match { + case Some(i) => Some(i) + case None => + throw new Predef.IllegalArgumentException(name + " exception in incrby") + } + } + + override def decrementAtomically(name: String): Option[Int] = withErrorHandling { + db.decr(new String(encode(name.getBytes))) match { + case Some(i) => Some(i) + case None => + throw new Predef.IllegalArgumentException(name + " exception in decr") + } + } + + override def decrementByAtomically(name: String, by: Int): Option[Int] = withErrorHandling { + db.decrBy(new String(encode(name.getBytes)), by) match { + case Some(i) => Some(i) + case None => + throw new Predef.IllegalArgumentException(name + " exception in decrby") + } + } + // add to the end of the queue - def enqueue(name: String, item: Array[Byte]): Boolean = { + def enqueue(name: String, item: Array[Byte]): Boolean = withErrorHandling { db.rpush(new String(encode(name.getBytes)), new String(item)) } + // pop from the front of the queue - def dequeue(name: String): Option[Array[Byte]] = { + def dequeue(name: String): Option[Array[Byte]] = withErrorHandling { db.lpop(new String(encode(name.getBytes))) match { case None => throw new Predef.NoSuchElementException(name + " not present") @@ -265,7 +301,7 @@ private [akka] object RedisStorageBackend extends } // get the size of the queue - def size(name: String): Int = { + def size(name: String): Int = withErrorHandling { db.llen(new String(encode(name.getBytes))) match { case None => throw new Predef.NoSuchElementException(name + " not present") @@ -275,26 +311,28 @@ private [akka] object RedisStorageBackend extends // return an array of items currently stored in the queue // start is the item to begin, count is how many items to return - def peek(name: String, start: Int, count: Int): List[Array[Byte]] = count match { - case 1 => - db.lindex(new String(encode(name.getBytes)), start) match { - case None => - throw new Predef.NoSuchElementException("No element at " + start) - case 
Some(s) => - List(s.getBytes) - } - case n => - db.lrange(new String(encode(name.getBytes)), start, start + count - 1) match { - case None => - throw new Predef.NoSuchElementException( - "No element found between " + start + " and " + (start + count - 1)) - case Some(es) => - es.map(_.get.getBytes) - } + def peek(name: String, start: Int, count: Int): List[Array[Byte]] = withErrorHandling { + count match { + case 1 => + db.lindex(new String(encode(name.getBytes)), start) match { + case None => + throw new Predef.NoSuchElementException("No element at " + start) + case Some(s) => + List(s.getBytes) + } + case n => + db.lrange(new String(encode(name.getBytes)), start, start + count - 1) match { + case None => + throw new Predef.NoSuchElementException( + "No element found between " + start + " and " + (start + count - 1)) + case Some(es) => + es.map(_.get.getBytes) + } + } } // completely delete the queue - def remove(name: String): Boolean = { + def remove(name: String): Boolean = withErrorHandling { db.delete(new String(encode(name.getBytes))) match { case Some(1) => true case _ => false @@ -302,7 +340,7 @@ private [akka] object RedisStorageBackend extends } // add item to sorted set identified by name - def zadd(name: String, zscore: String, item: Array[Byte]): Boolean = { + def zadd(name: String, zscore: String, item: Array[Byte]): Boolean = withErrorHandling { db.zadd(new String(encode(name.getBytes)), zscore, new String(item)) match { case Some(1) => true case _ => false @@ -310,7 +348,7 @@ private [akka] object RedisStorageBackend extends } // remove item from sorted set identified by name - def zrem(name: String, item: Array[Byte]): Boolean = { + def zrem(name: String, item: Array[Byte]): Boolean = withErrorHandling { db.zrem(new String(encode(name.getBytes)), new String(item)) match { case Some(1) => true case _ => false @@ -318,7 +356,7 @@ private [akka] object RedisStorageBackend extends } // cardinality of the set identified by name - def zcard(name: String): Int = { + def zcard(name: String): Int = withErrorHandling { db.zcard(new String(encode(name.getBytes))) match { case None => throw new Predef.NoSuchElementException(name + " not present") @@ -326,15 +364,14 @@ private [akka] object RedisStorageBackend extends } } - def zscore(name: String, item: Array[Byte]): String = { + def zscore(name: String, item: Array[Byte]): Option[Float] = withErrorHandling { db.zscore(new String(encode(name.getBytes)), new String(item)) match { - case None => - throw new Predef.NoSuchElementException(new String(item) + " not present") - case Some(s) => s + case Some(s) => Some(s.toFloat) + case None => None } } - def zrange(name: String, start: Int, end: Int): List[Array[Byte]] = { + def zrange(name: String, start: Int, end: Int): List[Array[Byte]] = withErrorHandling { db.zrange(new String(encode(name.getBytes)), start.toString, end.toString, RedisClient.ASC, false) match { case None => throw new Predef.NoSuchElementException(name + " not present") @@ -342,6 +379,27 @@ private [akka] object RedisStorageBackend extends s.map(_.get.getBytes) } } + + def zrangeWithScore(name: String, start: Int, end: Int): List[(Array[Byte], Float)] = withErrorHandling { + db.zrangeWithScore( + new String(encode(name.getBytes)), start.toString, end.toString, RedisClient.ASC) match { + case None => + throw new Predef.NoSuchElementException(name + " not present") + case Some(l) => + l.map{ case (elem, score) => (elem.get.getBytes, score.get.toFloat) } + } + } - def flushDB = db.flushDb + def flushDB = 
withErrorHandling(db.flushDb) + + private def withErrorHandling[T](body: => T): T = { + try { + body + } catch { + case e: java.lang.NullPointerException => + throw new StorageException("Could not connect to Redis server") + case e => + throw new StorageException("Error in Redis: " + e.getMessage) + } + } }
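The withErrorHandling combinator above takes its body as a call-by-name parameter, so each storage operation is evaluated lazily inside the try block and every Redis failure surfaces as a single StorageException type. A minimal, self-contained sketch of the same pattern (KVStoreException and the toy in-memory store are illustrative stand-ins, not Akka or scala-redis APIs):

class KVStoreException(msg: String) extends RuntimeException(msg)

object KVStore {
  private val store = new scala.collection.mutable.HashMap[String, String]

  // 'body' is call-by-name: it only runs inside the try block, so all
  // public operations share one failure-translation policy.
  private def withErrorHandling[T](body: => T): T =
    try {
      body
    } catch {
      case e: java.lang.NullPointerException =>
        throw new KVStoreException("Could not connect to store")
      case e: Exception =>
        throw new KVStoreException("Error in store: " + e.getMessage)
    }

  def put(k: String, v: String): Unit = withErrorHandling { store(k) = v }
  def get(k: String): Option[String] = withErrorHandling { store.get(k) }
}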
diff --git a/akka-persistence/akka-persistence-redis/src/test/scala/RedisPersistentActorSpec.scala b/akka-persistence/akka-persistence-redis/src/test/scala/RedisPersistentActorSpec.scala index 86d4384b70..41ee6fb909 100644 --- a/akka-persistence/akka-persistence-redis/src/test/scala/RedisPersistentActorSpec.scala +++ b/akka-persistence/akka-persistence-redis/src/test/scala/RedisPersistentActorSpec.scala @@ -1,4 +1,4 @@ -package se.scalablesolutions.akka.state +package se.scalablesolutions.akka.persistence.redis import junit.framework.TestCase @@ -29,6 +29,7 @@ case object LogSize class AccountActor extends Transactor { private lazy val accountState = RedisStorage.newMap private lazy val txnLog = RedisStorage.newVector + //timeout = 5000 def receive = { // check balance @@ -86,6 +87,7 @@ class AccountActor extends Transactor { } @serializable class PersistentFailerActor extends Transactor { + // timeout = 5000 def receive = { case "Failure" => throw new RuntimeException("expected") @@ -138,7 +140,7 @@ class RedisPersistentActorSpec extends TestCase { bactor.start bactor !! Credit("a-123", 5000) - assertEquals(BigInt(5000), (bactor !! Balance("a-123")).get) + assertEquals(BigInt(5000), (bactor !! (Balance("a-123"), 5000)).get) val failer = new PersistentFailerActor failer.start @@ -147,7 +149,7 @@ class RedisPersistentActorSpec extends TestCase { fail("should throw exception") } catch { case e: RuntimeException => {}} - assertEquals(BigInt(5000), (bactor !! Balance("a-123")).get) + assertEquals(BigInt(5000), (bactor !! (Balance("a-123"), 5000)).get) // should not count the failed one assertEquals(3, (bactor !! LogSize).get)
diff --git a/akka-persistence/akka-persistence-redis/src/test/scala/RedisPersistentQSpec.scala b/akka-persistence/akka-persistence-redis/src/test/scala/RedisPersistentQSpec.scala index ad67dbfdbe..9741a9acfd 100644 --- a/akka-persistence/akka-persistence-redis/src/test/scala/RedisPersistentQSpec.scala +++ b/akka-persistence/akka-persistence-redis/src/test/scala/RedisPersistentQSpec.scala @@ -1,4 +1,4 @@ -package se.scalablesolutions.akka.state +package se.scalablesolutions.akka.persistence.redis import junit.framework.TestCase @@ -15,9 +15,9 @@ import se.scalablesolutions.akka.actor.{Actor, Transactor} */ case class NQ(accountNo: String) -case class DQ +case object DQ case class MNDQ(accountNos: List[String], noOfDQs: Int, failer: Actor) -case class SZ +case object SZ class QueueActor extends Transactor { private lazy val accounts = RedisStorage.newQueue
diff --git a/akka-persistence/akka-persistence-redis/src/test/scala/RedisPersistentSortedSetSpec.scala b/akka-persistence/akka-persistence-redis/src/test/scala/RedisPersistentSortedSetSpec.scala new file mode 100644 index 0000000000..a2abb2cd40 --- /dev/null +++ b/akka-persistence/akka-persistence-redis/src/test/scala/RedisPersistentSortedSetSpec.scala @@ -0,0 +1,237 @@ +package se.scalablesolutions.akka.persistence.redis + +import org.scalatest.Spec +import org.scalatest.Assertions +import org.scalatest.matchers.ShouldMatchers +import org.scalatest.BeforeAndAfterAll +import org.scalatest.junit.JUnitRunner +import org.junit.runner.RunWith + +import se.scalablesolutions.akka.actor.{Actor, Transactor} + +/** + * A persistent actor based on Redis sorted-set storage. + *

    + * Needs a running Redis server. + * @author Debasish Ghosh + */ + +trait ZScorable { + def zscore: Float +} + +case class Hacker(name: String, birth: String) extends ZScorable { + def zscore = birth.toFloat +} + +class SetThresholdViolationException extends RuntimeException + +// add hacker to the set +case class ADD(h: Hacker) + +// remove hacker from set +case class REMOVE(h: Hacker) + +// size of the set +case object SIZE + +// zscore of the hacker +case class SCORE(h: Hacker) + +// zrange +case class RANGE(start: Int, end: Int) + +// add and remove subject to the condition that there will be at least 3 hackers +case class MULTI(add: List[Hacker], rem: List[Hacker], failer: Actor) + +class SortedSetActor extends Transactor { + timeout = 100000 + private lazy val hackers = RedisStorage.newSortedSet + + def receive = { + case ADD(h) => + hackers.+(h.name.getBytes, h.zscore) + reply(true) + + case REMOVE(h) => + hackers.-(h.name.getBytes) + reply(true) + + case SIZE => + reply(hackers.size) + + case SCORE(h) => + reply(hackers.zscore(h.name.getBytes)) + + case RANGE(s, e) => + reply(hackers.zrange(s, e)) + + case MULTI(a, r, failer) => + a.foreach{ h: Hacker => + hackers.+(h.name.getBytes, h.zscore) + } + try { + r.foreach{ h => + if (hackers.size <= 3) + throw new SetThresholdViolationException + hackers.-(h.name.getBytes) + } + } catch { + case e: Exception => + failer !! "Failure" + } + reply((a.size, r.size)) + } +} + +import RedisStorageBackend._ + +@RunWith(classOf[JUnitRunner]) +class RedisPersistentSortedSetSpec extends + Spec with + ShouldMatchers with + BeforeAndAfterAll { + + override def beforeAll { + flushDB + println("** destroyed database") + } + + override def afterAll { + flushDB + println("** destroyed database") + } + + val h1 = Hacker("Alan kay", "1940") + val h2 = Hacker("Richard Stallman", "1953") + val h3 = Hacker("Yukihiro Matsumoto", "1965") + val h4 = Hacker("Claude Shannon", "1916") + val h5 = Hacker("Linus Torvalds", "1969") + val h6 = Hacker("Alan Turing", "1912") + + describe("Add and report cardinality of the set") { + val qa = new SortedSetActor + qa.start + + it("should enter 6 hackers") { + qa !! ADD(h1) + qa !! ADD(h2) + qa !! ADD(h3) + qa !! ADD(h4) + qa !! ADD(h5) + qa !! ADD(h6) + (qa !! SIZE).get.asInstanceOf[Int] should equal(6) + } + + it("should fetch correct scores for hackers") { + (qa !! SCORE(h1)).get.asInstanceOf[Float] should equal(1940.0f) + (qa !! SCORE(h5)).get.asInstanceOf[Float] should equal(1969.0f) + (qa !! SCORE(h6)).get.asInstanceOf[Float] should equal(1912.0f) + } + + it("should fetch proper range") { + (qa !! RANGE(0, 4)).get.asInstanceOf[List[_]].size should equal(5) + (qa !! RANGE(0, 6)).get.asInstanceOf[List[_]].size should equal(6) + } + + it("should remove and throw exception for removing non-existent hackers") { + qa !! REMOVE(h2) + (qa !! SIZE).get.asInstanceOf[Int] should equal(5) + qa !! REMOVE(h3) + (qa !! SIZE).get.asInstanceOf[Int] should equal(4) + val h7 = Hacker("Paul Snively", "1952") + try { + qa !! REMOVE(h7) + } + catch { + case e: Predef.NoSuchElementException => + e.getMessage should endWith("not present") + } + } + + it("should change score for entering the same hacker name with diff score") { + (qa !! SIZE).get.asInstanceOf[Int] should equal(4) + + // same name as h6 + val h7 = Hacker("Alan Turing", "1992") + qa !! ADD(h7) + + // size remains same + (qa !! SIZE).get.asInstanceOf[Int] should equal(4) + + // score updated + (qa !! 
SCORE(h7)).get.asInstanceOf[Float] should equal(1992.0f) + } + } + + describe("Transaction semantics") { + it("should rollback on exception") { + val qa = new SortedSetActor + qa.start + + val failer = new PersistentFailerActor + failer.start + + (qa !! SIZE).get.asInstanceOf[Int] should equal(0) + val add = List(h1, h2, h3, h4) + val rem = List(h2) + (qa !! MULTI(add, rem, failer)).get.asInstanceOf[Tuple2[Int, Int]] should equal((4,1)) + (qa !! SIZE).get.asInstanceOf[Int] should equal(3) + // size == 3 + + // add 2 more + val add1 = List(h5, h6) + + // remove 3 + val rem1 = List(h1, h3, h4) + try { + qa !! MULTI(add1, rem1, failer) + } catch { case e: Exception => {} + } + (qa !! SIZE).get.asInstanceOf[Int] should equal(3) + } + } + + describe("zrange") { + it ("should report proper range") { + val qa = new SortedSetActor + qa.start + qa !! ADD(h1) + qa !! ADD(h2) + qa !! ADD(h3) + qa !! ADD(h4) + qa !! ADD(h5) + qa !! ADD(h6) + (qa !! SIZE).get.asInstanceOf[Int] should equal(6) + val l = (qa !! RANGE(0, 6)).get.asInstanceOf[List[(Array[Byte], Float)]] + l.map { case (e, s) => (new String(e), s) }.head should equal(("Alan Turing", 1912.0f)) + val h7 = Hacker("Alan Turing", "1992") + qa !! ADD(h7) + (qa !! SIZE).get.asInstanceOf[Int] should equal(6) + val m = (qa !! RANGE(0, 6)).get.asInstanceOf[List[(Array[Byte], Float)]] + m.map { case (e, s) => (new String(e), s) }.head should equal(("Claude Shannon", 1916.0f)) + } + + it ("should report proper range sizes") { + val qa = new SortedSetActor + qa.start + qa !! ADD(h1) + qa !! ADD(h2) + qa !! ADD(h3) + qa !! ADD(h4) + qa !! ADD(h5) + qa !! ADD(h6) + (qa !! SIZE).get.asInstanceOf[Int] should equal(6) + (qa !! RANGE(0, 5)).get.asInstanceOf[List[_]].size should equal(6) + (qa !! RANGE(0, 6)).get.asInstanceOf[List[_]].size should equal(6) + (qa !! RANGE(0, 3)).get.asInstanceOf[List[_]].size should equal(4) + (qa !! RANGE(0, 1)).get.asInstanceOf[List[_]].size should equal(2) + (qa !! RANGE(0, 0)).get.asInstanceOf[List[_]].size should equal(1) + (qa !! RANGE(3, 1)).get.asInstanceOf[List[_]].size should equal(0) + (qa !! RANGE(0, -1)).get.asInstanceOf[List[_]].size should equal(6) + (qa !! RANGE(0, -2)).get.asInstanceOf[List[_]].size should equal(5) + (qa !! RANGE(0, -4)).get.asInstanceOf[List[_]].size should equal(3) + (qa !!
RANGE(-4, -1)).get.asInstanceOf[List[_]].size should equal(4) + } + } +} diff --git a/akka-persistence/akka-persistence-redis/src/test/scala/RedisStorageBackendSpec.scala b/akka-persistence/akka-persistence-redis/src/test/scala/RedisStorageBackendSpec.scala index 504a0e114d..44081a43c6 100644 --- a/akka-persistence/akka-persistence-redis/src/test/scala/RedisStorageBackendSpec.scala +++ b/akka-persistence/akka-persistence-redis/src/test/scala/RedisStorageBackendSpec.scala @@ -1,4 +1,4 @@ -package se.scalablesolutions.akka.state +package se.scalablesolutions.akka.persistence.redis import org.scalatest.Spec import org.scalatest.matchers.ShouldMatchers @@ -114,6 +114,48 @@ class RedisStorageBackendSpec extends } } + describe("atomic increment in ref") { + it("should increment an existing key value by 1") { + insertRefStorageFor("T-4-1", "1200".getBytes) + new String(getRefStorageFor("T-4-1").get) should equal("1200") + incrementAtomically("T-4-1").get should equal(1201) + } + it("should create and increment a non-existing key value by 1") { + incrementAtomically("T-4-2").get should equal(1) + new String(getRefStorageFor("T-4-2").get) should equal("1") + } + it("should increment an existing key value by the amount specified") { + insertRefStorageFor("T-4-3", "1200".getBytes) + new String(getRefStorageFor("T-4-3").get) should equal("1200") + incrementByAtomically("T-4-3", 50).get should equal(1250) + } + it("should create and increment a non-existing key value by the amount specified") { + incrementByAtomically("T-4-4", 20).get should equal(20) + new String(getRefStorageFor("T-4-4").get) should equal("20") + } + } + + describe("atomic decrement in ref") { + it("should decrement an existing key value by 1") { + insertRefStorageFor("T-4-5", "1200".getBytes) + new String(getRefStorageFor("T-4-5").get) should equal("1200") + decrementAtomically("T-4-5").get should equal(1199) + } + it("should create and decrement a non-existing key value by 1") { + decrementAtomically("T-4-6").get should equal(-1) + new String(getRefStorageFor("T-4-6").get) should equal("-1") + } + it("should decrement an existing key value by the amount specified") { + insertRefStorageFor("T-4-7", "1200".getBytes) + new String(getRefStorageFor("T-4-7").get) should equal("1200") + decrementByAtomically("T-4-7", 50).get should equal(1150) + } + it("should create and decrement a non-existing key value by the amount specified") { + decrementByAtomically("T-4-8", 20).get should equal(-20) + new String(getRefStorageFor("T-4-8").get) should equal("-20") + } + } + describe("store and query in queue") { it("should give proper queue semantics") { enqueue("T-5", "alan kay".getBytes) @@ -149,10 +191,10 @@ class RedisStorageBackendSpec extends zcard("hackers") should equal(6) - zscore("hackers", "alan turing".getBytes) should equal("1912") - zscore("hackers", "richard stallman".getBytes) should equal("1953") - zscore("hackers", "claude shannon".getBytes) should equal("1916") - zscore("hackers", "linus torvalds".getBytes) should equal("1969") + zscore("hackers", "alan turing".getBytes).get should equal(1912.0f) + zscore("hackers", "richard stallman".getBytes).get should equal(1953.0f) + zscore("hackers", "claude shannon".getBytes).get should equal(1916.0f) + zscore("hackers", "linus torvalds".getBytes).get should equal(1969.0f) val s: List[Array[Byte]] = zrange("hackers", 0, 2) s.size should equal(3) @@ -164,6 +206,10 @@ class RedisStorageBackendSpec extends val t: List[Array[Byte]] = zrange("hackers", 0, -1) t.size should equal(6) t.map(new 
String(_)) should equal(sorted) + + val u: List[(Array[Byte], Float)] = zrangeWithScore("hackers", 0, -1) + u.size should equal(6) + u.map{ case (e, s) => new String(e) } should equal(sorted) } } } diff --git a/akka-persistence/pom.xml b/akka-persistence/pom.xml deleted file mode 100644 index cad4757353..0000000000 --- a/akka-persistence/pom.xml +++ /dev/null @@ -1,43 +0,0 @@ - - 4.0.0 - - akka-persistence-parent - Akka Persistence Modules - - pom - - - akka - se.scalablesolutions.akka - 0.7-SNAPSHOT - - - - akka-persistence-common - akka-persistence-redis - akka-persistence-mongo - akka-persistence-cassandra - - - - - akka-core - ${project.groupId} - ${project.version} - - - - org.scalatest - scalatest - 1.0 - test - - - junit - junit - 4.5 - test - - - diff --git a/akka-rest/pom.xml b/akka-rest/pom.xml deleted file mode 100644 index 4e875cb310..0000000000 --- a/akka-rest/pom.xml +++ /dev/null @@ -1,57 +0,0 @@ - - 4.0.0 - - akka-rest - Akka REST Module - - jar - - - akka - se.scalablesolutions.akka - 0.7-SNAPSHOT - - - - - - akka-core - ${project.groupId} - ${project.version} - - - - - javax.servlet - servlet-api - 2.5 - - - com.sun.jersey - jersey-core - ${jersey.version} - - - com.sun.jersey - jersey-server - ${jersey.version} - - - com.sun.jersey - jersey-json - ${jersey.version} - - - javax.ws.rs - jsr311-api - 1.1 - - - com.sun.jersey.contribs - jersey-scala - ${jersey.version} - - - diff --git a/akka-rest/src/main/scala/ActorComponentProvider.scala b/akka-rest/src/main/scala/ActorComponentProvider.scala index ed9fb225fb..5d9d49bef2 100644 --- a/akka-rest/src/main/scala/ActorComponentProvider.scala +++ b/akka-rest/src/main/scala/ActorComponentProvider.scala @@ -7,8 +7,8 @@ package se.scalablesolutions.akka.rest import com.sun.jersey.core.spi.component.ComponentScope import com.sun.jersey.core.spi.component.ioc.IoCFullyManagedComponentProvider -import config.Configurator -import util.Logging +import se.scalablesolutions.akka.config.Configurator +import se.scalablesolutions.akka.util.Logging class ActorComponentProvider(val clazz: Class[_], val configurators: List[Configurator]) extends IoCFullyManagedComponentProvider with Logging { diff --git a/akka-rest/src/main/scala/AkkaServlet.scala b/akka-rest/src/main/scala/AkkaServlet.scala index 2aa2a6b2c5..fbf14cad31 100644 --- a/akka-rest/src/main/scala/AkkaServlet.scala +++ b/akka-rest/src/main/scala/AkkaServlet.scala @@ -5,6 +5,7 @@ package se.scalablesolutions.akka.rest import se.scalablesolutions.akka.config.ConfiguratorRepository +import se.scalablesolutions.akka.config.Config.config import com.sun.jersey.api.core.ResourceConfig import com.sun.jersey.spi.container.servlet.ServletContainer @@ -20,14 +21,12 @@ class AkkaServlet extends ServletContainer { import org.scala_tools.javautils.Imports._ override def initiate(resourceConfig: ResourceConfig, webApplication: WebApplication) = { - //Kernel.boot // will boot if not already booted by 'main' - val configurators = ConfiguratorRepository.getConfigurators resourceConfig.getClasses.addAll(configurators.flatMap(_.getComponentInterfaces).asJava) resourceConfig.getProperties.put( "com.sun.jersey.spi.container.ResourceFilters", - Config.config.getList("akka.rest.filters").mkString(",")) + config.getList("akka.rest.filters").mkString(",")) webApplication.initiate(resourceConfig, new ActorComponentProviderFactory(configurators)) } diff --git a/akka-samples/akka-sample-camel/src/main/resources/sample-camel-context.xml b/akka-samples/akka-sample-camel/src/main/resources/sample-camel-context.xml 
new file mode 100644 index 0000000000..b3d811d8de --- /dev/null +++ b/akka-samples/akka-sample-camel/src/main/resources/sample-camel-context.xml @@ -0,0 +1,23 @@ + + + + + + + + + + + + + + + + + + + diff --git a/akka-samples/akka-sample-camel/src/main/scala/Actors.scala b/akka-samples/akka-sample-camel/src/main/scala/Actors.scala new file mode 100644 index 0000000000..c82b29afc9 --- /dev/null +++ b/akka-samples/akka-sample-camel/src/main/scala/Actors.scala @@ -0,0 +1,92 @@ +package sample.camel + +import se.scalablesolutions.akka.actor.{Actor, RemoteActor} +import se.scalablesolutions.akka.actor.annotation.consume +import se.scalablesolutions.akka.camel.{Producer, Message, Consumer} +import se.scalablesolutions.akka.util.Logging + +/** + * Client-initiated remote actor. + */ +class RemoteActor1 extends RemoteActor("localhost", 7777) with Consumer { + def endpointUri = "jetty:http://localhost:6644/remote1" + + protected def receive = { + case msg: Message => reply(Message("hello %s" format msg.body, Map("sender" -> "remote1"))) + } +} + +/** + * Server-initiated remote actor. + */ +class RemoteActor2 extends Actor with Consumer { + def endpointUri = "jetty:http://localhost:6644/remote2" + + protected def receive = { + case msg: Message => reply(Message("hello %s" format msg.body, Map("sender" -> "remote2"))) + } +} + +class Producer1 extends Actor with Producer { + def endpointUri = "direct:welcome" + + override def oneway = false // default + override def async = true // default + + protected def receive = produce +} + +class Consumer1 extends Actor with Consumer with Logging { + def endpointUri = "file:data/input" + + def receive = { + case msg: Message => log.info("received %s" format msg.bodyAs(classOf[String])) + } +} + +@consume("jetty:http://0.0.0.0:8877/camel/test1") +class Consumer2 extends Actor { + def receive = { + case msg: Message => reply("Hello %s" format msg.bodyAs(classOf[String])) + } +} + +class Consumer3(transformer: Actor) extends Actor with Consumer { + def endpointUri = "jetty:http://0.0.0.0:8877/camel/welcome" + + def receive = { + case msg: Message => transformer.forward(msg.setBodyAs(classOf[String])) + } +} + +class Transformer(producer: Actor) extends Actor { + protected def receive = { + case msg: Message => producer.forward(msg.transformBody[String]("- %s -" format _)) + } +} + +class Subscriber(name:String, uri: String) extends Actor with Consumer { + def endpointUri = uri + + protected def receive = { + case msg: Message => log.info("%s received: %s" format (name, msg.body)) + } +} + +class Publisher(name: String, uri: String) extends Actor with Producer { + id = name + def endpointUri = uri + override def oneway = true + protected def receive = produce +} + +class PublisherBridge(uri: String, publisher: Actor) extends Actor with Consumer { + def endpointUri = uri + + protected def receive = { + case msg: Message => { + publisher ! 
msg.bodyAs(classOf[String]) + reply("message published") + } + } +} \ No newline at end of file diff --git a/akka-samples/akka-sample-camel/src/main/scala/Application1.scala b/akka-samples/akka-sample-camel/src/main/scala/Application1.scala new file mode 100644 index 0000000000..4a55f2014f --- /dev/null +++ b/akka-samples/akka-sample-camel/src/main/scala/Application1.scala @@ -0,0 +1,28 @@ +package sample.camel + +import se.scalablesolutions.akka.actor.Actor +import se.scalablesolutions.akka.camel.Message +import se.scalablesolutions.akka.remote.RemoteClient + +/** + * @author Martin Krasser + */ +object Application1 { + + // + // TODO: completion of example + // + + def main(args: Array[String]) { + implicit val sender: Option[Actor] = None + + val actor1 = new RemoteActor1 + val actor2 = RemoteClient.actorFor("remote2", "localhost", 7777) + + actor1.start + + println(actor1 !! Message("actor1")) + println(actor2 !! Message("actor2")) + } + +} \ No newline at end of file diff --git a/akka-samples/akka-sample-camel/src/main/scala/Application2.scala b/akka-samples/akka-sample-camel/src/main/scala/Application2.scala new file mode 100644 index 0000000000..83c6e8c439 --- /dev/null +++ b/akka-samples/akka-sample-camel/src/main/scala/Application2.scala @@ -0,0 +1,22 @@ +package sample.camel + +import se.scalablesolutions.akka.camel.service.CamelService +import se.scalablesolutions.akka.remote.RemoteNode + +/** + * @author Martin Krasser + */ +object Application2 { + + // + // TODO: completion of example + // + + def main(args: Array[String]) { + val camelService = CamelService.newInstance + camelService.load + RemoteNode.start("localhost", 7777) + RemoteNode.register("remote2", new RemoteActor2().start) + } + +} \ No newline at end of file diff --git a/akka-samples/akka-sample-camel/src/main/scala/Boot.scala b/akka-samples/akka-sample-camel/src/main/scala/Boot.scala new file mode 100644 index 0000000000..481804de64 --- /dev/null +++ b/akka-samples/akka-sample-camel/src/main/scala/Boot.scala @@ -0,0 +1,76 @@ +package sample.camel + +import org.apache.camel.{Exchange, Processor} +import org.apache.camel.builder.RouteBuilder +import org.apache.camel.impl.DefaultCamelContext +import org.apache.camel.spring.spi.ApplicationContextRegistry +import org.springframework.context.support.ClassPathXmlApplicationContext + +import se.scalablesolutions.akka.actor.SupervisorFactory +import se.scalablesolutions.akka.camel.CamelContextManager +import se.scalablesolutions.akka.config.ScalaConfig._ + +/** + * @author Martin Krasser + */ +class Boot { + + // Create CamelContext with Spring-based registry and custom route builder + + val context = new ClassPathXmlApplicationContext("/sample-camel-context.xml", getClass) + val registry = new ApplicationContextRegistry(context) + CamelContextManager.init(new DefaultCamelContext(registry)) + CamelContextManager.context.addRoutes(new CustomRouteBuilder) + + // Basic example + + val factory = SupervisorFactory( + SupervisorConfig( + RestartStrategy(OneForOne, 3, 100, List(classOf[Exception])), + Supervise(new Consumer1, LifeCycle(Permanent)) :: + Supervise(new Consumer2, LifeCycle(Permanent)) :: Nil)) + factory.newInstance.start + + // Routing example + + val producer = new Producer1 + val mediator = new Transformer(producer) + val consumer = new Consumer3(mediator) + + producer.start + mediator.start + consumer.start + + // Publish subscribe example + + // + // Cometd example is disabled because of unresolved sbt/ivy dependency resolution issues. 
+ // If you want to run this example, make sure to replace all jetty-*-6.1.22.jar files + // on the classpath with corresponding jetty-*-6.1.11.jar files. + // + + //val cometdUri = "cometd://localhost:8111/test/abc?resourceBase=target" + //val cometdSubscriber = new Subscriber("cometd-subscriber", cometdUri).start + //val cometdPublisher = new Publisher("cometd-publisher", cometdUri).start + + val jmsUri = "jms:topic:test" + val jmsSubscriber1 = new Subscriber("jms-subscriber-1", jmsUri).start + val jmsSubscriber2 = new Subscriber("jms-subscriber-2", jmsUri).start + val jmsPublisher = new Publisher("jms-publisher", jmsUri).start + + //val cometdPublisherBridge = new PublisherBridge("jetty:http://0.0.0.0:8877/camel/pub/cometd", cometdPublisher).start + val jmsPublisherBridge = new PublisherBridge("jetty:http://0.0.0.0:8877/camel/pub/jms", jmsPublisher).start + +} + +class CustomRouteBuilder extends RouteBuilder { + def configure { + val actorUri = "actor:%s" format classOf[Consumer2].getName + from("jetty:http://0.0.0.0:8877/camel/test2").to(actorUri) + from("direct:welcome").process(new Processor() { + def process(exchange: Exchange) { + exchange.getOut.setBody("Welcome %s" format exchange.getIn.getBody) + } + }) + } +} \ No newline at end of file diff --git a/akka-samples/akka-sample-chat/README b/akka-samples/akka-sample-chat/README index d2049cd7c0..88720d8c55 100644 --- a/akka-samples/akka-sample-chat/README +++ b/akka-samples/akka-sample-chat/README @@ -10,19 +10,22 @@ For details on how to set up Redis server have a look at http://code.google.com/ Then to run the sample: -1. Set ‘AKKA_HOME’ environment variable to the root of the Akka distribution. -2. Open up a shell and step into the Akka distribution root folder. -3. Build Akka by invoking ‘mvn install -Dmaven.test.skip=true’. This will also build the sample application and deploy it to the ‘$AKKA_HOME/deploy’ directory. -4. Run the microkernel - export AKKA_HOME=... - cd $AKKA_HOME - java -jar ./dist/akka-0.6.jar -5. Now start up a new shell and go down into the ‘./akka-samples/akka-sample-chat’ directory. -6. Invoke ‘mvn scala:console -o’. This will give you a Scala REPL (interpreter) with the chat application and all its dependency JARs on the classpath. -7. Simply paste in the whole code block with the ‘Runner’ object above and invoke ‘Runner.run’. This runs a simulated client session that will connect to the running server in the microkernel. -8. Invoke ‘Runner.run’ again and again… +1. Install the Redis network storage. Download it from [http://code.google.com/p/redis/]. +2. Open up a shell and start up an instance of Redis. +3. Fire up two shells. For each of them: + - Step down into the root of the Akka distribution. + - Set 'export AKKA_HOME=.'. + - Run 'sbt console' to start up a REPL (interpreter). +4. In the first REPL, execute: + - scala> import se.scalablesolutions.akka.sample.chat._ + - scala> ChatService.start +5. In the second REPL, execute: + - scala> import se.scalablesolutions.akka.sample.chat._ + - scala> Runner.run +6. See the chat simulation run. +7. Run it again to see full speed after first initialization. -Now you could test client reconnect by killing the running microkernel and start it up again. See the client reconnect take place in the REPL shell. +Now you could test client reconnect by killing the console running the ChatService and starting it up again. See the client reconnect take place in the REPL shell. That’s it. Have fun.
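For reference, a sketch of the kind of client session that Runner.run simulates, using the ChatClient API from ChatServer.scala below (the message text is illustrative):

import se.scalablesolutions.akka.sample.chat._

val client = new ChatClient("jonas") // connects to "chat:service" at localhost:9999 via RemoteClient
client.login
client.post("Hi there")
println("Chat log: " + client.chatLog) // fetches the persisted ChatLog from the remote ChatService
client.logout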
diff --git a/akka-samples/akka-sample-chat/pom.xml b/akka-samples/akka-sample-chat/pom.xml deleted file mode 100644 index 20ee421978..0000000000 --- a/akka-samples/akka-sample-chat/pom.xml +++ /dev/null @@ -1,38 +0,0 @@ - - 4.0.0 - - akka-sample-chat - Akka Chat Sample Module - - jar - - - akka-samples-parent - se.scalablesolutions.akka - 0.7-SNAPSHOT - - - - src/main/scala - - - maven-antrun-plugin - - - install - - - - - - - run - - - - - - - diff --git a/akka-samples/akka-sample-chat/src/main/scala/ChatServer.scala b/akka-samples/akka-sample-chat/src/main/scala/ChatServer.scala index d03c209706..4ff7e1e0c6 100644 --- a/akka-samples/akka-sample-chat/src/main/scala/ChatServer.scala +++ b/akka-samples/akka-sample-chat/src/main/scala/ChatServer.scala @@ -1,28 +1,48 @@ -/**ChatStorage +/** * Copyright (C) 2009-2010 Scalable Solutions AB . */ package se.scalablesolutions.akka.sample.chat +import scala.collection.mutable.HashMap + import se.scalablesolutions.akka.actor.{SupervisorFactory, Actor, RemoteActor} +import se.scalablesolutions.akka.remote.{RemoteNode, RemoteClient} +import se.scalablesolutions.akka.persistence.common.PersistentVector +import se.scalablesolutions.akka.persistence.redis.RedisStorage import se.scalablesolutions.akka.stm.Transaction._ -import se.scalablesolutions.akka.remote.RemoteServer -import se.scalablesolutions.akka.util.Logging import se.scalablesolutions.akka.config.ScalaConfig._ import se.scalablesolutions.akka.config.OneForOneStrategy -import scala.collection.mutable.HashMap -import se.scalablesolutions.akka.state.{PersistentVector, RedisStorage} +import se.scalablesolutions.akka.util.Logging /****************************************************************************** - To run the sample: - 1. Run 'mvn install' (builds and deploys jar to AKKA_HOME/deploy) - 2. In another shell run 'java -jar ./dist/akka-0.6.jar' to start up Akka microkernel - 3. In the first shell run 'mvn scala:console -o' - 4. In the REPL you get execute: +Akka Chat Client/Server Sample Application + +First we need to download, build and start up Redis: + +1. Download Redis from http://code.google.com/p/redis/downloads/list. +2. Step into the distribution. +3. Build: ‘make install’. +4. Run: ‘./redis-server’. +For details on how to set up Redis server have a look at http://code.google.com/p/redis/wiki/QuickStart. + +Then to run the sample: + +1. Fire up two shells. For each of them: + - Step down into the root of the Akka distribution. + - Set 'export AKKA_HOME=.'. + - Run 'sbt console' to start up a REPL (interpreter). +2. In the first REPL, execute: + - scala> import se.scalablesolutions.akka.sample.chat._ + - scala> ChatService.start +3. In the second REPL, execute: - scala> import se.scalablesolutions.akka.sample.chat._ - scala> Runner.run - 5. See the chat simulation run - 6. Run it again to see full speed after first initialization +4. See the chat simulation run. +5. Run it again to see full speed after first initialization. + +That’s it. Have fun. + ******************************************************************************/ /** @@ -40,10 +60,12 @@ case class ChatMessage(from: String, message: String) extends Event */ class ChatClient(val name: String) { import Actor.Sender.Self - def login = ChatService ! Login(name) - def logout = ChatService ! Logout(name) - def post(message: String) = ChatService ! ChatMessage(name, name + ": " + message) - def chatLog: ChatLog = (ChatService !!
GetChatLog(name)).getOrElse(throw new Exception("Couldn't get the chat log from ChatServer")) + val chat = RemoteClient.actorFor("chat:service", "localhost", 9999) + + def login = chat ! Login(name) + def logout = chat ! Logout(name) + def post(message: String) = chat ! ChatMessage(name, name + ": " + message) + def chatLog: ChatLog = (chat !! GetChatLog(name)).getOrElse(throw new Exception("Couldn't get the chat log from ChatServer")) } /** @@ -75,8 +97,9 @@ trait ChatStorage extends Actor */ class RedisChatStorage extends ChatStorage { lifeCycle = Some(LifeCycle(Permanent)) - - private var chatLog = atomic { RedisStorage.getVector("akka.chat.log") } + val CHAT_LOG = "akka.chat.log" + + private var chatLog = atomic { RedisStorage.getVector(CHAT_LOG) } log.info("Redis-based chat storage is starting up...") @@ -94,7 +117,7 @@ class RedisChatStorage extends ChatStorage { reply(ChatLog(messageList)) } - override def postRestart(reason: Throwable) = chatLog = RedisStorage.getVector("akka.chat.log") + override def postRestart(reason: Throwable) = chatLog = RedisStorage.getVector(CHAT_LOG) } /** @@ -180,16 +203,19 @@ object ChatService extends ChatServer with SessionManagement with ChatManagement with - RedisChatStorageFactory + RedisChatStorageFactory { + override def start: Actor = { + super.start + RemoteNode.start("localhost", 9999) + RemoteNode.register("chat:service", this) + this + } +} /** * Test runner emulating a chat session. */ object Runner { - // create a handle to the remote ChatService - ChatService.makeRemote("localhost", 9999) - ChatService.start - def run = { val client = new ChatClient("jonas") diff --git a/akka-samples/akka-sample-lift/config/akka.conf b/akka-samples/akka-sample-lift/config/akka.conf deleted file mode 100644 index 4a02b208bb..0000000000 --- a/akka-samples/akka-sample-lift/config/akka.conf +++ /dev/null @@ -1,64 +0,0 @@ -##################### -# Akka Config File # -################### - -# This file has all the default settings, so all these could be removed with no visible effect. -# Modify as needed. - - - filename = "./logs/akka.log" - roll = "daily" # Options: never, hourly, daily, sunday/monday/... - level = "debug" # Options: fatal, critical, error, warning, info, debug, trace - console = on - # syslog_host = "" - # syslog_server_name = "" - - - - version = "0.7-SNAPSHOT" - - - timeout = 5000 # default timeout for future based invocations - concurrent-mode = off # if turned on, then the same actor instance is allowed to execute concurrently - - # e.g. 
departing from the actor model for better performance - serialize-messages = on # does a deep clone of (non-primitive) messages to ensure immutability - - - - service = on - restart-on-collision = off # (not implemented yet) if 'on' then it reschedules the transaction, - # if 'off' then throws an exception or rollback for user to handle - wait-for-completion = 100 # how long time in millis a transaction should be given time to complete when a collision is detected - wait-nr-of-times = 3 # the number of times it should check for completion of a pending transaction upon collision - distributed = off # not implemented yet - - - - service = on - hostname = "localhost" - port = 9999 - connection-timeout = 1000 # in millis - - - - service = on - hostname = "localhost" - port = 9998 - - - - system = "cassandra" # Options: cassandra (coming: terracotta, redis, tokyo-cabinet, tokyo-tyrant, voldemort, memcached, hazelcast) - - - service = on - storage-format = "java" # Options: java, scala-json, java-json - blocking = false # inserts and queries should be blocking or not - - - service = on - pidfile = "akka.pid" - - - - - diff --git a/akka-samples/akka-sample-lift/pom.xml b/akka-samples/akka-sample-lift/pom.xml deleted file mode 100644 index a07c288e31..0000000000 --- a/akka-samples/akka-sample-lift/pom.xml +++ /dev/null @@ -1,50 +0,0 @@ - - - 4.0.0 - - akka-sample-lift - Akka Lift Sample Module - - war - - - akka-samples-parent - se.scalablesolutions.akka - 0.7-SNAPSHOT - - - - 1.1-M6 - - - - - net.liftweb - lift-util - ${lift.version} - - - net.liftweb - lift-webkit - ${lift.version} - - - javax.servlet - servlet-api - 2.5 - provided - - - junit - junit - 4.5 - test - - - org.mortbay.jetty - jetty - [6.1.6,) - test - - - diff --git a/akka-samples/akka-sample-lift/src/main/scala/akka/SimpleService.scala b/akka-samples/akka-sample-lift/src/main/scala/akka/SimpleService.scala index 3f18f7d357..35a4158642 100644 --- a/akka-samples/akka-sample-lift/src/main/scala/akka/SimpleService.scala +++ b/akka-samples/akka-sample-lift/src/main/scala/akka/SimpleService.scala @@ -2,7 +2,8 @@ package sample.lift import se.scalablesolutions.akka.actor.{Transactor, Actor} import se.scalablesolutions.akka.config.ScalaConfig._ -import se.scalablesolutions.akka.state.{CassandraStorage, TransactionalState} +import se.scalablesolutions.akka.stm.TransactionalState +import se.scalablesolutions.akka.persistence.cassandra.CassandraStorage import java.lang.Integer import javax.ws.rs.{GET, Path, Produces} diff --git a/akka-samples/akka-sample-rest-java/pom.xml b/akka-samples/akka-sample-rest-java/pom.xml deleted file mode 100644 index 6539a0234b..0000000000 --- a/akka-samples/akka-sample-rest-java/pom.xml +++ /dev/null @@ -1,49 +0,0 @@ - - 4.0.0 - - akka-sample-rest-java - Akka REST Java Sample Module - - jar - - - akka-samples-parent - se.scalablesolutions.akka - 0.7-SNAPSHOT - - - - src/main/java - - - org.apache.maven.plugins - maven-compiler-plugin - - 1.5 - 1.5 - - **/* - - - - - maven-antrun-plugin - - - install - - - - - - - run - - - - - - - diff --git a/akka-samples/akka-sample-rest-java/src/main/java/sample/java/PersistentSimpleService.java b/akka-samples/akka-sample-rest-java/src/main/java/sample/java/PersistentSimpleService.java index 9a0a38f619..221b5613b8 100644 --- a/akka-samples/akka-sample-rest-java/src/main/java/sample/java/PersistentSimpleService.java +++ b/akka-samples/akka-sample-rest-java/src/main/java/sample/java/PersistentSimpleService.java @@ -8,11 +8,11 @@ import javax.ws.rs.Path; import javax.ws.rs.GET; 
import javax.ws.rs.Produces; -import se.scalablesolutions.akka.annotation.transactionrequired; -import se.scalablesolutions.akka.annotation.prerestart; -import se.scalablesolutions.akka.annotation.postrestart; -import se.scalablesolutions.akka.state.PersistentMap; -import se.scalablesolutions.akka.state.CassandraStorage; +import se.scalablesolutions.akka.actor.annotation.transactionrequired; +import se.scalablesolutions.akka.actor.annotation.prerestart; +import se.scalablesolutions.akka.actor.annotation.postrestart; +import se.scalablesolutions.akka.persistence.common.PersistentMap; +import se.scalablesolutions.akka.persistence.cassandra.CassandraStorage; import java.nio.ByteBuffer; diff --git a/akka-samples/akka-sample-rest-java/src/main/java/sample/java/SimpleService.java b/akka-samples/akka-sample-rest-java/src/main/java/sample/java/SimpleService.java index 54468495bc..b10bcdaea4 100644 --- a/akka-samples/akka-sample-rest-java/src/main/java/sample/java/SimpleService.java +++ b/akka-samples/akka-sample-rest-java/src/main/java/sample/java/SimpleService.java @@ -8,11 +8,11 @@ import javax.ws.rs.Path; import javax.ws.rs.GET; import javax.ws.rs.Produces; -import se.scalablesolutions.akka.annotation.transactionrequired; -import se.scalablesolutions.akka.annotation.prerestart; -import se.scalablesolutions.akka.annotation.postrestart; -import se.scalablesolutions.akka.state.TransactionalState; -import se.scalablesolutions.akka.state.TransactionalMap; +import se.scalablesolutions.akka.actor.annotation.transactionrequired; +import se.scalablesolutions.akka.actor.annotation.prerestart; +import se.scalablesolutions.akka.actor.annotation.postrestart; +import se.scalablesolutions.akka.stm.TransactionalState; +import se.scalablesolutions.akka.stm.TransactionalMap; /** * Try service out by invoking (multiple times): diff --git a/akka-samples/akka-sample-rest-scala/pom.xml b/akka-samples/akka-sample-rest-scala/pom.xml deleted file mode 100644 index e62a329f8c..0000000000 --- a/akka-samples/akka-sample-rest-scala/pom.xml +++ /dev/null @@ -1,46 +0,0 @@ - - 4.0.0 - - akka-sample-rest-scala - Akka REST Scala Sample Module - - jar - - - akka-samples-parent - se.scalablesolutions.akka - 0.7-SNAPSHOT - - - - - javax.ws.rs - jsr311-api - 1.0 - - - - - src/main/scala - - - maven-antrun-plugin - - - install - - - - - - - run - - - - - - - diff --git a/akka-samples/akka-sample-rest-scala/src/main/scala/SimpleService.scala b/akka-samples/akka-sample-rest-scala/src/main/scala/SimpleService.scala index 39a6a2a0d2..04408fc0f4 100644 --- a/akka-samples/akka-sample-rest-scala/src/main/scala/SimpleService.scala +++ b/akka-samples/akka-sample-rest-scala/src/main/scala/SimpleService.scala @@ -5,10 +5,11 @@ package sample.scala import se.scalablesolutions.akka.actor.{Transactor, SupervisorFactory, Actor} -import se.scalablesolutions.akka.state.{CassandraStorage, TransactionalState} +import se.scalablesolutions.akka.stm.TransactionalState +import se.scalablesolutions.akka.persistence.cassandra.CassandraStorage import se.scalablesolutions.akka.config.ScalaConfig._ import se.scalablesolutions.akka.util.Logging -import se.scalablesolutions.akka.comet.{AkkaClusterBroadcastFilter} +import se.scalablesolutions.akka.comet.AkkaClusterBroadcastFilter import java.lang.Integer import java.nio.ByteBuffer diff --git a/akka-samples/akka-sample-security/pom.xml b/akka-samples/akka-sample-security/pom.xml deleted file mode 100644 index 86f331fd65..0000000000 --- a/akka-samples/akka-sample-security/pom.xml +++ /dev/null @@ -1,52 +0,0 @@ - - 
4.0.0 - - akka-sample-security - Akka Sample Security Module - - jar - - - akka-samples-parent - se.scalablesolutions.akka - 0.7-SNAPSHOT - - - - - javax.ws.rs - jsr311-api - 1.0 - - - javax.annotation - jsr250-api - 1.0 - - - - - - src/main/scala - - - maven-antrun-plugin - - - install - - - - - - - run - - - - - - - diff --git a/akka-samples/akka-sample-security/src/main/resources/akka.conf b/akka-samples/akka-sample-security/src/main/resources/akka.conf deleted file mode 100644 index 60f68a64ec..0000000000 --- a/akka-samples/akka-sample-security/src/main/resources/akka.conf +++ /dev/null @@ -1,35 +0,0 @@ -#################### -# Akka Config File # -#################### - -# This file has all the default settings, so all these could be removed with no visible effect. -# Modify as needed. - - - version = "0.7-SNAPSHOT" - - boot = ["se.scalablesolutions.akka.security.samples.Boot"] # FQN to the class doing initial active object/actor - # supervisor bootstrap, should be defined in default constructor - - - filters = "se.scalablesolutions.akka.security.AkkaSecurityFilterFactory" - - # only one authenticator can be enabled for the security filter factory - authenticator = "se.scalablesolutions.akka.security.samples.BasicAuthenticationService" -# authenticator = "se.scalablesolutions.akka.security.samples.DigestAuthenticationService" -# authenticator = "se.scalablesolutions.akka.security.samples.SpnegoAuthenticationService" - -# -# -# servicePrincipal = "HTTP/localhost@EXAMPLE.COM" -# keyTabLocation = "URL to keytab" -# kerberosDebug = "true" -# realm = "EXAMPLE.COM" -# - - # service = on - # hostname = "localhost" - # port = 9998 - - - diff --git a/akka-samples/akka-sample-security/src/main/scala/SimpleService.scala b/akka-samples/akka-sample-security/src/main/scala/SimpleService.scala index 59e3a5c85e..b6183cfda9 100644 --- a/akka-samples/akka-sample-security/src/main/scala/SimpleService.scala +++ b/akka-samples/akka-sample-security/src/main/scala/SimpleService.scala @@ -8,7 +8,7 @@ import se.scalablesolutions.akka.actor.{SupervisorFactory, Actor} import se.scalablesolutions.akka.config.ScalaConfig._ import se.scalablesolutions.akka.util.Logging import se.scalablesolutions.akka.security.{DigestAuthenticationActor, UserInfo} -import se.scalablesolutions.akka.state.TransactionalState +import se.scalablesolutions.akka.stm.TransactionalState class Boot { val factory = SupervisorFactory( diff --git a/akka-samples/pom.xml b/akka-samples/pom.xml deleted file mode 100644 index ad94fc8aab..0000000000 --- a/akka-samples/pom.xml +++ /dev/null @@ -1,56 +0,0 @@ - - 4.0.0 - - akka-samples-parent - Akka Sample Modules - - pom - - - akka - se.scalablesolutions.akka - 0.7-SNAPSHOT - - - - akka-sample-chat - akka-sample-lift - akka-sample-security - akka-sample-rest-scala - akka-sample-rest-java - - - - - akka-core - ${project.groupId} - ${project.version} - - - akka-persistence-cassandra - ${project.groupId} - ${project.version} - - - akka-persistence-redis - ${project.groupId} - ${project.version} - - - akka-rest - ${project.groupId} - ${project.version} - - - akka-comet - ${project.groupId} - ${project.version} - - - akka-security - ${project.groupId} - ${project.version} - - - diff --git a/akka-security/pom.xml b/akka-security/pom.xml deleted file mode 100644 index 8d7c66a5f7..0000000000 --- a/akka-security/pom.xml +++ /dev/null @@ -1,63 +0,0 @@ - - 4.0.0 - - akka-security - Akka Security Module - - jar - - - akka - se.scalablesolutions.akka - 0.7-SNAPSHOT - - - - - akka-core - ${project.groupId} - 
${project.version} - - - javax.annotation - jsr250-api - 1.0 - - - com.sun.jersey - jersey-server - 1.1.3-ea - - - javax.ws.rs - jsr311-api - 1.0 - - - net.liftweb - lift-util - 1.1-M6 - - - - - org.scalatest - scalatest - 1.0 - test - - - junit - junit - 4.5 - test - - - org.mockito - mockito-all - 1.8.0 - test - - - diff --git a/akka-security/src/main/scala/Security.scala b/akka-security/src/main/scala/Security.scala index f6f2b939a1..8a144f4282 100644 --- a/akka-security/src/main/scala/Security.scala +++ b/akka-security/src/main/scala/Security.scala @@ -22,20 +22,21 @@ package se.scalablesolutions.akka.security -import _root_.se.scalablesolutions.akka.actor.{Scheduler, Actor, ActorRegistry} -import _root_.se.scalablesolutions.akka.util.Logging -import _root_.se.scalablesolutions.akka.Config +import se.scalablesolutions.akka.actor.{Scheduler, Actor, ActorRegistry} +import se.scalablesolutions.akka.util.Logging +import se.scalablesolutions.akka.config.Config -import _root_.com.sun.jersey.api.model.AbstractMethod -import _root_.com.sun.jersey.spi.container.{ResourceFilterFactory, ContainerRequest, ContainerRequestFilter, ContainerResponse, ContainerResponseFilter, ResourceFilter} -import _root_.com.sun.jersey.core.util.Base64 -import _root_.javax.ws.rs.core.{SecurityContext, Context, Response} -import _root_.javax.ws.rs.WebApplicationException -import _root_.javax.annotation.security.{DenyAll, PermitAll, RolesAllowed} -import _root_.java.security.Principal -import _root_.java.util.concurrent.TimeUnit +import com.sun.jersey.api.model.AbstractMethod +import com.sun.jersey.spi.container.{ResourceFilterFactory, ContainerRequest, ContainerRequestFilter, ContainerResponse, ContainerResponseFilter, ResourceFilter} +import com.sun.jersey.core.util.Base64 -import _root_.net.liftweb.util.{SecurityHelpers, StringHelpers, IoHelpers} +import javax.ws.rs.core.{SecurityContext, Context, Response} +import javax.ws.rs.WebApplicationException +import javax.annotation.security.{DenyAll, PermitAll, RolesAllowed} +import java.security.Principal +import java.util.concurrent.TimeUnit + +import net.liftweb.util.{SecurityHelpers, StringHelpers, IoHelpers} object Enc extends SecurityHelpers with StringHelpers with IoHelpers @@ -86,10 +87,11 @@ class AkkaSecurityFilterFactory extends ResourceFilterFactory with Logging { override def filter(request: ContainerRequest): ContainerRequest = rolesAllowed match { case Some(roles) => { - (authenticator !! 
(Authenticate(request, roles), 10000)).get.asInstanceOf[AnyRef] match { - case OK => request - case r if r.isInstanceOf[Response] => + (authenticator.!![AnyRef](Authenticate(request, roles), 10000)) match { + case Some(OK) => request + case Some(r) if r.isInstanceOf[Response] => throw new WebApplicationException(r.asInstanceOf[Response]) + case None => throw new WebApplicationException(408) case x => { log.error("Authenticator replied with unexpected result [%s]", x); throw new WebApplicationException(Response.Status.INTERNAL_SERVER_ERROR) @@ -329,19 +331,19 @@ trait DigestAuthenticationActor extends AuthenticationActor[DigestCredentials] { def noncePurgeInterval = 2 * 60 * 1000 //ms } -import _root_.java.security.Principal -import _root_.java.security.PrivilegedActionException -import _root_.java.security.PrivilegedExceptionAction +import java.security.Principal +import java.security.PrivilegedActionException +import java.security.PrivilegedExceptionAction -import _root_.javax.security.auth.login.AppConfigurationEntry -import _root_.javax.security.auth.login.Configuration -import _root_.javax.security.auth.login.LoginContext -import _root_.javax.security.auth.Subject -import _root_.javax.security.auth.kerberos.KerberosPrincipal +import javax.security.auth.login.AppConfigurationEntry +import javax.security.auth.login.Configuration +import javax.security.auth.login.LoginContext +import javax.security.auth.Subject +import javax.security.auth.kerberos.KerberosPrincipal -import _root_.org.ietf.jgss.GSSContext -import _root_.org.ietf.jgss.GSSCredential -import _root_.org.ietf.jgss.GSSManager +import org.ietf.jgss.GSSContext +import org.ietf.jgss.GSSCredential +import org.ietf.jgss.GSSManager trait SpnegoAuthenticationActor extends AuthenticationActor[SpnegoCredentials] { override def unauthorized = @@ -349,7 +351,7 @@ trait SpnegoAuthenticationActor extends AuthenticationActor[SpnegoCredentials] { // for some reason the jersey Base64 class does not work with kerberos // but the commons Base64 does - import _root_.org.apache.commons.codec.binary.Base64 + import org.apache.commons.codec.binary.Base64 override def extractCredentials(r: Req): Option[SpnegoCredentials] = { val AuthHeader = """Negotiate\s(.*)""".r diff --git a/akka-security/src/test/scala/SecuritySpec.scala b/akka-security/src/test/scala/SecuritySpec.scala index 15e84381ea..e56148b5df 100644 --- a/akka-security/src/test/scala/SecuritySpec.scala +++ b/akka-security/src/test/scala/SecuritySpec.scala @@ -14,9 +14,9 @@ import org.mockito.Mockito._ import org.mockito.Matchers._ import org.junit.{Before, After, Test} -import _root_.javax.ws.rs.core.{SecurityContext, Context, Response} -import _root_.com.sun.jersey.spi.container.{ResourceFilterFactory, ContainerRequest, ContainerRequestFilter, ContainerResponse, ContainerResponseFilter, ResourceFilter} -import _root_.com.sun.jersey.core.util.Base64 +import javax.ws.rs.core.{SecurityContext, Context, Response} +import com.sun.jersey.spi.container.{ResourceFilterFactory, ContainerRequest, ContainerRequestFilter, ContainerResponse, ContainerResponseFilter, ResourceFilter} +import com.sun.jersey.core.util.Base64 class BasicAuthenticatorSpec extends junit.framework.TestCase with Suite with MockitoSugar with MustMatchers { diff --git a/akka-util-java/pom.xml b/akka-util-java/pom.xml deleted file mode 100644 index e0a729491b..0000000000 --- a/akka-util-java/pom.xml +++ /dev/null @@ -1,74 +0,0 @@ - - 4.0.0 - - akka-util-java - Akka Java Utilities Module - - jar - - - akka - 
se.scalablesolutions.akka - 0.7-SNAPSHOT - - - - - org.guiceyfruit - guice-core - 2.0-beta-4 - - - com.google.protobuf - protobuf-java - 2.2.0 - - - org.multiverse - multiverse-alpha - 0.3 - jar-with-dependencies - - - org.multiverse - multiverse-core - - - asm - asm-tree - - - asm - asm-analysis - - - asm - asm-commons - - - asm - asm-util - - - - - - - src/main/java - src/test/java - - - org.apache.maven.plugins - maven-compiler-plugin - - 1.5 - 1.5 - - **/* - - - - - - diff --git a/akka-util-java/project/build.properties b/akka-util-java/project/build.properties new file mode 100644 index 0000000000..f9ec63abf2 --- /dev/null +++ b/akka-util-java/project/build.properties @@ -0,0 +1,7 @@ +project.organization=se.scalablesolutions.akka +project.name=akka-util-java +project.version=0.7-SNAPSHOT +scala.version=2.7.7 +sbt.version=0.7.1 +def.scala.version=2.7.7 +build.scala.versions=2.7.7 diff --git a/akka-util-java/project/build/AkkaJavaUtilProject.scala b/akka-util-java/project/build/AkkaJavaUtilProject.scala new file mode 100644 index 0000000000..0376ff83d9 --- /dev/null +++ b/akka-util-java/project/build/AkkaJavaUtilProject.scala @@ -0,0 +1,17 @@ +import sbt._ + +class AkkaJavaUtilProject(info: ProjectInfo) extends DefaultProject(info) { + + val databinder = "DataBinder" at "http://databinder.net/repo" + val configgy = "Configgy" at "http://www.lag.net/repo" + val multiverse = "Multiverse" at "http://multiverse.googlecode.com/svn/maven-repository/releases" + val jBoss = "jBoss" at "http://repository.jboss.org/maven2" + val guiceyfruit = "GuiceyFruit" at "http://guiceyfruit.googlecode.com/svn/repo/releases/" + + val guicey = "org.guiceyfruit" % "guice-core" % "2.0-beta-4" % "compile" + val proto = "com.google.protobuf" % "protobuf-java" % "2.2.0" % "compile" + val multi = "org.multiverse" % "multiverse-alpha" % "0.3" % "compile" + + override def packageDocsJar = defaultJarPath("-javadoc.jar") + override def packageSrcJar= defaultJarPath("-sources.jar") +} diff --git a/akka-util-java/src/main/java/se/scalablesolutions/akka/annotation/configuration.java b/akka-util-java/src/main/java/se/scalablesolutions/akka/annotation/configuration.java index b0139ac6f0..9c5375398b 100644 --- a/akka-util-java/src/main/java/se/scalablesolutions/akka/annotation/configuration.java +++ b/akka-util-java/src/main/java/se/scalablesolutions/akka/annotation/configuration.java @@ -2,7 +2,7 @@ * Copyright (C) 2009-2010 Scalable Solutions AB */ -package se.scalablesolutions.akka.annotation; +package se.scalablesolutions.akka.actor.annotation; import java.lang.annotation.*; diff --git a/akka-util-java/src/main/java/se/scalablesolutions/akka/annotation/consume.java b/akka-util-java/src/main/java/se/scalablesolutions/akka/annotation/consume.java new file mode 100644 index 0000000000..17ac05bf17 --- /dev/null +++ b/akka-util-java/src/main/java/se/scalablesolutions/akka/annotation/consume.java @@ -0,0 +1,18 @@ +/** + * Copyright (C) 2009-2010 Scalable Solutions AB + */ + + package se.scalablesolutions.akka.actor.annotation; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +@Retention(RetentionPolicy.RUNTIME) +@Target(ElementType.TYPE) +public @interface consume { + + public abstract String value(); + +} \ No newline at end of file diff --git a/akka-util-java/src/main/java/se/scalablesolutions/akka/annotation/immutable.java b/akka-util-java/src/main/java/se/scalablesolutions/akka/annotation/immutable.java 
index 9dd2d17322..84dbbf4636 100644 --- a/akka-util-java/src/main/java/se/scalablesolutions/akka/annotation/immutable.java +++ b/akka-util-java/src/main/java/se/scalablesolutions/akka/annotation/immutable.java @@ -2,7 +2,7 @@ * Copyright (C) 2009-2010 Scalable Solutions AB */ -package se.scalablesolutions.akka.annotation; +package se.scalablesolutions.akka.actor.annotation; import java.lang.annotation.*; diff --git a/akka-util-java/src/main/java/se/scalablesolutions/akka/annotation/inittransactionalstate.java b/akka-util-java/src/main/java/se/scalablesolutions/akka/annotation/inittransactionalstate.java index 50e42546ad..35c5f05afe 100644 --- a/akka-util-java/src/main/java/se/scalablesolutions/akka/annotation/inittransactionalstate.java +++ b/akka-util-java/src/main/java/se/scalablesolutions/akka/annotation/inittransactionalstate.java @@ -2,7 +2,7 @@ * Copyright (C) 2009-2010 Scalable Solutions AB */ -package se.scalablesolutions.akka.annotation; +package se.scalablesolutions.akka.actor.annotation; import java.lang.annotation.*; diff --git a/akka-util-java/src/main/java/se/scalablesolutions/akka/annotation/oneway.java b/akka-util-java/src/main/java/se/scalablesolutions/akka/annotation/oneway.java index fa7084bb07..45440b5613 100644 --- a/akka-util-java/src/main/java/se/scalablesolutions/akka/annotation/oneway.java +++ b/akka-util-java/src/main/java/se/scalablesolutions/akka/annotation/oneway.java @@ -2,7 +2,7 @@ * Copyright (C) 2009-2010 Scalable Solutions AB */ -package se.scalablesolutions.akka.annotation; +package se.scalablesolutions.akka.actor.annotation; import java.lang.annotation.*; diff --git a/akka-util-java/src/main/java/se/scalablesolutions/akka/annotation/postrestart.java b/akka-util-java/src/main/java/se/scalablesolutions/akka/annotation/postrestart.java index d003a38df8..5eed474832 100644 --- a/akka-util-java/src/main/java/se/scalablesolutions/akka/annotation/postrestart.java +++ b/akka-util-java/src/main/java/se/scalablesolutions/akka/annotation/postrestart.java @@ -2,7 +2,7 @@ * Copyright (C) 2009-2010 Scalable Solutions AB */ -package se.scalablesolutions.akka.annotation; +package se.scalablesolutions.akka.actor.annotation; import java.lang.annotation.*; diff --git a/akka-util-java/src/main/java/se/scalablesolutions/akka/annotation/prerestart.java b/akka-util-java/src/main/java/se/scalablesolutions/akka/annotation/prerestart.java index e65f38cad6..94f9a01405 100644 --- a/akka-util-java/src/main/java/se/scalablesolutions/akka/annotation/prerestart.java +++ b/akka-util-java/src/main/java/se/scalablesolutions/akka/annotation/prerestart.java @@ -2,7 +2,7 @@ * Copyright (C) 2009-2010 Scalable Solutions AB */ -package se.scalablesolutions.akka.annotation; +package se.scalablesolutions.akka.actor.annotation; import java.lang.annotation.*; diff --git a/akka-util-java/src/main/java/se/scalablesolutions/akka/annotation/state.java b/akka-util-java/src/main/java/se/scalablesolutions/akka/annotation/state.java index 1e627dde8e..509d129c1b 100644 --- a/akka-util-java/src/main/java/se/scalablesolutions/akka/annotation/state.java +++ b/akka-util-java/src/main/java/se/scalablesolutions/akka/annotation/state.java @@ -2,7 +2,7 @@ * Copyright (C) 2009-2010 Scalable Solutions AB */ -package se.scalablesolutions.akka.annotation; +package se.scalablesolutions.akka.actor.annotation; import java.lang.annotation.*; diff --git a/akka-util-java/src/main/java/se/scalablesolutions/akka/annotation/transactionrequired.java 
b/akka-util-java/src/main/java/se/scalablesolutions/akka/annotation/transactionrequired.java index c45482c467..c41a09ee46 100644 --- a/akka-util-java/src/main/java/se/scalablesolutions/akka/annotation/transactionrequired.java +++ b/akka-util-java/src/main/java/se/scalablesolutions/akka/annotation/transactionrequired.java @@ -2,7 +2,7 @@ * Copyright (C) 2009-2010 Scalable Solutions AB */ -package se.scalablesolutions.akka.annotation; +package se.scalablesolutions.akka.actor.annotation; import java.lang.annotation.*; diff --git a/akka-util-java/src/main/java/se/scalablesolutions/akka/stm/AtomicTemplate.java b/akka-util-java/src/main/java/se/scalablesolutions/akka/stm/AtomicTemplate.java deleted file mode 100644 index a693ff1248..0000000000 --- a/akka-util-java/src/main/java/se/scalablesolutions/akka/stm/AtomicTemplate.java +++ /dev/null @@ -1,341 +0,0 @@ -package se.scalablesolutions.akka.stm; - -import static org.multiverse.api.GlobalStmInstance.getGlobalStmInstance; -import org.multiverse.api.Stm; -import static org.multiverse.api.ThreadLocalTransaction.getThreadLocalTransaction; -import static org.multiverse.api.ThreadLocalTransaction.setThreadLocalTransaction; -import org.multiverse.api.Transaction; -import org.multiverse.api.TransactionStatus; -import org.multiverse.api.exceptions.CommitFailureException; -import org.multiverse.api.exceptions.LoadException; -import org.multiverse.api.exceptions.RetryError; -import org.multiverse.api.exceptions.TooManyRetriesException; -import org.multiverse.templates.AbortedException; -import org.multiverse.utils.latches.CheapLatch; -import org.multiverse.utils.latches.Latch; - -import static java.lang.String.format; -import java.util.logging.Logger; - -/** - * A Template that handles the boilerplate code for transactions. A transaction will be placed if none is available - * around a section and if all goes right, commits at the end. - * <p/>
    - * example: - * <pre>
    - * new AtomicTemplate(){
    - *    Object execute(Transaction t){
    - *        queue.push(1);
    - *        return null;
    - *    }
    - * }.execute();
    - * </pre>
    - * <p/>
    - * It could also be that the transaction is retried (e.g. caused by optimistic locking failures). This is also a task - * for the template. In the future this retry behavior will be customizable. - * <p/>
    - * If a transaction is already available on the TransactionThreadLocal, no new transaction is started and essentially - * the whole AtomicTemplate is ignored. - * <p/>
    - * If no transaction is available on the TransactionThreadLocal, a new one will be created and used during the execution - * of the AtomicTemplate and will be removed once the AtomicTemplate finishes. - * <p/>
    - * All uncaught throwables lead to a rollback of the transaction. - * <p/>
    - * AtomicTemplates are not thread-safe to use. - * <p/>
    - * AtomicTemplates can completely work without threadlocals. See the {@link AtomicTemplate#AtomicTemplate(org.multiverse.api.Stm - * ,String, boolean, boolean, int)} for more information. - * - * @author Peter Veentjer - */ -public abstract class AtomicTemplate { - - private final static Logger logger = Logger.getLogger(AtomicTemplate.class.getName()); - - private final Stm stm; - private final boolean ignoreThreadLocalTransaction; - private final int retryCount; - private final boolean readonly; - private int attemptCount; - private final String familyName; - - /** - * Creates a new AtomicTemplate that uses the STM stored in the GlobalStm and works the the {@link - * org.multiverse.utils.ThreadLocalTransaction}. - */ - public AtomicTemplate() { - this(getGlobalStmInstance()); - } - - public AtomicTemplate(boolean readonly) { - this(getGlobalStmInstance(), null, false, readonly, Integer.MAX_VALUE); - } - - /** - * Creates a new AtomicTemplate using the provided stm. The transaction used is stores/retrieved from the {@link - * org.multiverse.utils.ThreadLocalTransaction}. - * - * @param stm the stm to use for transactions. - * @throws NullPointerException if stm is null. - */ - public AtomicTemplate(Stm stm) { - this(stm, null, false, false, Integer.MAX_VALUE); - } - - public AtomicTemplate(String familyName, boolean readonly, int retryCount) { - this(getGlobalStmInstance(), familyName, false, readonly, retryCount); - } - - /** - * Creates a new AtomicTemplate that uses the provided STM. This method is provided to make Multiverse easy to - * integrate with environment that don't want to depend on threadlocals. - * - * @param stm the stm to use for transactions. - * @param ignoreThreadLocalTransaction true if this Template should completely ignore the ThreadLocalTransaction. - * This is useful for using the AtomicTemplate in other environments that don't - * want to depend on threadlocals but do want to use the AtomicTemplate. - * @throws NullPointerException if stm is null. - */ - public AtomicTemplate(Stm stm, String familyName, boolean ignoreThreadLocalTransaction, boolean readonly, - int retryCount) { - if (stm == null) { - throw new NullPointerException(); - } - if (retryCount < 0) { - throw new IllegalArgumentException(); - } - this.stm = stm; - this.ignoreThreadLocalTransaction = ignoreThreadLocalTransaction; - this.readonly = readonly; - this.retryCount = retryCount; - this.familyName = familyName; - } - - public String getFamilyName() { - return familyName; - } - - /** - * Returns the current attempt. Value will always be larger than zero and increases everytime the transaction needs - * to be retried. - * - * @return the current attempt count. - */ - public final int getAttemptCount() { - return attemptCount; - } - - /** - * Returns the number of retries that this AtomicTemplate is allowed to do. The returned value will always be equal - * or larger than 0. - * - * @return the number of retries. - */ - public final int getRetryCount() { - return retryCount; - } - - /** - * Returns the {@link Stm} used by this AtomicTemplate to execute transactions on. - * - * @return the Stm used by this AtomicTemplate. - */ - public final Stm getStm() { - return stm; - } - - /** - * Check if this AtomicTemplate ignores the ThreadLocalTransaction. - * - * @return true if this AtomicTemplate ignores the ThreadLocalTransaction, false otherwise. 
- */ - public final boolean isIgnoreThreadLocalTransaction() { - return ignoreThreadLocalTransaction; - } - - /** - * Checks if this AtomicTemplate executes readonly transactions. - * - * @return true if it executes readonly transactions, false otherwise. - */ - public final boolean isReadonly() { - return readonly; - } - - /** - * This is the method can be overridden to do pre-start tasks. - */ - public void preStart() { - } - - /** - * This is the method can be overridden to do post-start tasks. - * - * @param t the transaction used for this execution. - */ - public void postStart(Transaction t) { - } - - /** - * This is the method can be overridden to do pre-commit tasks. - */ - public void preCommit() { - } - - /** - * This is the method can be overridden to do post-commit tasks. - */ - public void postCommit() { - } - - /** - * This is the method that needs to be implemented. - * - * @param t the transaction used for this execution. - * @return the result of the execution. - * - * @throws Exception the Exception thrown - */ - public abstract E execute(Transaction t) throws Exception; - - /** - * Executes the template. - * - * @return the result of the {@link #execute(org.multiverse.api.Transaction)} method. - * - * @throws InvisibleCheckedException if a checked exception was thrown while executing the {@link - * #execute(org.multiverse.api.Transaction)} method. - * @throws AbortedException if the exception was explicitly aborted. - * @throws TooManyRetriesException if the template retried the transaction too many times. The cause of the last - * failure (also an exception) is included as cause. So you have some idea where - * to look for problems - */ - public final E execute() { - try { - return executeChecked(); - } catch (Exception ex) { - if (ex instanceof RuntimeException) { - throw (RuntimeException) ex; - } else { - throw new AtomicTemplate.InvisibleCheckedException(ex); - } - } - } - - /** - * Executes the Template and rethrows the checked exception instead of wrapping it in a InvisibleCheckedException. - * - * @return the result - * - * @throws Exception the Exception thrown inside the {@link #execute(org.multiverse.api.Transaction)} - * method. - * @throws AbortedException if the exception was explicitly aborted. - * @throws TooManyRetriesException if the template retried the transaction too many times. The cause of the last - * failure (also an exception) is included as cause. So you have some idea where to - * look for problems - */ - public final E executeChecked() throws Exception { - preStart(); - Transaction t = getTransaction(); - if (noUsableTransaction(t)) { - t = startTransaction(); - setTransaction(t); - postStart(t); - try { - attemptCount = 1; - Exception lastRetryCause = null; - while (attemptCount - 1 <= retryCount) { - boolean abort = true; - boolean reset = false; - try { - E result = execute(t); - if (t.getStatus().equals(TransactionStatus.aborted)) { - String msg = format("Transaction with familyname %s is aborted", t.getFamilyName()); - throw new AbortedException(msg); - } - preCommit(); - t.commit(); - abort = false; - reset = false; - postCommit(); - return result; - } catch (RetryError e) { - Latch latch = new CheapLatch(); - t.abortAndRegisterRetryLatch(latch); - latch.awaitUninterruptible(); - //since the abort is already done, no need to do it again. 
- abort = false; - } catch (CommitFailureException ex) { - lastRetryCause = ex; - reset = true; - //ignore, just retry the transaction - } catch (LoadException ex) { - lastRetryCause = ex; - reset = true; - //ignore, just retry the transaction - } finally { - if (abort) { - t.abort(); - if (reset) { - t = t.abortAndReturnRestarted(); - setTransaction(t); - } - } - } - attemptCount++; - } - - throw new TooManyRetriesException("Too many retries", lastRetryCause); - } finally { - setTransaction(null); - } - } else { - return execute(t); - } - } - - private Transaction startTransaction() { - return readonly ? stm.startReadOnlyTransaction(familyName) : stm.startUpdateTransaction(familyName); - } - - private boolean noUsableTransaction(Transaction t) { - return t == null || t.getStatus() != TransactionStatus.active; - } - - /** - * Gets the current Transaction stored in the TransactionThreadLocal. - *
<p/>
    - * If the ignoreThreadLocalTransaction is set, the threadlocal stuff is completely ignored. - * - * @return the found transaction, or null if none is found. - */ - private Transaction getTransaction() { - return ignoreThreadLocalTransaction ? null : getThreadLocalTransaction(); - } - - /** - * Stores the transaction in the TransactionThreadLocal. - * <p/>
    - * This call is ignored if the ignoreThreadLocalTransaction is true. - * - * @param t the transaction to set (is allowed to be null). - */ - private void setTransaction(Transaction t) { - if (!ignoreThreadLocalTransaction) { - setThreadLocalTransaction(t); - } - } - - public static class InvisibleCheckedException extends RuntimeException { - - public InvisibleCheckedException(Exception cause) { - super(cause); - } - - @Override - public Exception getCause() { - return (Exception) super.getCause(); - } - } -} diff --git a/akka-util/pom.xml b/akka-util/pom.xml deleted file mode 100644 index 9b22090ee9..0000000000 --- a/akka-util/pom.xml +++ /dev/null @@ -1,39 +0,0 @@ - - 4.0.0 - - akka-util - Akka Util Module - - jar - - - akka - se.scalablesolutions.akka - 0.7-SNAPSHOT - - - - - org.scala-lang - scala-library - ${scala.version} - - - org.codehaus.aspectwerkz - aspectwerkz-nodeps-jdk5 - 2.1 - - - org.codehaus.aspectwerkz - aspectwerkz-jdk5 - 2.1 - - - net.lag - configgy - 1.4.7 - - - - diff --git a/akka-util/src/main/scala/Bootable.scala b/akka-util/src/main/scala/Bootable.scala index a46a131f00..172be3fd43 100644 --- a/akka-util/src/main/scala/Bootable.scala +++ b/akka-util/src/main/scala/Bootable.scala @@ -5,6 +5,6 @@ package se.scalablesolutions.akka.util trait Bootable { - def onLoad : Unit = () - def onUnload : Unit = () + def onLoad {} + def onUnload {} } \ No newline at end of file diff --git a/akka-util/src/main/scala/Config.scala b/akka-util/src/main/scala/Config.scala deleted file mode 100644 index f25b08ee46..0000000000 --- a/akka-util/src/main/scala/Config.scala +++ /dev/null @@ -1,74 +0,0 @@ -/** - * Copyright (C) 2009-2010 Scalable Solutions AB - */ - -package se.scalablesolutions.akka - -import util.Logging - -import net.lag.configgy.{Configgy, ParseException} - -/** - * @author Jonas Bonér - */ -object Config extends Logging { - val VERSION = "0.7-SNAPSHOT" - - // Set Multiverse options for max speed - System.setProperty("org.multiverse.MuliverseConstants.sanityChecks", "false") - System.setProperty("org.multiverse.api.GlobalStmInstance.factorymethod", "org.multiverse.stms.alpha.AlphaStm.createFast") - - val HOME = { - val systemHome = System.getenv("AKKA_HOME") - if (systemHome == null || systemHome.length == 0 || systemHome == ".") { - val optionHome = System.getProperty("akka.home", "") - if (optionHome.length != 0) Some(optionHome) - else None - } else Some(systemHome) - } - - val config = { - if (HOME.isDefined) { - try { - val configFile = HOME.get + "/config/akka.conf" - Configgy.configure(configFile) - log.info("AKKA_HOME is defined to [%s], config loaded from [%s].", HOME.get, configFile) - } catch { - case e: ParseException => throw new IllegalStateException( - "'akka.conf' config file can not be found in [" + HOME + "/config/akka.conf] aborting." + - "\n\tEither add it in the 'config' directory or add it to the classpath.") - } - } else if (System.getProperty("akka.config", "") != "") { - val configFile = System.getProperty("akka.config", "") - try { - Configgy.configure(configFile) - log.info("Config loaded from -Dakka.config=%s", configFile) - } catch { - case e: ParseException => throw new IllegalStateException( - "Config could not be loaded from -Dakka.config=" + configFile) - } - } else { - try { - Configgy.configureFromResource("akka.conf", getClass.getClassLoader) - log.info("Config loaded from the application classpath.") - } catch { - case e: ParseException => throw new IllegalStateException( - "\nCan't find 'akka.conf' configuration file." 
+ - "\nOne of the three ways of locating the 'akka.conf' file needs to be defined:" + - "\n\t1. Define 'AKKA_HOME' environment variable to the root of the Akka distribution." + - "\n\t2. Define the '-Dakka.config=...' system property option." + - "\n\t3. Put the 'akka.conf' file on the classpath." + - "\nI have no way of finding the 'akka.conf' configuration file." + - "\nAborting.") - } - } - Configgy.config - } - - val CONFIG_VERSION = config.getString("akka.version", "0") - if (VERSION != CONFIG_VERSION) throw new IllegalStateException( - "Akka JAR version [" + VERSION + "] is different than the provided config ('akka.conf') version [" + CONFIG_VERSION + "]") - val startTime = System.currentTimeMillis - - def uptime = (System.currentTimeMillis - startTime) / 1000 -} diff --git a/akka-util/src/main/scala/Helpers.scala b/akka-util/src/main/scala/Helpers.scala index b7e5ff3b75..55abf6e7ac 100644 --- a/akka-util/src/main/scala/Helpers.scala +++ b/akka-util/src/main/scala/Helpers.scala @@ -40,7 +40,6 @@ object Helpers extends Logging { } // ================================================ - @serializable class ReadWriteLock { private val rwl = new ReentrantReadWriteLock private val readLock = rwl.readLock diff --git a/akka-util/src/main/scala/Logging.scala b/akka-util/src/main/scala/Logging.scala index a6b89b86b2..b988c73f22 100644 --- a/akka-util/src/main/scala/Logging.scala +++ b/akka-util/src/main/scala/Logging.scala @@ -6,10 +6,10 @@ package se.scalablesolutions.akka.util import net.lag.logging.Logger -import java.io.StringWriter; -import java.io.PrintWriter; -import java.net.InetAddress; -import java.net.UnknownHostException; +import java.io.StringWriter +import java.io.PrintWriter +import java.net.InetAddress +import java.net.UnknownHostException /** * Base trait for all classes that wants to be able use the logging infrastructure. @@ -30,6 +30,7 @@ trait Logging { * * @author Jonas Bonér */ + // FIXME make use of LoggableException class LoggableException extends Exception with Logging { private val uniqueId = getExceptionID private var originalException: Option[Exception] = None diff --git a/akka.iml b/akka.iml index 2f07a75716..74542e8e48 100644 --- a/akka.iml +++ b/akka.iml @@ -2,6 +2,23 @@ + + + + + + + + + diff --git a/changes.xml b/changes.xml deleted file mode 100644 index 90a9e31c88..0000000000 --- a/changes.xml +++ /dev/null @@ -1,86 +0,0 @@ - - - - - - - Akka Release Notes - Jonas Bonér - - - - Clustered Comet using Akka remote actors and clustered membership API - Cluster membership API and implementation based on JGroups - Security module for HTTP-based authentication and authorization - Support for using Scala XML tags in RESTful Actors (scala-jersey) - Support for Comet Actors using Atmosphere - MongoDB as Akka storage backend - Redis as Akka storage backend - Transparent JSON serialization of Scala objects based on SJSON - Kerberos/SPNEGO support for Security module - Implicit sender for remote actors: Remote actors are able to use reply to answer a request - Support for using the Lift Web framework with Actors - Rewritten STM, now integrated with Multiverse STM - Added STM API for atomic {..} and run {..} orElse {..} - Added STM retry - Complete rewrite of the persistence transaction management, now based on Unit of Work and Multiverse STM - Monadic API to TransactionalRef (use it in for-comprehension) - Lightweight actor syntax using one of the Actor.actor(..) methods. F.e: 'val a = actor { case _ => .. 
}' - Rewritten event-based dispatcher which improved performance by 10x, now substantially faster than event-driven Scala Actors - New Scala JSON parser based on sjson - Added zlib compression to remote actors - Added implicit sender reference for fire-forget ('!') message sends - Monadic API to TransactionalRef (use it in for-comprehension) - Smoother web app integration; just add akka.conf to the classpath (WEB-INF/classes), no need for AKKA_HOME or -Dakka.conf=.. - Modularization of distribution into a thin core (actors, remoting and STM) and the rest in submodules - Added 'forward' to Actor, forwards message but keeps original sender address - JSON serialization for Java objects (using Jackson) - JSON serialization for Scala objects (using SJSON) - Added implementation for remote actor reconnect upon failure - Protobuf serialization for Java and Scala objects - SBinary serialization for Scala objects - Protobuf as remote protocol - AMQP integration; abstracted as actors in a supervisor hierarchy. Impl AMQP 0.9.1 - Updated Cassandra integration and CassandraSession API to v0.4 - Added CassandraSession API (with socket pooling) wrapping Cassandra's Thrift API in Scala and Java APIs - CassandraStorage now works with external Cassandra cluster - ActorRegistry for retrieving Actor instances by class name and by id - SchedulerActor for scheduling periodic tasks - Now start up kernel with 'java -jar dist/akka-0.6.jar' - Added mailing list: akka-user@googlegroups.com - Improved and restructured documentation - New URL: http://akkasource.org - New and much improved docs - Enhanced trapping of failures: 'trapExit = List(classOf[..], classOf[..])' - Upgraded to Netty 3.2, Protobuf 2.2, ScalaTest 1.0, Jersey 1.1.3, Atmosphere 0.4.1, Cassandra 0.4.1, Configgy 1.4 - Lowered actor memory footprint; now an actor consumes ~600 bytes, which means that you can create 6.5 million on 4 G RAM - Removed concurrent mode - Remote actors are now defined by their UUID (not class name) - Fixed dispatcher bugs - Cleaned up Maven scripts and distribution in general - Fixed many many bugs and minor issues - Fixed inconsistencies and ugliness in Actors API - Removed embedded Cassandra mode - Removed the !? method in Actor (synchronous message send) since it's evil. Use !! with time-out instead. - Removed startup scripts and lib dir - Removed the 'Transient' life-cycle scope since it is too close to 'Temporary' in semantics.
- Removed 'Transient' Actors and restart timeout - - - - \ No newline at end of file diff --git a/config/akka-reference.conf b/config/akka-reference.conf index 749b599e0b..7e93604521 100644 --- a/config/akka-reference.conf +++ b/config/akka-reference.conf @@ -19,8 +19,9 @@ # FQN to the class doing initial active object/actor # supervisor bootstrap, should be defined in default constructor - boot = ["sample.java.Boot", - "sample.scala.Boot", + boot = ["sample.camel.Boot", + "sample.java.Boot", + "sample.scala.Boot", "se.scalablesolutions.akka.security.samples.Boot"] @@ -30,8 +31,10 @@ service = on - max-nr-of-retries = 100 - distributed = off # not implemented yet + fair = on # should transactions be fair or non-fair (non fair yield better performance) + max-nr-of-retries = 1000 # max nr of retries of a failing transaction before giving up + timeout = 10000 # transaction timeout; if transaction has not committed within the timeout then it is aborted + distributed = off # not implemented yet @@ -47,9 +50,10 @@ zlib-compression-level = 6 # Options: 0-9 (1 being fastest and 9 being the most compressed), default is 6 - name = "default" # The name of the cluster - #actor = "se.scalablesolutions.akka.remote.JGroupsClusterActor" # FQN of an implementation of ClusterActor - serializer = "se.scalablesolutions.akka.serialization.Serializer$Java" # FQN of the serializer class + service = on + name = "default" # The name of the cluster + actor = "se.scalablesolutions.akka.cluster.jgroups.JGroupsClusterActor" # FQN of an implementation of ClusterActor + serializer = "se.scalablesolutions.akka.serialization.Serializer$Java$" # FQN of the serializer class diff --git a/config/akka.conf b/config/akka.conf index 94f630089a..84b9bfbbcf 100644 --- a/config/akka.conf +++ b/config/akka.conf @@ -1,4 +1,4 @@ -# This config import the Akka reference configuration. +# This config imports the Akka reference configuration. include "akka-reference.conf" # In this file you can override any option defined in the 'akka-reference.conf' file. 
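Since config/akka.conf now just includes the reference configuration, a deployment override only needs to restate the keys it changes. The sketch below is illustrative and not part of the patch: the key names come from the akka-reference.conf hunk above, the akka/stm section nesting is assumed from Configgy's block syntax, and the values are examples only.

    include "akka-reference.conf"

    <akka>
      <stm>
        fair = off               # trade fairness for raw throughput
        max-nr-of-retries = 500  # give up on a failing transaction sooner
        timeout = 5000           # abort transactions that have not committed within 5 s
      </stm>
    </akka>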
diff --git a/deploy/.keep b/deploy/.keep deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/embedded-repo/com/redis/redisclient/1.1/redisclient-1.1.jar b/embedded-repo/com/redis/redisclient/1.1/redisclient-1.1.jar deleted file mode 100644 index a269f15f7a..0000000000 Binary files a/embedded-repo/com/redis/redisclient/1.1/redisclient-1.1.jar and /dev/null differ diff --git a/embedded-repo/com/redis/redisclient/1.1/redisclient-1.1.pom b/embedded-repo/com/redis/redisclient/1.1/redisclient-1.1.pom deleted file mode 100755 index 16dd81402a..0000000000 --- a/embedded-repo/com/redis/redisclient/1.1/redisclient-1.1.pom +++ /dev/null @@ -1,8 +0,0 @@ - - - 4.0.0 - com.redis - redisclient - 1.1 - jar - diff --git a/embedded-repo/com/redis/redisclient/1.2-SNAPSHOT/redisclient-1.2-SNAPSHOT.jar b/embedded-repo/com/redis/redisclient/1.2-SNAPSHOT/redisclient-1.2-SNAPSHOT.jar new file mode 100644 index 0000000000..88815a75d9 Binary files /dev/null and b/embedded-repo/com/redis/redisclient/1.2-SNAPSHOT/redisclient-1.2-SNAPSHOT.jar differ diff --git a/project/build.properties b/project/build.properties new file mode 100644 index 0000000000..9f7e717580 --- /dev/null +++ b/project/build.properties @@ -0,0 +1,7 @@ +project.organization=se.scalablesolutions.akka +project.name=akka +project.version=0.7-SNAPSHOT +scala.version=2.7.7 +sbt.version=0.7.1 +def.scala.version=2.7.7 +build.scala.versions=2.7.7 diff --git a/project/build/AkkaProject.scala b/project/build/AkkaProject.scala new file mode 100644 index 0000000000..a891ce1668 --- /dev/null +++ b/project/build/AkkaProject.scala @@ -0,0 +1,387 @@ +/*------------------------------------------------------------------------------- + Copyright (C) 2009-2010 Scalable Solutions AB + + ---------------------------------------------------- + -------- sbt buildfile for the Akka project -------- + ---------------------------------------------------- + + Akka implements a unique hybrid of: + * Actors , which gives you: + * Simple and high-level abstractions for concurrency and parallelism. + * Asynchronous, non-blocking and highly performant event-driven programming + model. + * Very lightweight event-driven processes (create ~6.5 million actors on + 4 G RAM). + * Supervision hierarchies with let-it-crash semantics. For writing highly + fault-tolerant systems that never stop, systems that self-heal. + * Software Transactional Memory (STM). (Distributed transactions coming soon). + * Transactors: combine actors and STM into transactional actors. Allows you to + compose atomic message flows with automatic rollback and retry. + * Remoting: highly performant distributed actors with remote supervision and + error management. + * Cluster membership management. + + Akka also has a set of add-on modules: + * Persistence: A set of pluggable back-end storage modules that works in sync + with the STM. + * Cassandra distributed and highly scalable database. + * MongoDB document database. + * Redis data structures database (upcoming) + * REST (JAX-RS): Expose actors as REST services. + * Comet: Expose actors as Comet services. + * Security: Digest and Kerberos based security. + * Microkernel: Run Akka as a stand-alone kernel. 
+ +-------------------------------------------------------------------------------*/ + +import sbt._ +import java.io.File +import java.util.jar.Attributes + +class AkkaParent(info: ProjectInfo) extends DefaultProject(info) { + + // ------------------------------------------------------------ + // project versions + val JERSEY_VERSION = "1.1.5" + val ATMO_VERSION = "0.5.4" + val CASSANDRA_VERSION = "0.5.0" + + // ------------------------------------------------------------ + lazy val akkaHome = { + val home = System.getenv("AKKA_HOME") + if (home == null) throw new Error("You need to set the $AKKA_HOME environment variable to the root of the Akka distribution") + home + } + lazy val deployPath = Path.fromFile(new java.io.File(akkaHome + "/deploy")) + lazy val distPath = Path.fromFile(new java.io.File(akkaHome + "/dist")) + + lazy val dist = zipTask(allArtifacts, "dist", distName) dependsOn (`package`) describedAs("Zips up the distribution.") + + def distName = "%s_%s-%s.zip".format(name, defScalaVersion.value, version) + + // ------------------------------------------------------------ + // repositories + val embeddedrepo = "embedded repo" at new File(akkaHome, "embedded-repo").toURI.toString + val sunjdmk = "sunjdmk" at "http://wp5.e-taxonomy.eu/cdmlib/mavenrepo" + val databinder = "DataBinder" at "http://databinder.net/repo" + val configgy = "Configgy" at "http://www.lag.net/repo" + val codehaus = "Codehaus" at "http://repository.codehaus.org" + val codehaus_snapshots = "Codehaus Snapshots" at "http://snapshots.repository.codehaus.org" + val jboss = "jBoss" at "http://repository.jboss.org/maven2" + val guiceyfruit = "GuiceyFruit" at "http://guiceyfruit.googlecode.com/svn/repo/releases/" + val google = "google" at "http://google-maven-repository.googlecode.com/svn/repository" + val m2 = "m2" at "http://download.java.net/maven/2" + + // ------------------------------------------------------------ + // project definitions + lazy val akka_java_util = project("akka-util-java", "akka-util-java", new AkkaJavaUtilProject(_)) + lazy val akka_util = project("akka-util", "akka-util", new AkkaUtilProject(_)) + lazy val akka_core = project("akka-core", "akka-core", new AkkaCoreProject(_), akka_util, akka_java_util) + lazy val akka_amqp = project("akka-amqp", "akka-amqp", new AkkaAMQPProject(_), akka_core) + lazy val akka_rest = project("akka-rest", "akka-rest", new AkkaRestProject(_), akka_core) + lazy val akka_comet = project("akka-comet", "akka-comet", new AkkaCometProject(_), akka_rest) + lazy val akka_camel = project("akka-camel", "akka-camel", new AkkaCamelProject(_), akka_core) + lazy val akka_patterns = project("akka-patterns", "akka-patterns", new AkkaPatternsProject(_), akka_core) + lazy val akka_security = project("akka-security", "akka-security", new AkkaSecurityProject(_), akka_core) + lazy val akka_persistence = project("akka-persistence", "akka-persistence", new AkkaPersistenceParentProject(_)) + lazy val akka_cluster = project("akka-cluster", "akka-cluster", new AkkaClusterParentProject(_)) + lazy val akka_kernel = project("akka-kernel", "akka-kernel", new AkkaKernelProject(_), + akka_core, akka_rest, akka_persistence, akka_cluster, akka_amqp, akka_security, akka_comet, akka_camel, akka_patterns) + + // functional tests in java + lazy val akka_fun_test = project("akka-fun-test-java", "akka-fun-test-java", new AkkaFunTestProject(_), akka_kernel) + + // examples + lazy val akka_samples = project("akka-samples", "akka-samples", new AkkaSamplesParentProject(_)) + + //
------------------------------------------------------------ + // create executable jar + override def mainClass = Some("se.scalablesolutions.akka.kernel.Main") + + override def packageOptions = + manifestClassPath.map(cp => ManifestAttributes((Attributes.Name.CLASS_PATH, cp))).toList ::: + getMainClass(false).map(MainClass(_)).toList + + // create a manifest with all akka jars and dependency jars on classpath + override def manifestClassPath = Some(allArtifacts.getFiles + .filter(_.getName.endsWith(".jar")) + .map("lib_managed/scala_%s/compile/".format(defScalaVersion.value) + _.getName) + .mkString(" ") + + " dist/akka-util_%s-%s.jar".format(defScalaVersion.value, version) + + " dist/akka-util-java_%s-%s.jar".format(defScalaVersion.value, version) + + " dist/akka-core_%s-%s.jar".format(defScalaVersion.value, version) + + " dist/akka-cluster-shoal_%s-%s.jar".format(defScalaVersion.value, version) + + " dist/akka-cluster-jgroups_%s-%s.jar".format(defScalaVersion.value, version) + + " dist/akka-rest_%s-%s.jar".format(defScalaVersion.value, version) + + " dist/akka-comet_%s-%s.jar".format(defScalaVersion.value, version) + + " dist/akka-camel_%s-%s.jar".format(defScalaVersion.value, version) + + " dist/akka-security_%s-%s.jar".format(defScalaVersion.value, version) + + " dist/akka-amqp_%s-%s.jar".format(defScalaVersion.value, version) + + " dist/akka-patterns_%s-%s.jar".format(defScalaVersion.value, version) + + " dist/akka-persistence-common_%s-%s.jar".format(defScalaVersion.value, version) + + " dist/akka-persistence-redis_%s-%s.jar".format(defScalaVersion.value, version) + + " dist/akka-persistence-mongo_%s-%s.jar".format(defScalaVersion.value, version) + + " dist/akka-persistence-cassandra_%s-%s.jar".format(defScalaVersion.value, version) + + " dist/akka-kernel_%s-%s.jar".format(defScalaVersion.value, version) + ) + + // ------------------------------------------------------------ + // publishing + override def managedStyle = ManagedStyle.Maven + val publishTo = Resolver.file("maven-local", Path.userHome / ".m2" / "repository" asFile) + + // Credentials(Path.userHome / ".akka_publish_credentials", log) + val sourceArtifact = Artifact(artifactID, "src", "jar", Some("sources"), Nil, None) + //val docsArtifact = Artifact(artifactID, "docs", "jar", Some("javadoc"), Nil, None) + + override def packageDocsJar = defaultJarPath("-javadoc.jar") + override def packageSrcJar= defaultJarPath("-sources.jar") + override def packageToPublishActions = super.packageToPublishActions ++ Seq(packageDocs, packageSrc) + + override def pomExtra = + <inceptionYear>2009</inceptionYear> + <url>http://akkasource.org</url> + <organization> + <name>Scalable Solutions AB</name> + <url>http://scalablesolutions.se</url> + </organization> + <licenses> + <license> + <name>Apache 2</name> + <url>http://www.apache.org/licenses/LICENSE-2.0.txt</url> + <distribution>repo</distribution> + </license> + </licenses> + + // ------------------------------------------------------------ + // subprojects + class AkkaCoreProject(info: ProjectInfo) extends DefaultProject(info) { + val netty = "org.jboss.netty" % "netty" % "3.2.0.BETA1" % "compile" + val commons_io = "commons-io" % "commons-io" % "1.4" % "compile" + val dispatch_json = "net.databinder" % "dispatch-json_2.7.7" % "0.6.4" % "compile" + val dispatch_http = "net.databinder" % "dispatch-http_2.7.7" % "0.6.4" % "compile" + val sjson = "sjson.json" % "sjson" % "0.4" % "compile" + val sbinary = "sbinary" % "sbinary" % "0.3" % "compile" + val jackson = "org.codehaus.jackson" % "jackson-mapper-asl" % "1.2.1" % "compile" + val jackson_core = "org.codehaus.jackson" % "jackson-core-asl" % "1.2.1" % "compile" + val voldemort = "voldemort.store.compress" % "h2-lzf" % "1.0"
% "compile" + val javautils = "org.scala-tools" % "javautils" % "2.7.4-0.1" % "compile" + // testing + val scalatest = "org.scalatest" % "scalatest" % "1.0" % "test" + val junit = "junit" % "junit" % "4.5" % "test" + lazy val dist = deployTask(info, distPath) dependsOn(`package`) describedAs("Deploying") + } + + class AkkaUtilProject(info: ProjectInfo) extends DefaultProject(info) { + val werkz = "org.codehaus.aspectwerkz" % "aspectwerkz-nodeps-jdk5" % "2.1" % "compile" + val werkz_core = "org.codehaus.aspectwerkz" % "aspectwerkz-jdk5" % "2.1" % "compile" + val configgy = "net.lag" % "configgy" % "1.4.7" % "compile" + lazy val dist = deployTask(info, distPath) dependsOn(`package`) describedAs("Deploying") + } + + class AkkaJavaUtilProject(info: ProjectInfo) extends DefaultProject(info) { + val guicey = "org.guiceyfruit" % "guice-core" % "2.0-beta-4" % "compile" + val protobuf = "com.google.protobuf" % "protobuf-java" % "2.2.0" % "compile" + val multiverse = "org.multiverse" % "multiverse-alpha" % "0.4-SNAPSHOT" % "compile" + lazy val dist = deployTask(info, distPath) dependsOn(`package`) describedAs("Deploying") + } + + class AkkaAMQPProject(info: ProjectInfo) extends DefaultProject(info) { + val rabbit = "com.rabbitmq" % "amqp-client" % "1.7.2" + lazy val dist = deployTask(info, distPath) dependsOn(`package`) describedAs("Deploying") + } + + class AkkaRestProject(info: ProjectInfo) extends DefaultProject(info) { + val servlet = "javax.servlet" % "servlet-api" % "2.5" % "compile" + val jersey = "com.sun.jersey" % "jersey-core" % JERSEY_VERSION % "compile" + val jersey_server = "com.sun.jersey" % "jersey-server" % JERSEY_VERSION % "compile" + val jersey_json = "com.sun.jersey" % "jersey-json" % JERSEY_VERSION % "compile" + val jersey_contrib = "com.sun.jersey.contribs" % "jersey-scala" % JERSEY_VERSION % "compile" + val jsr311 = "javax.ws.rs" % "jsr311-api" % "1.1" % "compile" + lazy val dist = deployTask(info, distPath) dependsOn(`package`) describedAs("Deploying") + } + + class AkkaCometProject(info: ProjectInfo) extends DefaultProject(info) { + val grizzly = "com.sun.grizzly" % "grizzly-comet-webserver" % "1.9.18-i" % "compile" + val servlet = "javax.servlet" % "servlet-api" % "2.5" % "compile" + val atmo = "org.atmosphere" % "atmosphere-annotations" % ATMO_VERSION % "compile" + val atmo_jersey = "org.atmosphere" % "atmosphere-jersey" % ATMO_VERSION % "compile" + val atmo_runtime = "org.atmosphere" % "atmosphere-runtime" % ATMO_VERSION % "compile" + lazy val dist = deployTask(info, distPath) dependsOn(`package`) describedAs("Deploying") + } + + class AkkaCamelProject(info: ProjectInfo) extends DefaultProject(info) { + val camel_core = "org.apache.camel" % "camel-core" % "2.2.0" % "compile" + lazy val dist = deployTask(info, distPath) dependsOn(`package`) describedAs("Deploying") + } + + class AkkaPatternsProject(info: ProjectInfo) extends DefaultProject(info) { + // testing + val scalatest = "org.scalatest" % "scalatest" % "1.0" % "test" + val junit = "junit" % "junit" % "4.5" % "test" + lazy val dist = deployTask(info, distPath) dependsOn(`package`) describedAs("Deploying") + } + + class AkkaSecurityProject(info: ProjectInfo) extends DefaultProject(info) { + val annotation = "javax.annotation" % "jsr250-api" % "1.0" + val jersey_server = "com.sun.jersey" % "jersey-server" % JERSEY_VERSION % "compile" + val jsr311 = "javax.ws.rs" % "jsr311-api" % "1.1" % "compile" + val lift_util = "net.liftweb" % "lift-util" % "1.1-M6" % "compile" + // testing + val scalatest = "org.scalatest" % 
"scalatest" % "1.0" % "test" + val junit = "junit" % "junit" % "4.5" % "test" + val mockito = "org.mockito" % "mockito-all" % "1.8.1" % "test" + lazy val dist = deployTask(info, distPath) dependsOn(`package`) describedAs("Deploying") + } + + class AkkaPersistenceCommonProject(info: ProjectInfo) extends DefaultProject(info) { + val thrift = "com.facebook" % "thrift" % "1.0" % "compile" + val commons_pool = "commons-pool" % "commons-pool" % "1.5.1" % "compile" + lazy val dist = deployTask(info, distPath) dependsOn(`package`) describedAs("Deploying") + } + + class AkkaRedisProject(info: ProjectInfo) extends DefaultProject(info) { + val redis = "com.redis" % "redisclient" % "1.2-SNAPSHOT" % "compile" + override def testOptions = TestFilter((name: String) => name.endsWith("Test")) :: Nil + lazy val dist = deployTask(info, distPath) dependsOn(`package`) describedAs("Deploying") + } + + class AkkaMongoProject(info: ProjectInfo) extends DefaultProject(info) { + val mongo = "org.mongodb" % "mongo-java-driver" % "1.1" % "compile" + override def testOptions = TestFilter((name: String) => name.endsWith("Test")) :: Nil + lazy val dist = deployTask(info, distPath) dependsOn(`package`) describedAs("Deploying") + } + + class AkkaCassandraProject(info: ProjectInfo) extends DefaultProject(info) { + val cassandra = "org.apache.cassandra" % "cassandra" % CASSANDRA_VERSION % "compile" + val high_scale = "org.apache.cassandra" % "high-scale-lib" % CASSANDRA_VERSION % "test" + val cassandra_clhm = "org.apache.cassandra" % "clhm-production" % CASSANDRA_VERSION % "test" + val commons_coll = "commons-collections" % "commons-collections" % "3.2.1" % "test" + val google_coll = "com.google.collections" % "google-collections" % "1.0" % "test" + val slf4j = "org.slf4j" % "slf4j-api" % "1.5.8" % "test" + val slf4j_log4j = "org.slf4j" % "slf4j-log4j12" % "1.5.8" % "test" + val log4j = "log4j" % "log4j" % "1.2.15" % "test" + override def testOptions = TestFilter((name: String) => name.endsWith("Test")) :: Nil + lazy val dist = deployTask(info, distPath) dependsOn(`package`) describedAs("Deploying") + } + + class AkkaPersistenceParentProject(info: ProjectInfo) extends ParentProject(info) { + lazy val akka_persistence_common = project("akka-persistence-common", "akka-persistence-common", new AkkaPersistenceCommonProject(_), akka_core) + lazy val akka_persistence_redis = project("akka-persistence-redis", "akka-persistence-redis", new AkkaRedisProject(_), akka_persistence_common) + lazy val akka_persistence_mongo = project("akka-persistence-mongo", "akka-persistence-mongo", new AkkaMongoProject(_), akka_persistence_common) + lazy val akka_persistence_cassandra = project("akka-persistence-cassandra", "akka-persistence-cassandra", new AkkaCassandraProject(_), akka_persistence_common) + } + + class AkkaJgroupsProject(info: ProjectInfo) extends DefaultProject(info) { + val jgroups = "jgroups" % "jgroups" % "2.8.0.CR7" % "compile" + lazy val dist = deployTask(info, distPath) dependsOn(`package`) describedAs("Deploying") + } + + class AkkaShoalProject(info: ProjectInfo) extends DefaultProject(info) { + val shoal = "shoal-jxta" % "shoal" % "1.1-20090818" % "compile" + val shoal_extra = "shoal-jxta" % "jxta" % "1.1-20090818" % "compile" + lazy val dist = deployTask(info, distPath) dependsOn(`package`) describedAs("Deploying") + } + + class AkkaClusterParentProject(info: ProjectInfo) extends ParentProject(info) { + lazy val akka_cluster_jgroups = project("akka-cluster-jgroups", "akka-cluster-jgroups", new AkkaJgroupsProject(_), 
akka_core) + lazy val akka_cluster_shoal = project("akka-cluster-shoal", "akka-cluster-shoal", new AkkaShoalProject(_), akka_core) + } + + class AkkaKernelProject(info: ProjectInfo) extends DefaultProject(info) { + lazy val dist = deployTask(info, distPath) dependsOn(`package`) describedAs("Deploying") + } + + // examples + class AkkaFunTestProject(info: ProjectInfo) extends DefaultProject(info) { + val protobuf = "com.google.protobuf" % "protobuf-java" % "2.2.0" + val grizzly = "com.sun.grizzly" % "grizzly-comet-webserver" % "1.9.18-i" % "compile" + val jersey_server = "com.sun.jersey" % "jersey-server" % JERSEY_VERSION % "compile" + val jersey_json = "com.sun.jersey" % "jersey-json" % JERSEY_VERSION % "compile" + val jersey_atom = "com.sun.jersey" % "jersey-atom" % JERSEY_VERSION % "compile" + // testing + val junit = "junit" % "junit" % "4.5" % "test" + val jmock = "org.jmock" % "jmock" % "2.4.0" % "test" + } + + class AkkaSampleChatProject(info: ProjectInfo) extends DefaultProject(info) { + lazy val dist = deployTask(info, deployPath) dependsOn(`package`) describedAs("Deploying") + } + + class AkkaSampleLiftProject(info: ProjectInfo) extends DefaultProject(info) { + val lift = "net.liftweb" % "lift-webkit" % "1.1-M6" % "compile" + val lift_util = "net.liftweb" % "lift-util" % "1.1-M6" % "compile" + val servlet = "javax.servlet" % "servlet-api" % "2.5" % "compile" + // testing + val jetty = "org.mortbay.jetty" % "jetty" % "6.1.22" % "test" + val junit = "junit" % "junit" % "4.5" % "test" + lazy val dist = deployTask(info, deployPath) dependsOn(`package`) describedAs("Deploying") + } + + class AkkaSampleRestJavaProject(info: ProjectInfo) extends DefaultProject(info) { + lazy val dist = deployTask(info, deployPath) dependsOn(`package`) describedAs("Deploying") + } + + class AkkaSampleRestScalaProject(info: ProjectInfo) extends DefaultProject(info) { + val jsr311 = "javax.ws.rs" % "jsr311-api" % "1.1" % "compile" + lazy val dist = deployTask(info, deployPath) dependsOn(`package`) describedAs("Deploying") + } + + class AkkaSampleCamelProject(info: ProjectInfo) extends DefaultProject(info) { + val camel_jetty = "org.apache.camel" % "camel-jetty" % "2.2.0" % "compile" + val camel_jms = "org.apache.camel" % "camel-jms" % "2.2.0" % "compile" + val activemq_core = "org.apache.activemq" % "activemq-core" % "5.3.0" % "compile" + lazy val dist = deployTask(info, deployPath) dependsOn(`package`) describedAs("Deploying") + } + + class AkkaSampleSecurityProject(info: ProjectInfo) extends DefaultProject(info) { + val jsr311 = "javax.ws.rs" % "jsr311-api" % "1.1" % "compile" + val jsr250 = "javax.annotation" % "jsr250-api" % "1.0" + lazy val dist = deployTask(info, deployPath) dependsOn(`package`) describedAs("Deploying") + } + + class AkkaSamplesParentProject(info: ProjectInfo) extends ParentProject(info) { + lazy val akka_sample_chat = project("akka-sample-chat", "akka-sample-chat", new AkkaSampleChatProject(_), akka_kernel) + lazy val akka_sample_lift = project("akka-sample-lift", "akka-sample-lift", new AkkaSampleLiftProject(_), akka_kernel) + lazy val akka_sample_rest_java = project("akka-sample-rest-java", "akka-sample-rest-java", new AkkaSampleRestJavaProject(_), akka_kernel) + lazy val akka_sample_rest_scala = project("akka-sample-rest-scala", "akka-sample-rest-scala", new AkkaSampleRestScalaProject(_), akka_kernel) + lazy val akka_sample_camel = project("akka-sample-camel", "akka-sample-camel", new AkkaSampleCamelProject(_), akka_kernel) + lazy val akka_sample_security = 
project("akka-sample-security", "akka-sample-security", new AkkaSampleSecurityProject(_), akka_kernel) + } + + // ------------------------------------------------------------ + // helper functions + def removeDupEntries(paths: PathFinder) = + Path.lazyPathFinder { + val mapped = paths.get map { p => (p.relativePath, p) } + (Map() ++ mapped).values.toList + } + + def allArtifacts = { + (removeDupEntries(runClasspath filter ClasspathUtilities.isArchive) +++ + ((outputPath ##) / defaultJarName) +++ + mainResources +++ + mainDependencies.scalaJars +++ + descendents(info.projectPath, "*.conf") +++ + descendents(info.projectPath / "dist", "*.jar") +++ + descendents(info.projectPath / "deploy", "*.jar") +++ + descendents(path("lib") ##, "*.jar") +++ + descendents(configurationPath(Configurations.Compile) ##, "*.jar")) + .filter(jar => + !jar.toString.endsWith("scala-library-2.7.5.jar") && // remove redundant scala libs + !jar.toString.endsWith("scala-library-2.7.6.jar")) + } + + def deployTask(info: ProjectInfo, toDir: Path) = task { + val projectPath = info.projectPath.toString + val moduleName = projectPath.substring(projectPath.lastIndexOf(System.getProperty("file.separator")) + 1, projectPath.length) + // FIXME need to find out a way to grab these paths from the sbt system + val JAR_FILE_NAME = moduleName + "_%s-%s.jar".format(defScalaVersion.value, version) + val JAR_FILE_PATH = projectPath + "/target/scala_%s/".format(defScalaVersion.value) + JAR_FILE_NAME + + val from = Path.fromFile(new java.io.File(JAR_FILE_PATH)) + val to = Path.fromFile(new java.io.File(toDir + "/" + JAR_FILE_NAME)) + log.info("Deploying " + to) + FileUtilities.copyFile(from, to, log) + } +} diff --git a/scripts/run_akka.sh b/scripts/run_akka.sh new file mode 100755 index 0000000000..c07397adeb --- /dev/null +++ b/scripts/run_akka.sh @@ -0,0 +1,16 @@ +#!/bin/bash +cd $AKKA_HOME +VERSION=akka_2.7.7-0.7-SNAPSHOT +TARGET_DIR=dist/$1 +shift 1 +VMARGS=$@ + +if [ -d $TARGET_DIR ]; then + cd $TARGET_DIR +else + unzip dist/${VERSION}.zip -d $TARGET_DIR + cd $TARGET_DIR +fi + +export AKKA_HOME=`pwd` +java -jar ${VMARGS} ${VERSION}.jar \ No newline at end of file