diff --git a/.gitignore b/.gitignore
index 22379bef4c..69dd6d55c9 100755
--- a/.gitignore
+++ b/.gitignore
@@ -1,5 +1,9 @@
*~
*#
+project/boot/*
+*/project/build/target
+*/project/boot
+lib_managed
etags
TAGS
reports
diff --git a/akka-amqp/pom.xml b/akka-amqp/pom.xml
deleted file mode 100644
index aa569958a6..0000000000
--- a/akka-amqp/pom.xml
+++ /dev/null
@@ -1,29 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<project>
-    <modelVersion>4.0.0</modelVersion>
-
-    <artifactId>akka-amqp</artifactId>
-    <name>Akka AMQP Module</name>
-
-    <packaging>jar</packaging>
-
-    <parent>
-        <artifactId>akka</artifactId>
-        <groupId>se.scalablesolutions.akka</groupId>
-        <version>0.7-SNAPSHOT</version>
-    </parent>
-
-    <dependencies>
-        <dependency>
-            <artifactId>akka-core</artifactId>
-            <groupId>${project.groupId}</groupId>
-            <version>${project.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>com.rabbitmq</groupId>
-            <artifactId>amqp-client</artifactId>
-            <version>1.7.0</version>
-        </dependency>
-    </dependencies>
-</project>
diff --git a/akka-camel/src/main/resources/META-INF/services/org/apache/camel/component/actor b/akka-camel/src/main/resources/META-INF/services/org/apache/camel/component/actor
new file mode 100644
index 0000000000..a2141db8a9
--- /dev/null
+++ b/akka-camel/src/main/resources/META-INF/services/org/apache/camel/component/actor
@@ -0,0 +1 @@
+class=se.scalablesolutions.akka.camel.component.ActorComponent
\ No newline at end of file
diff --git a/akka-camel/src/main/scala/CamelContextLifecycle.scala b/akka-camel/src/main/scala/CamelContextLifecycle.scala
new file mode 100644
index 0000000000..b9a696207c
--- /dev/null
+++ b/akka-camel/src/main/scala/CamelContextLifecycle.scala
@@ -0,0 +1,95 @@
+/**
+ * Copyright (C) 2009-2010 Scalable Solutions AB
+ */
+
+package se.scalablesolutions.akka.camel
+
+import org.apache.camel.{ProducerTemplate, CamelContext}
+import org.apache.camel.impl.DefaultCamelContext
+
+import se.scalablesolutions.akka.util.Logging
+
+/**
+ * Defines the lifecycle of a CamelContext. Allowed state transitions are
+ * init -> start -> stop -> init -> ... etc.
+ *
+ * @author Martin Krasser
+ */
+trait CamelContextLifecycle extends Logging {
+ // TODO: enforce correct state transitions
+ // valid: init -> start -> stop -> init ...
+
+ private var _context: CamelContext = _
+ private var _template: ProducerTemplate = _
+
+ private var _initialized = false
+ private var _started = false
+
+ /**
+ * Returns the managed CamelContext.
+ */
+ protected def context: CamelContext = _context
+
+ /**
+ * Returns the managed ProducerTemplate.
+ */
+ protected def template: ProducerTemplate = _template
+
+ /**
+ * Sets the managed CamelContext.
+ */
+ protected def context_= (context: CamelContext) { _context = context }
+
+ /**
+ * Sets the managed ProducerTemplate.
+ */
+ protected def template_= (template: ProducerTemplate) { _template = template }
+
+ def initialized = _initialized
+ def started = _started
+
+ /**
+ * Starts the CamelContext and ProducerTemplate.
+ */
+ def start = {
+ context.start
+ template.start
+ _started = true
+ log.info("Camel context started")
+ }
+
+ /**
+ * Stops the CamelContext and ProducerTemplate.
+ */
+ def stop = {
+ template.stop
+ context.stop
+ _initialized = false
+ _started = false
+ log.info("Camel context stopped")
+ }
+
+ /**
+ * Initializes this lifecycle object with a DefaultCamelContext.
+ */
+ def init: Unit = init(new DefaultCamelContext)
+
+ /**
+ * Initializes this lifecycle object with the given CamelContext.
+ */
+ def init(context: CamelContext) {
+ this.context = context
+ this.template = context.createProducerTemplate
+ _initialized = true
+ log.info("Camel context initialized")
+ }
+}
+
+/**
+ * Makes a global CamelContext and ProducerTemplate accessible to applications. The lifecycle
+ * of these objects is managed by se.scalablesolutions.akka.camel.service.CamelService.
+ */
+object CamelContextManager extends CamelContextLifecycle {
+ override def context: CamelContext = super.context
+ override def template: ProducerTemplate = super.template
+}
\ No newline at end of file
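A minimal usage sketch of the lifecycle above (not part of the patch; for illustration only):

    import se.scalablesolutions.akka.camel.CamelContextManager

    // allowed transitions: init -> start -> stop -> init -> ...
    CamelContextManager.init   // registers a DefaultCamelContext and producer template
    CamelContextManager.start  // starts context and template
    // ... produce and consume messages here ...
    CamelContextManager.stop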
diff --git a/akka-camel/src/main/scala/Consumer.scala b/akka-camel/src/main/scala/Consumer.scala
new file mode 100644
index 0000000000..27ec98b25d
--- /dev/null
+++ b/akka-camel/src/main/scala/Consumer.scala
@@ -0,0 +1,20 @@
+/**
+ * Copyright (C) 2009-2010 Scalable Solutions AB
+ */
+
+package se.scalablesolutions.akka.camel
+
+import se.scalablesolutions.akka.actor.Actor
+
+/**
+ * Mixed in by Actor implementations that consume messages from Camel endpoints.
+ *
+ * @author Martin Krasser
+ */
+trait Consumer { self: Actor =>
+
+ /**
+ * Returns the Camel endpoint URI to consume messages from.
+ */
+ def endpointUri: String
+}
\ No newline at end of file
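A sketch of a consumer actor based on this trait (the endpoint URI and directory are hypothetical; any Camel component URI should work). Once started, such an actor is published as a Camel endpoint by the CamelService introduced later in this patch:

    import se.scalablesolutions.akka.actor.Actor
    import se.scalablesolutions.akka.camel.{Consumer, Message}

    class FileConsumer extends Actor with Consumer {
      def endpointUri = "file:/tmp/inbox"  // hypothetical directory

      protected def receive = {
        case msg: Message => println("received %s" format msg.body)
      }
    }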
diff --git a/akka-camel/src/main/scala/Message.scala b/akka-camel/src/main/scala/Message.scala
new file mode 100644
index 0000000000..8e0156c669
--- /dev/null
+++ b/akka-camel/src/main/scala/Message.scala
@@ -0,0 +1,249 @@
+/**
+ * Copyright (C) 2009-2010 Scalable Solutions AB
+ */
+
+package se.scalablesolutions.akka.camel
+
+import org.apache.camel.{Exchange, Message => CamelMessage}
+import org.apache.camel.util.ExchangeHelper
+
+import scala.collection.jcl.{Map => MapWrapper}
+
+/**
+ * An immutable representation of a Camel message. Actor classes that mix in
+ * se.scalablesolutions.akka.camel.Producer or
+ * se.scalablesolutions.akka.camel.Consumer use this message type for communication.
+ *
+ * @author Martin Krasser
+ */
+case class Message(val body: Any, val headers: Map[String, Any]) {
+ /**
+ * Creates a message with a body and an empty header map.
+ */
+ def this(body: Any) = this(body, Map.empty)
+
+ /**
+ * Returns the body of the message converted to the type given by the clazz
+ * argument. Conversion is done using Camel's type converter. The type converter is obtained
+ * from the CamelContext managed by CamelContextManager. Applications have to ensure proper
+ * initialization of CamelContextManager.
+ *
+ * @see CamelContextManager.
+ */
+ def bodyAs[T](clazz: Class[T]): T =
+ CamelContextManager.context.getTypeConverter.mandatoryConvertTo[T](clazz, body)
+
+ /**
+ * Returns those headers from this message whose name is contained in names.
+ */
+ def headers(names: Set[String]): Map[String, Any] = headers.filter(names contains _._1)
+
+ /**
+ * Creates a Message with a new body using a transformer function.
+ */
+ def transformBody[A](transformer: A => Any): Message = setBody(transformer(body.asInstanceOf[A]))
+
+ /**
+ * Creates a Message with a new body converted to type clazz.
+ *
+ * @see Message#bodyAs(Class)
+ */
+ def setBodyAs[T](clazz: Class[T]): Message = setBody(bodyAs(clazz))
+
+ /**
+ * Creates a Message with a new body.
+ */
+ def setBody(body: Any) = new Message(body, this.headers)
+
+ /**
+ * Creates a new Message with new headers.
+ */
+ def setHeaders(headers: Map[String, Any]) = new Message(this.body, headers)
+
+ /**
+ * Creates a new Message with the headers argument added to the existing headers.
+ */
+ def addHeaders(headers: Map[String, Any]) = new Message(this.body, this.headers ++ headers)
+
+ /**
+ * Creates a new Message with the header argument added to the existing headers.
+ */
+ def addHeader(header: (String, Any)) = new Message(this.body, this.headers + header)
+
+ /**
+ * Creates a new Message where the header with name headerName is removed from
+ * the existing headers.
+ */
+ def removeHeader(headerName: String) = new Message(this.body, this.headers - headerName)
+}
+
+/**
+ * Companion object of Message class.
+ *
+ * @author Martin Krasser
+ */
+object Message {
+
+ /**
+ * Message header to correlate request with response messages. Applications that send
+ * messages to a Producer actor may want to set this header on the request message
+ * so that it can be correlated with an asynchronous response. Messages sent to Consumer
+ * actors already have this header set.
+ */
+ val MessageExchangeId = "MessageExchangeId".intern
+
+ /**
+ * Creates a new Message with body as message body and an empty header map.
+ */
+ def apply(body: Any) = new Message(body)
+
+ /**
+ * Creates a canonical form of the given message msg. If msg is of type
+ * Message then msg is returned as-is, otherwise msg is set as the body of a
+ * newly created Message object.
+ */
+ def canonicalize(msg: Any) = msg match {
+ case mobj: Message => mobj
+ case body => new Message(body)
+ }
+}
+
+/**
+ * An immutable representation of a failed Camel exchange. It contains the failure cause
+ * obtained from Exchange.getException and the headers from either the Exchange.getIn
+ * message or Exchange.getOut message, depending on the exchange pattern.
+ *
+ * @author Martin Krasser
+ */
+case class Failure(val cause: Exception, val headers: Map[String, Any])
+
+/**
+ * Adapter for converting an org.apache.camel.Exchange to and from Message and Failure objects.
+ *
+ * @author Martin Krasser
+ */
+class CamelExchangeAdapter(exchange: Exchange) {
+
+ import CamelMessageConversion.toMessageAdapter
+
+ /**
+ * Sets Exchange.getIn from the given Message object.
+ */
+ def fromRequestMessage(msg: Message): Exchange = { requestMessage.fromMessage(msg); exchange }
+
+ /**
+ * Depending on the exchange pattern, sets Exchange.getIn or Exchange.getOut from the given
+ * Message object. If the exchange is out-capable then Exchange.getOut is set, otherwise
+ * Exchange.getIn.
+ */
+ def fromResponseMessage(msg: Message): Exchange = { responseMessage.fromMessage(msg); exchange }
+
+ /**
+ * Sets Exchange.getException from the given Failure message. Headers of the Failure message
+ * are ignored.
+ */
+ def fromFailureMessage(msg: Failure): Exchange = { exchange.setException(msg.cause); exchange }
+
+ /**
+ * Creates a Message object from Exchange.getIn.
+ */
+ def toRequestMessage: Message = toRequestMessage(Map.empty)
+
+ /**
+ * Depending on the exchange pattern, creates a Message object from Exchange.getIn or Exchange.getOut.
+ * If the exchange is out-capable then Exchange.getOut is used, otherwise Exchange.getIn.
+ */
+ def toResponseMessage: Message = toResponseMessage(Map.empty)
+
+ /**
+ * Creates a Failure object from the adapted Exchange.
+ *
+ * @see Failure
+ */
+ def toFailureMessage: Failure = toFailureMessage(Map.empty)
+
+ /**
+ * Creates a Message object from Exchange.getIn.
+ *
+ * @param headers additional headers to set on the created Message in addition to those
+ * in the Camel message.
+ */
+ def toRequestMessage(headers: Map[String, Any]): Message = requestMessage.toMessage(headers)
+
+ /**
+ * Depending on the exchange pattern, creates a Message object from Exchange.getIn or Exchange.getOut.
+ * If the exchange is out-capable then Exchange.getOut is used, otherwise Exchange.getIn.
+ *
+ * @param headers additional headers to set on the created Message in addition to those
+ * in the Camel message.
+ */
+ def toResponseMessage(headers: Map[String, Any]): Message = responseMessage.toMessage(headers)
+
+ /**
+ * Creates a Failure object from the adapted Exchange.
+ *
+ * @param headers additional headers to set on the created Failure in addition to those
+ * in the Camel message.
+ *
+ * @see Failure
+ */
+ def toFailureMessage(headers: Map[String, Any]): Failure =
+ Failure(exchange.getException, headers ++ responseMessage.toMessage.headers)
+
+ private def requestMessage = exchange.getIn
+
+ private def responseMessage = ExchangeHelper.getResultMessage(exchange)
+
+}
+
+/**
+ * Adapter for converting an org.apache.camel.Message to and from Message objects.
+ *
+ * @author Martin Krasser
+ */
+class CamelMessageAdapter(val cm: CamelMessage) {
+ /**
+ * Sets the adapted Camel message from the given Message object.
+ */
+ def fromMessage(m: Message): CamelMessage = {
+ cm.setBody(m.body)
+ for (h <- m.headers) cm.getHeaders.put(h._1, h._2.asInstanceOf[AnyRef])
+ cm
+ }
+
+ /**
+ * Creates a new Message object from the adapted Camel message.
+ */
+ def toMessage: Message = toMessage(Map.empty)
+
+ /**
+ * Creates a new Message object from the adapted Camel message.
+ *
+ * @param headers additional headers to set on the created Message in addition to those
+ * in the Camel message.
+ */
+ def toMessage(headers: Map[String, Any]): Message = Message(cm.getBody, cmHeaders(headers, cm))
+
+ private def cmHeaders(headers: Map[String, Any], cm: CamelMessage) =
+ headers ++ MapWrapper[String, AnyRef](cm.getHeaders).elements
+}
+
+/**
+ * Defines conversion methods to CamelExchangeAdapter and CamelMessageAdapter. Imported by
+ * applications that want to use these conversion methods implicitly.
+ */
+object CamelMessageConversion {
+
+ /**
+ * Creates a CamelExchangeAdapter for the given Camel exchange.
+ */
+ implicit def toExchangeAdapter(ce: Exchange): CamelExchangeAdapter =
+ new CamelExchangeAdapter(ce)
+
+ /**
+ * Creates a CamelMessageAdapter for the given Camel message.
+ */
+ implicit def toMessageAdapter(cm: CamelMessage): CamelMessageAdapter =
+ new CamelMessageAdapter(cm)
+}
\ No newline at end of file
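A short sketch of the immutable Message API above (values are illustrative; bodyAs additionally requires an initialized CamelContextManager):

    import se.scalablesolutions.akka.camel.Message

    val m1 = Message("akka", Map("k1" -> "v1"))
    val m2 = m1.transformBody[String](_ + "-camel")        // body becomes "akka-camel"
    val m3 = m2.addHeader("k2" -> "v2").removeHeader("k1") // headers: Map("k2" -> "v2")
    assert(m3 == Message("akka-camel", Map("k2" -> "v2")))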
diff --git a/akka-camel/src/main/scala/Producer.scala b/akka-camel/src/main/scala/Producer.scala
new file mode 100644
index 0000000000..43e9b8b10e
--- /dev/null
+++ b/akka-camel/src/main/scala/Producer.scala
@@ -0,0 +1,192 @@
+/**
+ * Copyright (C) 2009-2010 Scalable Solutions AB
+ */
+
+package se.scalablesolutions.akka.camel
+
+import CamelMessageConversion.toExchangeAdapter
+
+import org.apache.camel.{Processor, ExchangePattern, Exchange, ProducerTemplate}
+import org.apache.camel.impl.DefaultExchange
+import org.apache.camel.spi.Synchronization
+
+import se.scalablesolutions.akka.actor.Actor
+import se.scalablesolutions.akka.dispatch.CompletableFuture
+import se.scalablesolutions.akka.util.Logging
+
+/**
+ * Mixed in by Actor implementations that produce messages to Camel endpoints.
+ *
+ * @author Martin Krasser
+ */
+trait Producer { self: Actor =>
+
+ private val headersToCopyDefault = Set(Message.MessageExchangeId)
+
+ /**
+ * If set to true (default), communication with the Camel endpoint is done via the Camel
+ * Async API. Camel then processes the
+ * message in a separate thread. If set to false, the actor thread is blocked until Camel
+ * has finished processing the produced message.
+ */
+ def async: Boolean = true
+
+ /**
+ * If set to false (default), this producer expects a response message from the Camel endpoint.
+ * If set to true, this producer communicates with the Camel endpoint with an in-only message
+ * exchange pattern (fire and forget).
+ */
+ def oneway: Boolean = false
+
+ /**
+ * Returns the Camel endpoint URI to produce messages to.
+ */
+ def endpointUri: String
+
+ /**
+ * Returns the names of message headers to copy from a request message to a response message.
+ * By default only the Message.MessageExchangeId is copied. Applications may override this to
+ * define an application-specific set of message headers to copy.
+ */
+ def headersToCopy: Set[String] = headersToCopyDefault
+
+ /**
+ * Returns the producer template from the CamelContextManager. Applications either have to ensure
+ * proper initialization of CamelContextManager or override this method.
+ *
+ * @see CamelContextManager.
+ */
+ protected def template: ProducerTemplate = CamelContextManager.template
+
+ /**
+ * Initiates a one-way (in-only) message exchange to the Camel endpoint given by
+ * endpointUri. This method blocks until Camel finishes processing
+ * the message exchange.
+ *
+ * @param msg the message to produce. The message is converted to its canonical
+ * representation via Message.canonicalize.
+ */
+ protected def produceOneway(msg: Any): Unit =
+ template.send(endpointUri, createInOnlyExchange.fromRequestMessage(Message.canonicalize(msg)))
+
+ /**
+ * Initiates a one-way (in-only) message exchange to the Camel endpoint given by
+ * endpointUri. This method triggers asynchronous processing of the
+ * message exchange by Camel.
+ *
+ * @param msg the message to produce. The message is converted to its canonical
+ * representation via Message.canonicalize.
+ */
+ protected def produceOnewayAsync(msg: Any): Unit =
+ template.asyncSend(
+ endpointUri, createInOnlyExchange.fromRequestMessage(Message.canonicalize(msg)))
+
+ /**
+ * Initiates a two-way (in-out) message exchange to the Camel endpoint given by
+ * endpointUri. This method blocks until Camel finishes processing
+ * the message exchange.
+ *
+ * @param msg the message to produce. The message is converted to its canonical
+ * representation via Message.canonicalize.
+ * @return either a response Message or a Failure object.
+ */
+ protected def produce(msg: Any): Any = {
+ val cmsg = Message.canonicalize(msg)
+ val requestProcessor = new Processor() {
+ def process(exchange: Exchange) = exchange.fromRequestMessage(cmsg)
+ }
+ val result = template.request(endpointUri, requestProcessor)
+ if (result.isFailed) result.toFailureMessage(cmsg.headers(headersToCopy))
+ else result.toResponseMessage(cmsg.headers(headersToCopy))
+ }
+
+ /**
+ * Initiates a two-way (in-out) message exchange to the Camel endpoint given by
+ * endpointUri. This method triggers asynchronous processing of the
+ * message exchange by Camel. The response message is returned asynchronously to
+ * the original sender (or sender future).
+ *
+ * @param msg the message to produce. The message is converted to its canonical
+ * representation via Message.canonicalize.
+ * @return either a response Message or a Failure object.
+ * @see ProducerResponseSender
+ */
+ protected def produceAsync(msg: Any): Unit = {
+ val cmsg = Message.canonicalize(msg)
+ val sync = new ProducerResponseSender(
+ cmsg.headers(headersToCopy), this.sender, this.senderFuture, this)
+ template.asyncCallback(endpointUri, createInOutExchange.fromRequestMessage(cmsg), sync)
+ }
+
+ /**
+ * Default implementation of Actor.receive. Implementors may choose to write
+ * def receive = produce. This partial function calls one of
+ * the protected produce methods depending on the return values of
+ * oneway and async.
+ */
+ protected def produce: PartialFunction[Any, Unit] = {
+ case msg => {
+ if ( oneway && !async) produceOneway(msg)
+ else if ( oneway && async) produceOnewayAsync(msg)
+ else if (!oneway && !async) reply(produce(msg))
+ else /*(!oneway && async)*/ produceAsync(msg)
+ }
+ }
+
+ /**
+ * Creates a new in-only Exchange.
+ */
+ protected def createInOnlyExchange: Exchange = createExchange(ExchangePattern.InOnly)
+
+ /**
+ * Creates a new in-out Exchange.
+ */
+ protected def createInOutExchange: Exchange = createExchange(ExchangePattern.InOut)
+
+ /**
+ * Creates a new Exchange with given pattern from the CamelContext managed by
+ * CamelContextManager. Applications either have to ensure proper initialization
+ * of CamelContextManager or override this method.
+ *
+ * @see CamelContextManager.
+ */
+ protected def createExchange(pattern: ExchangePattern): Exchange =
+ new DefaultExchange(CamelContextManager.context, pattern)
+}
+
+/**
+ * Synchronization object that sends responses asynchronously to initial senders. This
+ * class is used by Producer for asynchronous two-way messaging with a Camel endpoint.
+ *
+ * @author Martin Krasser
+ */
+class ProducerResponseSender(
+ headers: Map[String, Any],
+ sender: Option[Actor],
+ senderFuture: Option[CompletableFuture],
+ producer: Actor) extends Synchronization with Logging {
+
+ implicit val producerActor = Some(producer) // the response sender
+
+ /**
+ * Replies with a Failure message, created from the given exchange, to the sender (or
+ * senderFuture if applicable).
+ */
+ def onFailure(exchange: Exchange) = reply(exchange.toFailureMessage(headers))
+
+ /**
+ * Replies with a response Message, created from the given exchange, to the sender (or
+ * senderFuture if applicable).
+ */
+ def onComplete(exchange: Exchange) = reply(exchange.toResponseMessage(headers))
+
+ private def reply(message: Any) = {
+ sender match {
+ case Some(actor) => actor ! message
+ case None => senderFuture match {
+ case Some(future) => future.completeWithResult(message)
+ case None => log.warning("No destination for sending response")
+ }
+ }
+ }
+}
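A sketch of a producer actor built from this trait (the endpoint URI is hypothetical). Sending it a message with !! yields either a response Message or a Failure, as documented for produce:

    import se.scalablesolutions.akka.actor.Actor
    import se.scalablesolutions.akka.camel.Producer

    class HttpProducer extends Actor with Producer {
      def endpointUri = "http://localhost:8080/orders"  // hypothetical target
      override def oneway = false  // two-way exchange
      override def async  = true   // respond via produceAsync
      def receive = produce        // delegate to Producer.produce
    }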
diff --git a/akka-camel/src/main/scala/component/ActorComponent.scala b/akka-camel/src/main/scala/component/ActorComponent.scala
new file mode 100644
index 0000000000..763f9dd017
--- /dev/null
+++ b/akka-camel/src/main/scala/component/ActorComponent.scala
@@ -0,0 +1,152 @@
+/**
+ * Copyright (C) 2009-2010 Scalable Solutions AB
+ */
+
+package se.scalablesolutions.akka.camel.component
+
+import java.lang.{RuntimeException, String}
+import java.util.{Map => JavaMap}
+import java.util.concurrent.TimeoutException
+
+import org.apache.camel.{Exchange, Consumer, Processor}
+import org.apache.camel.impl.{DefaultProducer, DefaultEndpoint, DefaultComponent}
+
+import se.scalablesolutions.akka.actor.{ActorRegistry, Actor}
+import se.scalablesolutions.akka.camel.{Failure, CamelMessageConversion, Message}
+
+/**
+ * Camel component for sending messages to and receiving replies from actors.
+ *
+ * @see se.scalablesolutions.akka.camel.component.ActorEndpoint
+ * @see se.scalablesolutions.akka.camel.component.ActorProducer
+ *
+ * @author Martin Krasser
+ */
+class ActorComponent extends DefaultComponent {
+ def createEndpoint(uri: String, remaining: String, parameters: JavaMap[String, Object]): ActorEndpoint = {
+ val idAndUuid = idAndUuidPair(remaining)
+ new ActorEndpoint(uri, this, idAndUuid._1, idAndUuid._2)
+ }
+
+ private def idAndUuidPair(remaining: String): Tuple2[Option[String], Option[String]] = {
+ remaining split ":" toList match {
+ case id :: Nil => (Some(id), None)
+ case "id" :: id :: Nil => (Some(id), None)
+ case "uuid" :: uuid :: Nil => (None, Some(uuid))
+ case _ => throw new IllegalArgumentException(
+ "invalid path format: %s - should be or id: or uuid:" format remaining)
+ }
+ }
+}
+
+/**
+ * Camel endpoint for referencing an actor. The actor reference is given by the endpoint URI.
+ * An actor can be referenced by its Actor.getId or its Actor.uuid.
+ * Supported endpoint URI formats are
+ * actor:<actorid>,
+ * actor:id:<actorid> and
+ * actor:uuid:<actoruuid>.
+ *
+ * @see se.scalablesolutions.akka.camel.component.ActorComponent
+ * @see se.scalablesolutions.akka.camel.component.ActorProducer
+ *
+ * @author Martin Krasser
+ */
+class ActorEndpoint(uri: String,
+ comp: ActorComponent,
+ val id: Option[String],
+ val uuid: Option[String]) extends DefaultEndpoint(uri, comp) {
+
+ /**
+ * @throws UnsupportedOperationException
+ */
+ def createConsumer(processor: Processor): Consumer =
+ throw new UnsupportedOperationException("actor consumer not supported yet")
+
+ /**
+ * Creates a new ActorProducer instance initialized with this endpoint.
+ */
+ def createProducer: ActorProducer = new ActorProducer(this)
+
+ /**
+ * Returns true.
+ */
+ def isSingleton: Boolean = true
+}
+
+/**
+ * Sends the in-message of an exchange to an actor. If the exchange pattern is out-capable,
+ * the producer waits for a reply (using the !! operator), otherwise the ! operator is used
+ * for sending the message.
+ *
+ * @see se.scalablesolutions.akka.camel.component.ActorComponent
+ * @see se.scalablesolutions.akka.camel.component.ActorEndpoint
+ *
+ * @author Martin Krasser
+ */
+class ActorProducer(val ep: ActorEndpoint) extends DefaultProducer(ep) {
+ import CamelMessageConversion.toExchangeAdapter
+
+ implicit val sender = None
+
+ /**
+ * Depending on the exchange pattern, this method either calls processInOut or
+ * processInOnly for interacting with an actor. This method looks up the actor
+ * from the ActorRegistry according to this producer's endpoint URI.
+ *
+ * @param exchange represents the message exchange with the actor.
+ */
+ def process(exchange: Exchange) {
+ val actor = target getOrElse (throw new ActorNotRegisteredException(ep.getEndpointUri))
+ if (exchange.getPattern.isOutCapable) processInOut(exchange, actor)
+ else processInOnly(exchange, actor)
+ }
+
+ /**
+ * Sends the exchange in-message to the given actor using the ! operator. The message
+ * sent to the actor is of type se.scalablesolutions.akka.camel.Message.
+ */
+ protected def processInOnly(exchange: Exchange, actor: Actor): Unit =
+ actor ! exchange.toRequestMessage(Map(Message.MessageExchangeId -> exchange.getExchangeId))
+
+ /**
+ * Sends the exchange in-message to the given actor using the !! operator. The exchange
+ * out-message is populated from the actor's reply message. The message sent to the
+ * actor is of type se.scalablesolutions.akka.camel.Message.
+ */
+ protected def processInOut(exchange: Exchange, actor: Actor) {
+ val header = Map(Message.MessageExchangeId -> exchange.getExchangeId)
+ val result: Any = actor !! exchange.toRequestMessage(header)
+
+ result match {
+ case Some(msg: Failure) => exchange.fromFailureMessage(msg)
+ case Some(msg) => exchange.fromResponseMessage(Message.canonicalize(msg))
+ case None => {
+        throw new TimeoutException("timeout (%d ms) while waiting for a response from %s"
+ format (actor.timeout, ep.getEndpointUri))
+ }
+ }
+ }
+
+ private def target: Option[Actor] =
+ if (ep.id.isDefined) targetById(ep.id.get)
+ else targetByUuid(ep.uuid.get)
+
+ private def targetById(id: String) = ActorRegistry.actorsFor(id) match {
+ case Nil => None
+ case actor :: Nil => Some(actor)
+ case actors => Some(actors.first)
+ }
+
+ private def targetByUuid(uuid: String) = ActorRegistry.actorFor(uuid)
+}
+
+/**
+ * Thrown to indicate that an actor referenced by an endpoint URI cannot be
+ * found in the ActorRegistry.
+ *
+ * @author Martin Krasser
+ */
+class ActorNotRegisteredException(uri: String) extends RuntimeException {
+ override def getMessage = "%s not registered" format uri
+}
\ No newline at end of file
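A sketch of addressing a started actor through the URI formats above, using the producer template of an initialized and started CamelContextManager (the actor value is hypothetical):

    import se.scalablesolutions.akka.camel.CamelContextManager.template

    // given some started actor `actor`:
    template.sendBody("actor:uuid:%s" format actor.uuid, "Martin")                   // in-only, uses !
    val response = template.requestBody("actor:id:%s" format actor.getId, "Martin")  // in-out, uses !!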
diff --git a/akka-camel/src/main/scala/service/CamelService.scala b/akka-camel/src/main/scala/service/CamelService.scala
new file mode 100644
index 0000000000..86b4f2dc23
--- /dev/null
+++ b/akka-camel/src/main/scala/service/CamelService.scala
@@ -0,0 +1,89 @@
+/**
+ * Copyright (C) 2009-2010 Scalable Solutions AB
+ */
+
+package se.scalablesolutions.akka.camel.service
+
+import se.scalablesolutions.akka.actor.ActorRegistry
+import se.scalablesolutions.akka.camel.CamelContextManager
+import se.scalablesolutions.akka.util.{Bootable, Logging}
+
+/**
+ * Used by applications (and the Kernel) to publish consumer actors via Camel
+ * endpoints and to manage the life cycle of a global CamelContext which can
+ * be accessed via se.scalablesolutions.akka.camel.CamelContextManager.
+ *
+ * @author Martin Krasser
+ */
+trait CamelService extends Bootable with Logging {
+
+ import se.scalablesolutions.akka.actor.Actor.Sender.Self
+ import CamelContextManager._
+
+ private[camel] val consumerPublisher = new ConsumerPublisher
+ private[camel] val publishRequestor = new PublishRequestor(consumerPublisher)
+
+ /**
+ * Starts the CamelService. Any started actor that is a consumer actor will be published
+ * as a Camel endpoint. Consumer actors that are started after this method returns will
+ * be published as well. Actor publishing is done asynchronously.
+ */
+ abstract override def onLoad = {
+ super.onLoad
+
+ // Only init and start if not already done by application
+ if (!initialized) init
+ if (!started) start
+
+ // Camel should cache input streams
+ context.setStreamCaching(true)
+
+ // start actor that exposes consumer actors via Camel endpoints
+ consumerPublisher.start
+
+ // add listener for actor registration events
+ ActorRegistry.addRegistrationListener(publishRequestor.start)
+
+ // publish already registered consumer actors
+ for (publish <- Publish.forConsumers(ActorRegistry.actors)) consumerPublisher ! publish
+ }
+
+ /**
+ * Stops the CamelService.
+ */
+ abstract override def onUnload = {
+ ActorRegistry.removeRegistrationListener(publishRequestor)
+ publishRequestor.stop
+ consumerPublisher.stop
+ stop
+ super.onUnload
+ }
+
+ /**
+ * Starts the CamelService.
+ *
+ * @see onLoad
+ */
+ def load = onLoad
+
+ /**
+ * Stops the CamelService.
+ *
+ * @see onUnload
+ */
+ def unload = onUnload
+}
+
+/**
+ * CamelService companion object used by standalone applications to create their own
+ * CamelService instance.
+ *
+ * @author Martin Krasser
+ */
+object CamelService {
+
+ /**
+ * Creates a new CamelService instance.
+ */
+ def newInstance: CamelService = new CamelService {}
+}
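A minimal boot sketch for standalone applications using the companion object above:

    import se.scalablesolutions.akka.camel.service.CamelService

    val service = CamelService.newInstance
    service.load    // init/start the global CamelContext, publish consumer actors
    // ... application runs ...
    service.unload  // unpublish consumer actors and stop the CamelContext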
diff --git a/akka-camel/src/main/scala/service/ConsumerPublisher.scala b/akka-camel/src/main/scala/service/ConsumerPublisher.scala
new file mode 100644
index 0000000000..a6509e2694
--- /dev/null
+++ b/akka-camel/src/main/scala/service/ConsumerPublisher.scala
@@ -0,0 +1,135 @@
+/**
+ * Copyright (C) 2009-2010 Scalable Solutions AB
+ */
+package se.scalablesolutions.akka.camel.service
+
+import java.io.InputStream
+import java.util.concurrent.CountDownLatch
+
+import org.apache.camel.builder.RouteBuilder
+
+import se.scalablesolutions.akka.actor.{ActorUnregistered, ActorRegistered, Actor}
+import se.scalablesolutions.akka.actor.annotation.consume
+import se.scalablesolutions.akka.camel.{Consumer, CamelContextManager}
+import se.scalablesolutions.akka.util.Logging
+
+/**
+ * Actor that publishes consumer actors as Camel endpoints at the CamelContext managed
+ * by se.scalablesolutions.akka.camel.CamelContextManager. It accepts messages of type
+ * se.scalablesolutions.akka.camel.service.Publish.
+ *
+ * @author Martin Krasser
+ */
+class ConsumerPublisher extends Actor with Logging {
+ @volatile private var latch = new CountDownLatch(0)
+
+ /**
+ * Adds a route, from the endpoint URI to the actor identified by a Publish message, to
+ * the global CamelContext.
+ */
+ protected def receive = {
+ case p: Publish => publish(new ConsumerRoute(p.endpointUri, p.id, p.uuid))
+ case _ => { /* ignore */}
+ }
+
+ /**
+ * Sets the number of expected Publish messages received by this actor. Used for testing
+ * only.
+ */
+ private[camel] def expectPublishCount(count: Int): Unit = latch = new CountDownLatch(count)
+
+ /**
+ * Waits for the number of expected Publish messages to arrive. Used for testing only.
+ */
+ private[camel] def awaitPublish = latch.await
+
+ private def publish(route: ConsumerRoute) {
+ CamelContextManager.context.addRoutes(route)
+ log.info("published actor via endpoint %s" format route.endpointUri)
+ latch.countDown // needed for testing only.
+ }
+}
+
+/**
+ * Defines the route to a consumer actor.
+ *
+ * @param endpointUri endpoint URI of the consumer actor
+ * @param id actor identifier
+ * @param uuid true if id refers to Actor.uuid, false if
+ * id refers to Actor.getId.
+ *
+ * @author Martin Krasser
+ */
+class ConsumerRoute(val endpointUri: String, id: String, uuid: Boolean) extends RouteBuilder {
+ // TODO: make conversions configurable
+ private val bodyConversions = Map(
+ "file" -> classOf[InputStream]
+ )
+
+ def configure = {
+    val scheme = endpointUri take endpointUri.indexOf(":") // e.g. "http" from "http://whatever/..."
+    bodyConversions.get(scheme) match {
+ case Some(clazz) => from(endpointUri).convertBodyTo(clazz).to(actorUri)
+ case None => from(endpointUri).to(actorUri)
+ }
+ }
+
+ private def actorUri = (if (uuid) "actor:uuid:%s" else "actor:id:%s") format id
+}
+
+/**
+ * A registration listener that publishes consumer actors (and ignores other actors).
+ *
+ * @author Martin Krasser
+ */
+class PublishRequestor(consumerPublisher: Actor) extends Actor {
+ protected def receive = {
+ case ActorUnregistered(actor) => { /* ignore */ }
+ case ActorRegistered(actor) => Publish.forConsumer(actor) match {
+ case Some(publish) => consumerPublisher ! publish
+ case None => { /* ignore */ }
+ }
+ }
+}
+
+/**
+ * Request message for publishing a consumer actor.
+ *
+ * @param endpointUri endpoint URI of the consumer actor
+ * @param id actor identifier
+ * @param uuid true if id refers to Actor.uuid, false if
+ * id refers to Actor.getId.
+ *
+ * @author Martin Krasser
+ */
+case class Publish(endpointUri: String, id: String, uuid: Boolean)
+
+/**
+ * @author Martin Krasser
+ */
+object Publish {
+
+ /**
+ * Creates a list of Publish request messages for all consumer actors in the actors
+ * list.
+ */
+ def forConsumers(actors: List[Actor]): List[Publish] =
+ for (actor <- actors; pub = forConsumer(actor); if pub.isDefined) yield pub.get
+
+ /**
+ * Creates a Publish request message if actor is a consumer actor.
+ */
+ def forConsumer(actor: Actor): Option[Publish] =
+ forConsumeAnnotated(actor) orElse forConsumerType(actor)
+
+ private def forConsumeAnnotated(actor: Actor): Option[Publish] = {
+ val annotation = actor.getClass.getAnnotation(classOf[consume])
+ if (annotation eq null) None
+ else if (actor._remoteAddress.isDefined) None // do not publish proxies
+ else Some(Publish(annotation.value, actor.getId, false))
+ }
+
+ private def forConsumerType(actor: Actor): Option[Publish] =
+ if (!actor.isInstanceOf[Consumer]) None
+ else if (actor._remoteAddress.isDefined) None
+ else Some(Publish(actor.asInstanceOf[Consumer].endpointUri, actor.uuid, true))
+}
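The two publication styles recognized by Publish.forConsumer, sketched with hypothetical actors (this mirrors the CamelServiceTest below):

    import se.scalablesolutions.akka.actor.Actor
    import se.scalablesolutions.akka.actor.annotation.consume
    import se.scalablesolutions.akka.camel.{Consumer, Message}

    // matched by forConsumerType -> Publish(uri, actor.uuid, true)
    class TraitConsumer extends Actor with Consumer {
      def endpointUri = "direct:trait-consumer"
      protected def receive = { case m: Message => reply("ok") }
    }

    // matched by forConsumeAnnotated -> Publish(uri, actor.getId, false)
    @consume("direct:annotated-consumer")
    class AnnotatedConsumer extends Actor {
      protected def receive = { case m: Message => reply("ok") }
    }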
diff --git a/akka-camel/src/test/scala/MessageTest.scala b/akka-camel/src/test/scala/MessageTest.scala
new file mode 100644
index 0000000000..d519dbafa7
--- /dev/null
+++ b/akka-camel/src/test/scala/MessageTest.scala
@@ -0,0 +1,79 @@
+package se.scalablesolutions.akka.camel
+
+import java.io.InputStream
+
+import org.apache.camel.NoTypeConversionAvailableException
+import org.junit.Assert._
+import org.scalatest.junit.JUnitSuite
+
+import org.junit.Test
+
+class MessageTest extends JUnitSuite {
+
+ //
+ // TODO: extend/rewrite unit tests
+ // These tests currently only ensure proper functioning of basic features.
+ //
+
+ @Test def shouldConvertDoubleBodyToString = {
+ CamelContextManager.init
+ assertEquals("1.4", Message(1.4, null).bodyAs(classOf[String]))
+ }
+
+ @Test def shouldThrowExceptionWhenConvertingDoubleBodyToInputStream {
+ CamelContextManager.init
+ intercept[NoTypeConversionAvailableException] {
+ Message(1.4, null).bodyAs(classOf[InputStream])
+ }
+ }
+
+ @Test def shouldReturnSubsetOfHeaders = {
+ val message = Message("test" , Map("A" -> "1", "B" -> "2"))
+ assertEquals(Map("B" -> "2"), message.headers(Set("B")))
+ }
+
+ @Test def shouldTransformBodyAndPreserveHeaders = {
+ assertEquals(
+ Message("ab", Map("A" -> "1")),
+ Message("a" , Map("A" -> "1")).transformBody[String](body => body + "b"))
+ }
+
+ @Test def shouldConvertBodyAndPreserveHeaders = {
+ CamelContextManager.init
+ assertEquals(
+ Message("1.4", Map("A" -> "1")),
+ Message(1.4 , Map("A" -> "1")).setBodyAs(classOf[String]))
+ }
+
+ @Test def shouldSetBodyAndPreserveHeaders = {
+ assertEquals(
+ Message("test2" , Map("A" -> "1")),
+ Message("test1" , Map("A" -> "1")).setBody("test2"))
+ }
+
+ @Test def shouldSetHeadersAndPreserveBody = {
+ assertEquals(
+ Message("test1" , Map("C" -> "3")),
+ Message("test1" , Map("A" -> "1")).setHeaders(Map("C" -> "3")))
+
+ }
+
+ @Test def shouldAddHeaderAndPreserveBodyAndHeaders = {
+ assertEquals(
+ Message("test1" , Map("A" -> "1", "B" -> "2")),
+ Message("test1" , Map("A" -> "1")).addHeader("B" -> "2"))
+ }
+
+ @Test def shouldAddHeadersAndPreserveBodyAndHeaders = {
+ assertEquals(
+ Message("test1" , Map("A" -> "1", "B" -> "2")),
+ Message("test1" , Map("A" -> "1")).addHeaders(Map("B" -> "2")))
+ }
+
+ @Test def shouldRemoveHeadersAndPreserveBodyAndRemainingHeaders = {
+ assertEquals(
+ Message("test1" , Map("A" -> "1")),
+ Message("test1" , Map("A" -> "1", "B" -> "2")).removeHeader("B"))
+ }
+
+}
\ No newline at end of file
diff --git a/akka-camel/src/test/scala/ProducerTest.scala b/akka-camel/src/test/scala/ProducerTest.scala
new file mode 100644
index 0000000000..11ae148fb5
--- /dev/null
+++ b/akka-camel/src/test/scala/ProducerTest.scala
@@ -0,0 +1,109 @@
+package se.scalablesolutions.akka.camel
+
+import org.apache.camel.{Exchange, Processor}
+import org.apache.camel.builder.RouteBuilder
+import org.apache.camel.component.mock.MockEndpoint
+import org.junit.Assert._
+import org.junit.{Test, After, Before}
+import org.scalatest.junit.JUnitSuite
+
+import se.scalablesolutions.akka.actor.Actor
+
+class ProducerTest extends JUnitSuite {
+
+ //
+ // TODO: extend/rewrite unit tests
+ // These tests currently only ensure proper functioning of basic features.
+ //
+
+ import CamelContextManager._
+
+ var mock: MockEndpoint = _
+
+ @Before def setUp = {
+ init
+ context.addRoutes(new TestRouteBuilder)
+ start
+ mock = context.getEndpoint("mock:mock", classOf[MockEndpoint])
+ }
+
+ @After def tearDown = {
+ stop
+ }
+
+ //
+ // TODO: test replies to messages sent with ! (bang)
+ // TODO: test copying of custom message headers
+ //
+
+ @Test def shouldProduceMessageSyncAndReceiveResponse = {
+ val producer = new TestProducer("direct:input2", false, false).start
+ val message = Message("test1", Map(Message.MessageExchangeId -> "123"))
+ val expected = Message("Hello test1", Map(Message.MessageExchangeId -> "123"))
+ assertEquals(expected, producer !! message get)
+ producer.stop
+ }
+
+ @Test def shouldProduceMessageSyncAndReceiveFailure = {
+ val producer = new TestProducer("direct:input2", false, false).start
+ val message = Message("fail", Map(Message.MessageExchangeId -> "123"))
+    val result = (producer !! message).get.asInstanceOf[Failure]
+ assertEquals("failure", result.cause.getMessage)
+ assertEquals(Map(Message.MessageExchangeId -> "123"), result.headers)
+ producer.stop
+ }
+
+ @Test def shouldProduceMessageAsyncAndReceiveResponse = {
+ val producer = new TestProducer("direct:input2", true, false).start
+ val message = Message("test2", Map(Message.MessageExchangeId -> "124"))
+ val expected = Message("Hello test2", Map(Message.MessageExchangeId -> "124"))
+ assertEquals(expected, producer !! message get)
+ producer.stop
+ }
+
+ @Test def shouldProduceMessageAsyncAndReceiveFailure = {
+ val producer = new TestProducer("direct:input2", true, false).start
+ val message = Message("fail", Map(Message.MessageExchangeId -> "124"))
+    val result = (producer !! message).get.asInstanceOf[Failure]
+ assertEquals("failure", result.cause.getMessage)
+ assertEquals(Map(Message.MessageExchangeId -> "124"), result.headers)
+ producer.stop
+ }
+
+ @Test def shouldProduceMessageSyncWithoutReceivingResponse = {
+ val producer = new TestProducer("direct:input1", false, true).start
+ mock.expectedBodiesReceived("test3")
+ producer.!("test3")(None)
+ producer.stop
+ }
+
+ @Test def shouldProduceMessageAsyncAndReceiveResponseSync = {
+ val producer = new TestProducer("direct:input1", true, true).start
+ mock.expectedBodiesReceived("test4")
+ producer.!("test4")(None)
+ producer.stop
+ }
+
+ class TestProducer(uri:String, prodAsync: Boolean, prodOneway: Boolean) extends Actor with Producer {
+ override def async = prodAsync
+ override def oneway = prodOneway
+ def endpointUri = uri
+ def receive = produce
+ }
+
+ class TestRouteBuilder extends RouteBuilder {
+ def configure {
+ from("direct:input1").to("mock:mock")
+ from("direct:input2").process(new Processor() {
+ def process(exchange: Exchange) = {
+ val body = exchange.getIn.getBody
+ body match {
+ case "fail" => throw new Exception("failure")
+ case body => exchange.getOut.setBody("Hello %s" format body)
+ }
+ }
+ })
+ }
+ }
+
+}
\ No newline at end of file
diff --git a/akka-camel/src/test/scala/component/ActorComponentFeatureTest.scala b/akka-camel/src/test/scala/component/ActorComponentFeatureTest.scala
new file mode 100644
index 0000000000..58b2cdb169
--- /dev/null
+++ b/akka-camel/src/test/scala/component/ActorComponentFeatureTest.scala
@@ -0,0 +1,62 @@
+package se.scalablesolutions.akka.camel.component
+
+import org.apache.camel.RuntimeCamelException
+import org.scalatest.{BeforeAndAfterEach, BeforeAndAfterAll, FeatureSpec}
+
+import se.scalablesolutions.akka.actor.ActorRegistry
+import se.scalablesolutions.akka.camel.CamelContextManager
+import se.scalablesolutions.akka.camel.support.{Respond, Countdown, Tester, Retain}
+
+class ActorComponentFeatureTest extends FeatureSpec with BeforeAndAfterAll with BeforeAndAfterEach {
+ override protected def beforeAll() = {
+ ActorRegistry.shutdownAll
+ CamelContextManager.init
+ CamelContextManager.start
+ }
+
+ override protected def afterAll() = CamelContextManager.stop
+
+ override protected def afterEach() = ActorRegistry.shutdownAll
+
+ feature("Communicate with an actor from a Camel application using actor endpoint URIs") {
+ import CamelContextManager.template
+
+ scenario("one-way communication using actor id") {
+ val actor = new Tester with Retain with Countdown
+ actor.start
+ template.sendBody("actor:%s" format actor.getId, "Martin")
+ assert(actor.waitFor)
+ assert(actor.body === "Martin")
+ }
+
+ scenario("one-way communication using actor uuid") {
+ val actor = new Tester with Retain with Countdown
+ actor.start
+ template.sendBody("actor:uuid:%s" format actor.uuid, "Martin")
+ assert(actor.waitFor)
+ assert(actor.body === "Martin")
+ }
+
+ scenario("two-way communication using actor id") {
+ val actor = new Tester with Respond
+ actor.start
+ assert(template.requestBody("actor:%s" format actor.getId, "Martin") === "Hello Martin")
+ }
+
+ scenario("two-way communication using actor uuid") {
+ val actor = new Tester with Respond
+ actor.start
+ assert(template.requestBody("actor:uuid:%s" format actor.uuid, "Martin") === "Hello Martin")
+ }
+
+ scenario("two-way communication with timeout") {
+ val actor = new Tester {
+ timeout = 1
+ }
+ actor.start
+ intercept[RuntimeCamelException] {
+ template.requestBody("actor:uuid:%s" format actor.uuid, "Martin")
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/akka-camel/src/test/scala/component/ActorComponentTest.scala b/akka-camel/src/test/scala/component/ActorComponentTest.scala
new file mode 100644
index 0000000000..1f7b42bf08
--- /dev/null
+++ b/akka-camel/src/test/scala/component/ActorComponentTest.scala
@@ -0,0 +1,35 @@
+package se.scalablesolutions.akka.camel.component
+
+import org.apache.camel.impl.DefaultCamelContext
+import org.junit._
+import org.scalatest.junit.JUnitSuite
+
+class ActorComponentTest extends JUnitSuite {
+
+ val component: ActorComponent = ActorComponentTest.mockComponent
+
+ @Test def shouldCreateEndpointWithIdDefined = {
+ val ep1: ActorEndpoint = component.createEndpoint("actor:abc").asInstanceOf[ActorEndpoint]
+ val ep2: ActorEndpoint = component.createEndpoint("actor:id:abc").asInstanceOf[ActorEndpoint]
+ assert(ep1.id === Some("abc"))
+ assert(ep2.id === Some("abc"))
+ assert(ep1.uuid === None)
+ assert(ep2.uuid === None)
+ }
+
+ @Test def shouldCreateEndpointWithUuidDefined = {
+ val ep: ActorEndpoint = component.createEndpoint("actor:uuid:abc").asInstanceOf[ActorEndpoint]
+ assert(ep.uuid === Some("abc"))
+ assert(ep.id === None)
+ }
+}
+
+object ActorComponentTest {
+ def mockComponent = {
+ val component = new ActorComponent
+ component.setCamelContext(new DefaultCamelContext)
+ component
+ }
+
+ def mockEndpoint(uri:String) = mockComponent.createEndpoint(uri)
+}
diff --git a/akka-camel/src/test/scala/component/ActorProducerTest.scala b/akka-camel/src/test/scala/component/ActorProducerTest.scala
new file mode 100644
index 0000000000..afb4a12ef0
--- /dev/null
+++ b/akka-camel/src/test/scala/component/ActorProducerTest.scala
@@ -0,0 +1,76 @@
+package se.scalablesolutions.akka.camel.component
+
+import ActorComponentTest._
+
+import java.util.concurrent.TimeoutException
+
+import org.apache.camel.ExchangePattern
+import org.junit.{After, Test}
+import org.scalatest.junit.JUnitSuite
+import org.scalatest.BeforeAndAfterAll
+
+import se.scalablesolutions.akka.actor.ActorRegistry
+import se.scalablesolutions.akka.camel.support.{Countdown, Retain, Tester, Respond}
+import se.scalablesolutions.akka.camel.{Failure, Message}
+
+class ActorProducerTest extends JUnitSuite with BeforeAndAfterAll {
+
+ @After def tearDown = {
+ ActorRegistry.shutdownAll
+ }
+
+ @Test def shouldSendMessageToActor = {
+ val actor = new Tester with Retain with Countdown
+ val endpoint = mockEndpoint("actor:uuid:%s" format actor.uuid)
+ val exchange = endpoint.createExchange(ExchangePattern.InOnly)
+ actor.start
+ exchange.getIn.setBody("Martin")
+ exchange.getIn.setHeader("k1", "v1")
+ endpoint.createProducer.process(exchange)
+ actor.waitFor
+ assert(actor.body === "Martin")
+ assert(actor.headers === Map(Message.MessageExchangeId -> exchange.getExchangeId, "k1" -> "v1"))
+ }
+
+ @Test def shouldSendMessageToActorAndReturnResponse = {
+ val actor = new Tester with Respond {
+ override def response(msg: Message) = Message(super.response(msg), Map("k2" -> "v2"))
+ }
+ val endpoint = mockEndpoint("actor:uuid:%s" format actor.uuid)
+ val exchange = endpoint.createExchange(ExchangePattern.InOut)
+ actor.start
+ exchange.getIn.setBody("Martin")
+ exchange.getIn.setHeader("k1", "v1")
+ endpoint.createProducer.process(exchange)
+ assert(exchange.getOut.getBody === "Hello Martin")
+ assert(exchange.getOut.getHeader("k2") === "v2")
+ }
+
+ @Test def shouldSendMessageToActorAndReturnFailure = {
+ val actor = new Tester with Respond {
+ override def response(msg: Message) = Failure(new Exception("testmsg"), Map("k3" -> "v3"))
+ }
+ val endpoint = mockEndpoint("actor:uuid:%s" format actor.uuid)
+ val exchange = endpoint.createExchange(ExchangePattern.InOut)
+ actor.start
+ exchange.getIn.setBody("Martin")
+ exchange.getIn.setHeader("k1", "v1")
+ endpoint.createProducer.process(exchange)
+ assert(exchange.getException.getMessage === "testmsg")
+ assert(exchange.getOut.getBody === null)
+ assert(exchange.getOut.getHeader("k3") === null) // headers from failure message are currently ignored
+ }
+
+ @Test def shouldSendMessageToActorAndTimeout: Unit = {
+ val actor = new Tester {
+ timeout = 1
+ }
+ val endpoint = mockEndpoint("actor:uuid:%s" format actor.uuid)
+ val exchange = endpoint.createExchange(ExchangePattern.InOut)
+ actor.start
+ exchange.getIn.setBody("Martin")
+ intercept[TimeoutException] {
+ endpoint.createProducer.process(exchange)
+ }
+ }
+}
diff --git a/akka-camel/src/test/scala/service/CamelServiceTest.scala b/akka-camel/src/test/scala/service/CamelServiceTest.scala
new file mode 100644
index 0000000000..a3b0f5c913
--- /dev/null
+++ b/akka-camel/src/test/scala/service/CamelServiceTest.scala
@@ -0,0 +1,103 @@
+package se.scalablesolutions.akka.camel.service
+
+import org.apache.camel.builder.RouteBuilder
+import org.junit.Assert._
+import org.scalatest.junit.JUnitSuite
+
+import se.scalablesolutions.akka.actor.Actor
+import se.scalablesolutions.akka.actor.annotation.consume
+import se.scalablesolutions.akka.camel.{CamelContextManager, Consumer, Message}
+import org.junit.{Ignore, Before, After, Test}
+
+class CamelServiceTest extends JUnitSuite with CamelService {
+
+ //
+ // TODO: extend/rewrite unit tests
+ // These tests currently only ensure proper functioning of basic features.
+ //
+
+ import CamelContextManager._
+
+ var actor1: Actor = _
+ var actor2: Actor = _
+ var actor3: Actor = _
+
+ @Before def setUp = {
+ // register actors before starting the CamelService
+ actor1 = new TestActor1().start
+ actor2 = new TestActor2().start
+ actor3 = new TestActor3().start
+ // initialize global CamelContext
+ init
+ // customize global CamelContext
+ context.addRoutes(new TestRouteBuilder)
+ consumerPublisher.expectPublishCount(2)
+ load
+ consumerPublisher.awaitPublish
+ }
+
+ @After def tearDown = {
+ unload
+ actor1.stop
+ actor2.stop
+ actor3.stop
+ }
+
+ @Test def shouldReceiveResponseViaPreStartGeneratedRoutes = {
+ assertEquals("Hello Martin (actor1)", template.requestBody("direct:actor1", "Martin"))
+ assertEquals("Hello Martin (actor2)", template.requestBody("direct:actor2", "Martin"))
+ }
+
+ @Test def shouldReceiveResponseViaPostStartGeneratedRoute = {
+ consumerPublisher.expectPublishCount(1)
+ // register actor after starting CamelService
+ val actor4 = new TestActor4().start
+ consumerPublisher.awaitPublish
+ assertEquals("Hello Martin (actor4)", template.requestBody("direct:actor4", "Martin"))
+ actor4.stop
+ }
+
+ @Test def shouldReceiveResponseViaCustomRoute = {
+ assertEquals("Hello Tester (actor3)", template.requestBody("direct:actor3", "Martin"))
+ }
+
+}
+
+class TestActor1 extends Actor with Consumer {
+ def endpointUri = "direct:actor1"
+
+ protected def receive = {
+ case msg: Message => reply("Hello %s (actor1)" format msg.body)
+ }
+}
+
+@consume("direct:actor2")
+class TestActor2 extends Actor {
+ protected def receive = {
+ case msg: Message => reply("Hello %s (actor2)" format msg.body)
+ }
+}
+
+class TestActor3 extends Actor {
+ id = "actor3"
+
+ protected def receive = {
+ case msg: Message => reply("Hello %s (actor3)" format msg.body)
+ }
+}
+
+class TestActor4 extends Actor with Consumer {
+ def endpointUri = "direct:actor4"
+
+ protected def receive = {
+ case msg: Message => reply("Hello %s (actor4)" format msg.body)
+ }
+}
+
+class TestRouteBuilder extends RouteBuilder {
+ def configure {
+ val actorUri = "actor:%s" format classOf[TestActor3].getName
+ from("direct:actor3").transform(constant("Tester")).to("actor:actor3")
+ }
+}
+
diff --git a/akka-camel/src/test/scala/support/TestSupport.scala b/akka-camel/src/test/scala/support/TestSupport.scala
new file mode 100644
index 0000000000..f6b7998934
--- /dev/null
+++ b/akka-camel/src/test/scala/support/TestSupport.scala
@@ -0,0 +1,49 @@
+package se.scalablesolutions.akka.camel.support
+
+import java.util.concurrent.{TimeUnit, CountDownLatch}
+
+import se.scalablesolutions.akka.camel.Message
+import se.scalablesolutions.akka.actor.Actor
+
+trait Receive {
+ def onMessage(msg: Message): Unit
+}
+
+trait Respond extends Receive {self: Actor =>
+ abstract override def onMessage(msg: Message): Unit = {
+ super.onMessage(msg)
+ reply(response(msg))
+ }
+ def response(msg: Message): Any = "Hello %s" format msg.body
+}
+
+trait Retain extends Receive {
+ var body: Any = _
+ var headers = Map.empty[String, Any]
+ abstract override def onMessage(msg: Message): Unit = {
+ super.onMessage(msg)
+ body = msg.body
+ headers = msg.headers
+ }
+}
+
+trait Countdown extends Receive {
+ val count = 1
+ val duration = 5000
+ val latch = new CountDownLatch(count)
+
+ def waitFor = latch.await(duration, TimeUnit.MILLISECONDS)
+ def countDown = latch.countDown
+
+ abstract override def onMessage(msg: Message) = {
+ super.onMessage(msg)
+ countDown
+ }
+}
+
+class Tester extends Actor with Receive {
+ def receive = {
+ case msg: Message => onMessage(msg)
+ }
+ def onMessage(msg: Message): Unit = {}
+}
diff --git a/akka-cluster/akka-cluster-jgroups/pom.xml b/akka-cluster/akka-cluster-jgroups/pom.xml
deleted file mode 100644
index 85d25e2330..0000000000
--- a/akka-cluster/akka-cluster-jgroups/pom.xml
+++ /dev/null
@@ -1,24 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<project>
-    <modelVersion>4.0.0</modelVersion>
-
-    <artifactId>akka-cluster-jgroups</artifactId>
-    <name>Akka Cluster JGroups Module</name>
-
-    <packaging>jar</packaging>
-
-    <parent>
-        <artifactId>akka-cluster-parent</artifactId>
-        <groupId>se.scalablesolutions.akka</groupId>
-        <version>0.7-SNAPSHOT</version>
-    </parent>
-
-    <dependencies>
-        <dependency>
-            <groupId>jgroups</groupId>
-            <artifactId>jgroups</artifactId>
-            <version>2.8.0.CR7</version>
-        </dependency>
-    </dependencies>
-</project>
diff --git a/akka-cluster/akka-cluster-jgroups/src/main/scala/JGroupsClusterActor.scala b/akka-cluster/akka-cluster-jgroups/src/main/scala/JGroupsClusterActor.scala
index 12d93ef272..7d56bb1539 100644
--- a/akka-cluster/akka-cluster-jgroups/src/main/scala/JGroupsClusterActor.scala
+++ b/akka-cluster/akka-cluster-jgroups/src/main/scala/JGroupsClusterActor.scala
@@ -1,15 +1,17 @@
-package se.scalablesolutions.akka.remote
+package se.scalablesolutions.akka.cluster.jgroups
import org.jgroups.{JChannel, View => JG_VIEW, Address, Message => JG_MSG, ExtendedMembershipListener, Receiver}
+import se.scalablesolutions.akka.remote.ClusterActor._
+import se.scalablesolutions.akka.remote.BasicClusterActor
+
+import org.scala_tools.javautils.Imports._
+
/**
* Clustering support via JGroups.
 * @author Viktor Klang
*/
class JGroupsClusterActor extends BasicClusterActor {
- import ClusterActor._
- import org.scala_tools.javautils.Imports._
-
type ADDR_T = Address
@volatile private var isActive = false
diff --git a/akka-cluster/akka-cluster-shoal/pom.xml b/akka-cluster/akka-cluster-shoal/pom.xml
deleted file mode 100644
index b58e77dcf5..0000000000
--- a/akka-cluster/akka-cluster-shoal/pom.xml
+++ /dev/null
@@ -1,34 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<project>
-    <modelVersion>4.0.0</modelVersion>
-
-    <artifactId>akka-cluster-shoal</artifactId>
-    <name>Akka Cluster Shoal Module</name>
-
-    <packaging>jar</packaging>
-
-    <parent>
-        <artifactId>akka-cluster-parent</artifactId>
-        <groupId>se.scalablesolutions.akka</groupId>
-        <version>0.7-SNAPSHOT</version>
-    </parent>
-
-    <dependencies>
-
-        <dependency>
-            <groupId>shoal-jxta</groupId>
-            <artifactId>shoal</artifactId>
-            <version>1.1-20090818</version>
-        </dependency>
-        <dependency>
-            <groupId>shoal-jxta</groupId>
-            <artifactId>jxta</artifactId>
-            <version>1.1-20090818</version>
-        </dependency>
-    </dependencies>
-</project>
diff --git a/akka-cluster/akka-cluster-shoal/src/main/scala/ShoalClusterActor.scala b/akka-cluster/akka-cluster-shoal/src/main/scala/ShoalClusterActor.scala
index 3d83a46ef3..068d3a4345 100644
--- a/akka-cluster/akka-cluster-shoal/src/main/scala/ShoalClusterActor.scala
+++ b/akka-cluster/akka-cluster-shoal/src/main/scala/ShoalClusterActor.scala
@@ -1,29 +1,16 @@
/**
* Copyright (C) 2009-2010 Scalable Solutions AB
*/
-package se.scalablesolutions.akka.remote
+package se.scalablesolutions.akka.cluster.shoal
-import se.scalablesolutions.akka.Config.config
import java.util.Properties
-import com.sun.enterprise.ee.cms.core.{CallBack,
- GMSConstants,
- GMSFactory,
- GroupManagementService,
- MessageSignal,
- Signal,
- GMSException,
- SignalAcquireException,
- SignalReleaseException,
- JoinNotificationSignal,
- FailureSuspectedSignal,
- FailureNotificationSignal }
-import com.sun.enterprise.ee.cms.impl.client.{FailureNotificationActionFactoryImpl,
- FailureSuspectedActionFactoryImpl,
- JoinNotificationActionFactoryImpl,
- MessageActionFactoryImpl,
- PlannedShutdownActionFactoryImpl
-}
+import se.scalablesolutions.akka.config.Config.config
+import se.scalablesolutions.akka.remote.{ClusterActor, BasicClusterActor, RemoteServer}
+
+import com.sun.enterprise.ee.cms.core._
+import com.sun.enterprise.ee.cms.impl.client._
+
/**
* Clustering support via Shoal.
*/
@@ -67,9 +54,9 @@ class ShoalClusterActor extends BasicClusterActor {
* Adds callbacks and boots up the cluster
*/
protected def createGMS : GroupManagementService = {
-
- val g = GMSFactory.startGMSModule(serverName,name, GroupManagementService.MemberType.CORE, properties()).asInstanceOf[GroupManagementService]
-
+ val g = GMSFactory
+ .startGMSModule(serverName,name, GroupManagementService.MemberType.CORE, properties())
+ .asInstanceOf[GroupManagementService]
val callback = createCallback
g.addActionFactory(new JoinNotificationActionFactoryImpl(callback))
g.addActionFactory(new FailureSuspectedActionFactoryImpl(callback))
@@ -102,8 +89,8 @@ class ShoalClusterActor extends BasicClusterActor {
}
signal.release()
} catch {
- case e : SignalAcquireException => log.warning(e,"SignalAcquireException")
- case e : SignalReleaseException => log.warning(e,"SignalReleaseException")
+ case e : SignalAcquireException => log.warning(e,"SignalAcquireException")
+ case e : SignalReleaseException => log.warning(e,"SignalReleaseException")
}
}
}
diff --git a/akka-cluster/akka-cluster-tribes/pom.xml b/akka-cluster/akka-cluster-tribes/pom.xml
deleted file mode 100644
index efcea51aa8..0000000000
--- a/akka-cluster/akka-cluster-tribes/pom.xml
+++ /dev/null
@@ -1,24 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<project>
-    <modelVersion>4.0.0</modelVersion>
-
-    <artifactId>akka-cluster-tribes</artifactId>
-    <name>Akka Cluster Tribes Module</name>
-
-    <packaging>jar</packaging>
-
-    <parent>
-        <artifactId>akka-cluster-parent</artifactId>
-        <groupId>se.scalablesolutions.akka</groupId>
-        <version>0.7-SNAPSHOT</version>
-    </parent>
-
-    <dependencies>
-        <dependency>
-            <groupId>org.apache.tomcat</groupId>
-            <artifactId>tribes</artifactId>
-            <version>6.0.20</version>
-        </dependency>
-    </dependencies>
-</project>
diff --git a/akka-cluster/pom.xml b/akka-cluster/pom.xml
deleted file mode 100644
index 9d7bd42000..0000000000
--- a/akka-cluster/pom.xml
+++ /dev/null
@@ -1,42 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<project>
-    <modelVersion>4.0.0</modelVersion>
-
-    <artifactId>akka-cluster-parent</artifactId>
-    <name>Akka Cluster Modules</name>
-
-    <packaging>pom</packaging>
-
-    <parent>
-        <artifactId>akka</artifactId>
-        <groupId>se.scalablesolutions.akka</groupId>
-        <version>0.7-SNAPSHOT</version>
-    </parent>
-
-    <modules>
-        <module>akka-cluster-jgroups</module>
-        <module>akka-cluster-shoal</module>
-    </modules>
-
-    <dependencies>
-        <dependency>
-            <artifactId>akka-core</artifactId>
-            <groupId>${project.groupId}</groupId>
-            <version>${project.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>org.scalatest</groupId>
-            <artifactId>scalatest</artifactId>
-            <version>1.0</version>
-            <scope>test</scope>
-        </dependency>
-        <dependency>
-            <groupId>junit</groupId>
-            <artifactId>junit</artifactId>
-            <version>4.5</version>
-            <scope>test</scope>
-        </dependency>
-    </dependencies>
-</project>
diff --git a/akka-comet/pom.xml b/akka-comet/pom.xml
deleted file mode 100644
index 88cdc0cf57..0000000000
--- a/akka-comet/pom.xml
+++ /dev/null
@@ -1,54 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<project>
-    <modelVersion>4.0.0</modelVersion>
-
-    <artifactId>akka-comet</artifactId>
-    <name>Akka Comet Module</name>
-
-    <packaging>jar</packaging>
-
-    <parent>
-        <artifactId>akka</artifactId>
-        <groupId>se.scalablesolutions.akka</groupId>
-        <version>0.7-SNAPSHOT</version>
-    </parent>
-
-    <dependencies>
-
-        <dependency>
-            <artifactId>akka-rest</artifactId>
-            <groupId>${project.groupId}</groupId>
-            <version>${project.version}</version>
-        </dependency>
-
-        <dependency>
-            <groupId>com.sun.grizzly</groupId>
-            <artifactId>grizzly-comet-webserver</artifactId>
-            <version>${grizzly.version}</version>
-        </dependency>
-
-        <dependency>
-            <groupId>javax.servlet</groupId>
-            <artifactId>servlet-api</artifactId>
-            <version>2.5</version>
-        </dependency>
-        <dependency>
-            <groupId>org.atmosphere</groupId>
-            <artifactId>atmosphere-annotations</artifactId>
-            <version>${atmosphere.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>org.atmosphere</groupId>
-            <artifactId>atmosphere-jersey</artifactId>
-            <version>${atmosphere.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>org.atmosphere</groupId>
-            <artifactId>atmosphere-runtime</artifactId>
-            <version>${atmosphere.version}</version>
-        </dependency>
-    </dependencies>
-</project>
diff --git a/akka-comet/src/main/scala/AkkaClusterBroadcastFilter.scala b/akka-comet/src/main/scala/AkkaClusterBroadcastFilter.scala
index 724c82432e..8fdd47fddd 100644
--- a/akka-comet/src/main/scala/AkkaClusterBroadcastFilter.scala
+++ b/akka-comet/src/main/scala/AkkaClusterBroadcastFilter.scala
@@ -4,13 +4,13 @@
package se.scalablesolutions.akka.comet
-import se.scalablesolutions.akka.actor.{Actor}
-import se.scalablesolutions.akka.remote.{Cluster}
-import scala.reflect.{BeanProperty}
+import se.scalablesolutions.akka.actor.Actor
+import se.scalablesolutions.akka.remote.Cluster
+import scala.reflect.BeanProperty
import org.atmosphere.cpr.{BroadcastFilter, ClusterBroadcastFilter, Broadcaster}
sealed trait ClusterCometMessageType
-case class ClusterCometBroadcast(val name : String, val msg : AnyRef) extends ClusterCometMessageType
+case class ClusterCometBroadcast(name: String, msg: AnyRef) extends ClusterCometMessageType
/**
* Enables explicit clustering of Atmosphere (Comet) resources
diff --git a/akka-kernel/src/main/scala/BootableCometActorService.scala b/akka-comet/src/main/scala/BootableCometActorService.scala
similarity index 87%
rename from akka-kernel/src/main/scala/BootableCometActorService.scala
rename to akka-comet/src/main/scala/BootableCometActorService.scala
index b014fcb9ad..496cc33aed 100644
--- a/akka-kernel/src/main/scala/BootableCometActorService.scala
+++ b/akka-comet/src/main/scala/BootableCometActorService.scala
@@ -2,16 +2,16 @@
* Copyright (C) 2009-2010 Scalable Solutions AB
*/
-package se.scalablesolutions.akka
+package se.scalablesolutions.akka.comet
import com.sun.grizzly.http.SelectorThread
import com.sun.grizzly.http.servlet.ServletAdapter
import com.sun.grizzly.standalone.StaticStreamAlgorithm
import javax.ws.rs.core.UriBuilder
-import se.scalablesolutions.akka.comet.AkkaServlet
+
import se.scalablesolutions.akka.actor.BootableActorLoaderService
-import se.scalablesolutions.akka.util.{Bootable,Logging}
+import se.scalablesolutions.akka.util.{Bootable, Logging}
/**
* Handles the Akka Comet Support (load/unload)
@@ -19,16 +19,17 @@ import se.scalablesolutions.akka.util.{Bootable,Logging}
trait BootableCometActorService extends Bootable with Logging {
self : BootableActorLoaderService =>
- import Config._
+ import config.Config._
val REST_HOSTNAME = config.getString("akka.rest.hostname", "localhost")
val REST_URL = "http://" + REST_HOSTNAME
val REST_PORT = config.getInt("akka.rest.port", 9998)
+
protected var jerseySelectorThread: Option[SelectorThread] = None
abstract override def onLoad = {
super.onLoad
- if(config.getBool("akka.rest.service", true)){
+ if (config.getBool("akka.rest.service", true)) {
val uri = UriBuilder.fromUri(REST_URL).port(REST_PORT).build()
@@ -42,8 +43,7 @@ trait BootableCometActorService extends Bootable with Logging {
adapter.setHandleStaticResources(true)
adapter.setServletInstance(new AkkaServlet)
adapter.setContextPath(uri.getPath)
- //Using autodetection for now
- //adapter.addInitParameter("cometSupport", "org.atmosphere.container.GrizzlyCometSupport")
+ adapter.addInitParameter("cometSupport", "org.atmosphere.container.GrizzlyCometSupport")
if (HOME.isDefined) adapter.setRootFolder(HOME.get + "/deploy/root")
log.info("REST service root path [%s] and context path [%s]", adapter.getRootFolder, adapter.getContextPath)
diff --git a/akka-core/src/main/scala/actor/ActiveObject.scala b/akka-core/src/main/scala/actor/ActiveObject.scala
index d88f0e861b..9b5a6b409a 100644
--- a/akka-core/src/main/scala/actor/ActiveObject.scala
+++ b/akka-core/src/main/scala/actor/ActiveObject.scala
@@ -19,7 +19,7 @@ import java.net.InetSocketAddress
import java.lang.reflect.{InvocationTargetException, Method}
object Annotations {
- import se.scalablesolutions.akka.annotation._
+ import se.scalablesolutions.akka.actor.annotation._
val oneway = classOf[oneway]
val transactionrequired = classOf[transactionrequired]
val prerestart = classOf[prerestart]
diff --git a/akka-core/src/main/scala/actor/Actor.scala b/akka-core/src/main/scala/actor/Actor.scala
index e5423e7bd1..674afeb6ad 100644
--- a/akka-core/src/main/scala/actor/Actor.scala
+++ b/akka-core/src/main/scala/actor/Actor.scala
@@ -4,23 +4,25 @@
package se.scalablesolutions.akka.actor
-import se.scalablesolutions.akka.Config._
import se.scalablesolutions.akka.dispatch._
+import se.scalablesolutions.akka.config.Config._
import se.scalablesolutions.akka.config.{AllForOneStrategy, OneForOneStrategy, FaultHandlingStrategy}
import se.scalablesolutions.akka.config.ScalaConfig._
import se.scalablesolutions.akka.stm.Transaction._
import se.scalablesolutions.akka.stm.TransactionManagement._
-import se.scalablesolutions.akka.stm.{StmException, TransactionManagement}
+import se.scalablesolutions.akka.stm.TransactionManagement
import se.scalablesolutions.akka.remote.protobuf.RemoteProtocol.RemoteRequest
import se.scalablesolutions.akka.remote.{RemoteProtocolBuilder, RemoteClient, RemoteRequestIdFactory}
import se.scalablesolutions.akka.serialization.Serializer
import se.scalablesolutions.akka.util.{HashCode, Logging, UUID}
import org.multiverse.api.ThreadLocalTransaction._
+import org.multiverse.commitbarriers.CountDownCommitBarrier
import java.util.{Queue, HashSet}
import java.util.concurrent.ConcurrentLinkedQueue
import java.net.InetSocketAddress
+import java.util.concurrent.locks.{Lock, ReentrantLock}
/**
* Implements the Transactor abstraction. E.g. a transactional actor.
@@ -72,7 +74,7 @@ object Actor extends Logging {
val HOSTNAME = config.getString("akka.remote.server.hostname", "localhost")
val PORT = config.getInt("akka.remote.server.port", 9999)
- object Sender{
+ object Sender {
implicit val Self: Option[Actor] = None
}
@@ -98,9 +100,7 @@ object Actor extends Logging {
* The actor is started when created.
* Example:
*
- * import Actor._
- *
- * val a = actor {
+ * val a = Actor.init {
* ... // init stuff
* } receive {
* case msg => ... // handle message
@@ -108,8 +108,8 @@ object Actor extends Logging {
*
*
*/
- def actor(body: => Unit) = {
- def handler(body: => Unit) = new {
+ def init[A](body: => Unit) = {
+ def handler[A](body: => Unit) = new {
def receive(handler: PartialFunction[Any, Unit]) = new Actor() {
start
body
@@ -198,7 +198,7 @@ object Actor extends Logging {
*/
trait Actor extends TransactionManagement {
implicit protected val self: Option[Actor] = Some(this)
- implicit protected val transactionFamily: String = this.getClass.getName
+ implicit protected val transactionFamilyName: String = this.getClass.getName
// Only mutable for RemoteServer in order to maintain identity across nodes
private[akka] var _uuid = UUID.newUuid.toString
@@ -219,6 +219,12 @@ trait Actor extends TransactionManagement {
private[akka] var _replyToAddress: Option[InetSocketAddress] = None
private[akka] val _mailbox: Queue[MessageInvocation] = new ConcurrentLinkedQueue[MessageInvocation]
+ /**
+ * This lock ensures thread safety in dispatching: only one message can
+ * be dispatched on the actor at a time.
+ */
+ private[akka] val _dispatcherLock: Lock = new ReentrantLock
+
// ====================================
// protected fields
// ====================================
@@ -309,9 +315,9 @@ trait Actor extends TransactionManagement {
* If 'trapExit' is set for the actor to act as supervisor, then a faultHandler must be defined.
* Can be one of:
*
- * AllForOneStrategy(maxNrOfRetries: Int, withinTimeRange: Int)
+ * faultHandler = Some(AllForOneStrategy(maxNrOfRetries, withinTimeRange))
*
- * OneForOneStrategy(maxNrOfRetries: Int, withinTimeRange: Int)
+ * faultHandler = Some(OneForOneStrategy(maxNrOfRetries, withinTimeRange))
*
*/
protected var faultHandler: Option[FaultHandlingStrategy] = None
@@ -334,8 +340,8 @@ trait Actor extends TransactionManagement {
/**
* User overridable callback/setting.
*
- * Partial function implementing the server logic.
- * To be implemented by subclassing server.
+ * Partial function implementing the actor logic.
+ * To be implemented by subclassing actor.
*
* Example code:
*
@@ -501,8 +507,6 @@ trait Actor extends TransactionManagement {
def !![T](message: Any, timeout: Long): Option[T] = {
if (_isKilled) throw new ActorKilledException("Actor [" + toString + "] has been killed, can't respond to messages")
if (_isRunning) {
- val from = if (sender != null && sender.isInstanceOf[Actor]) Some(sender.asInstanceOf[Actor])
- else None
val future = postMessageToMailboxAndCreateFutureResultWithTimeout(message, timeout, None)
val isActiveObject = message.isInstanceOf[Invocation]
if (isActiveObject && message.asInstanceOf[Invocation].isVoid) future.completeWithResult(None)
@@ -785,6 +789,11 @@ trait Actor extends TransactionManagement {
}
protected[akka] def postMessageToMailbox(message: Any, sender: Option[Actor]): Unit = {
+ if (isTransactionSetInScope) {
+ log.trace("Adding transaction for %s with message [%s] to transaction set", toString, message)
+ getTransactionSetInScope.incParties
+ }
+
if (_remoteAddress.isDefined) {
val requestBuilder = RemoteRequest.newBuilder
.setId(RemoteRequestIdFactory.nextId)
@@ -796,8 +805,7 @@ trait Actor extends TransactionManagement {
.setIsEscaped(false)
val id = registerSupervisorAsRemoteActor
- if(id.isDefined)
- requestBuilder.setSupervisorUuid(id.get)
+ if (id.isDefined) requestBuilder.setSupervisorUuid(id.get)
// set the source fields used to reply back to the original sender
// (i.e. not the remote proxy actor)
@@ -816,7 +824,7 @@ trait Actor extends TransactionManagement {
RemoteProtocolBuilder.setMessage(message, requestBuilder)
RemoteClient.clientFor(_remoteAddress.get).send(requestBuilder.build, None)
} else {
- val invocation = new MessageInvocation(this, message, None, sender, currentTransaction.get)
+ val invocation = new MessageInvocation(this, message, None, sender, transactionSet.get)
if (_isEventBased) {
_mailbox.add(invocation)
if (_isSuspended) invocation.send
@@ -824,12 +832,18 @@ trait Actor extends TransactionManagement {
else
invocation.send
}
+ clearTransactionSet
}
protected[akka] def postMessageToMailboxAndCreateFutureResultWithTimeout(
message: Any,
timeout: Long,
senderFuture: Option[CompletableFuture]): CompletableFuture = {
+ if (isTransactionSetInScope) {
+ log.trace("Adding transaction for %s with message [%s] to transaction set", toString, message)
+ getTransactionSetInScope.incParties
+ }
+
if (_remoteAddress.isDefined) {
val requestBuilder = RemoteRequest.newBuilder
.setId(RemoteRequestIdFactory.nextId)
@@ -843,16 +857,18 @@ trait Actor extends TransactionManagement {
val id = registerSupervisorAsRemoteActor
if (id.isDefined) requestBuilder.setSupervisorUuid(id.get)
val future = RemoteClient.clientFor(_remoteAddress.get).send(requestBuilder.build, senderFuture)
+ clearTransactionSet
if (future.isDefined) future.get
else throw new IllegalStateException("Expected a future from remote call to actor " + toString)
} else {
val future = if (senderFuture.isDefined) senderFuture.get
else new DefaultCompletableFuture(timeout)
- val invocation = new MessageInvocation(this, message, Some(future), None, currentTransaction.get)
+ val invocation = new MessageInvocation(this, message, Some(future), None, transactionSet.get)
if (_isEventBased) {
_mailbox.add(invocation)
invocation.send
} else invocation.send
+ clearTransactionSet
future
}
}
@@ -872,7 +888,7 @@ trait Actor extends TransactionManagement {
}
private def dispatch[T](messageHandle: MessageInvocation) = {
- setTransaction(messageHandle.tx)
+ setTransactionSet(messageHandle.transactionSet)
val message = messageHandle.message //serializeMessage(messageHandle.message)
senderFuture = messageHandle.future
@@ -894,43 +910,55 @@ trait Actor extends TransactionManagement {
}
private def transactionalDispatch[T](messageHandle: MessageInvocation) = {
- setTransaction(messageHandle.tx)
+ var topLevelTransaction = false
+ val txSet: Option[CountDownCommitBarrier] =
+ if (messageHandle.transactionSet.isDefined) messageHandle.transactionSet
+ else {
+ topLevelTransaction = true // FIXME create a new internal atomic block that can wait for X seconds if top level tx
+ if (isTransactionRequiresNew) {
+ log.trace("Creating a new transaction set (top-level transaction) \nfor actor %s \nwith message %s", toString, messageHandle)
+ Some(createNewTransactionSet)
+ } else None
+ }
+ setTransactionSet(txSet)
val message = messageHandle.message //serializeMessage(messageHandle.message)
senderFuture = messageHandle.future
sender = messageHandle.sender
+ def clearTx = {
+ clearTransactionSet
+ clearTransaction
+ }
+
def proceed = {
- try {
- incrementTransaction
- if (base.isDefinedAt(message)) base(message) // invoke user actor's receive partial function
- else throw new IllegalArgumentException(
- "Actor " + toString + " could not process message [" + message + "]" +
- "\n\tsince no matching 'case' clause in its 'receive' method could be found")
- } finally {
- decrementTransaction
- }
+ if (base.isDefinedAt(message)) base(message) // invoke user actor's receive partial function
+ else throw new IllegalArgumentException(
+ toString + " could not process message [" + message + "]" +
+ "\n\tsince no matching 'case' clause in its 'receive' method could be found")
+ setTransactionSet(txSet) // restore transaction set to allow atomic block to do commit
}
try {
- if (isTransactionRequiresNew && !isTransactionInScope) {
- if (senderFuture.isEmpty) throw new StmException(
- "Can't continue transaction in a one-way fire-forget message send" +
- "\n\tE.g. using Actor '!' method or Active Object 'void' method" +
- "\n\tPlease use the Actor '!!' method or Active Object method with non-void return type")
+ if (isTransactionRequiresNew) {
atomic {
proceed
}
} else proceed
} catch {
+ case e: IllegalStateException => {}
case e =>
+ // abort transaction set
+ if (isTransactionSetInScope) try { getTransactionSetInScope.abort } catch { case e: IllegalStateException => {} }
Actor.log.error(e, "Exception when invoking \n\tactor [%s] \n\twith message [%s]", this, message)
+
if (senderFuture.isDefined) senderFuture.get.completeWithException(this, e)
- clearTransaction // need to clear currentTransaction before call to supervisor
+ clearTx // need to clear currentTransaction before call to supervisor
+
// FIXME to fix supervisor restart of remote actor for oneway calls, inject a supervisor proxy that can send notification back to client
if (_supervisor.isDefined) _supervisor.get ! Exit(this, e)
} finally {
- clearTransaction
+ clearTx
}
}
@@ -1042,6 +1070,5 @@ trait Actor extends TransactionManagement {
that.asInstanceOf[Actor]._uuid == _uuid
}
- override def toString(): String = "Actor[" + id + ":" + uuid + "]"
-
+ override def toString = "Actor[" + id + ":" + uuid + "]"
}
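Per the updated scaladoc, the renamed factory is used like this; a minimal sketch, importing the Sender.Self implicit so that '!' can be called from outside an actor:

    import se.scalablesolutions.akka.actor.Actor
    import se.scalablesolutions.akka.actor.Actor.Sender.Self

    val a = Actor.init {
      // init stuff, runs once; the actor is already started when created
    } receive {
      case msg => println("received: " + msg)
    }
    a ! "hello"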
diff --git a/akka-core/src/main/scala/actor/ActorRegistry.scala b/akka-core/src/main/scala/actor/ActorRegistry.scala
index 9e0b1cba08..6db4d0375a 100644
--- a/akka-core/src/main/scala/actor/ActorRegistry.scala
+++ b/akka-core/src/main/scala/actor/ActorRegistry.scala
@@ -8,8 +8,7 @@ import se.scalablesolutions.akka.util.Logging
import scala.collection.mutable.ListBuffer
import scala.reflect.Manifest
-
-import java.util.concurrent.ConcurrentHashMap
+import java.util.concurrent.{CopyOnWriteArrayList, ConcurrentHashMap}
/**
* Registry holding all Actor instances in the whole system.
@@ -23,9 +22,10 @@ import java.util.concurrent.ConcurrentHashMap
* @author Jonas Bonér
*/
object ActorRegistry extends Logging {
- private val actorsByUUID = new ConcurrentHashMap[String, Actor]
- private val actorsById = new ConcurrentHashMap[String, List[Actor]]
- private val actorsByClassName = new ConcurrentHashMap[String, List[Actor]]
+ private val actorsByUUID = new ConcurrentHashMap[String, Actor]
+ private val actorsById = new ConcurrentHashMap[String, List[Actor]]
+ private val actorsByClassName = new ConcurrentHashMap[String, List[Actor]]
+ private val registrationListeners = new CopyOnWriteArrayList[Actor]
/**
* Returns all actors in the system.
@@ -103,6 +103,9 @@ object ActorRegistry extends Logging {
if (actorsByClassName.containsKey(className)) {
actorsByClassName.put(className, actor :: actorsByClassName.get(className))
} else actorsByClassName.put(className, actor :: Nil)
+
+ // notify listeners
+ foreachListener(_.!(ActorRegistered(actor))(None))
}
/**
@@ -112,6 +115,8 @@ object ActorRegistry extends Logging {
actorsByUUID remove actor.uuid
actorsById remove actor.getId
actorsByClassName remove actor.getClass.getName
+ // notify listeners
+ foreachListener(_.!(ActorUnregistered(actor))(None))
}
/**
@@ -125,4 +130,26 @@ object ActorRegistry extends Logging {
actorsByClassName.clear
log.info("All actors have been shut down and unregistered from ActorRegistry")
}
+
+ /**
+ * Adds the registration listener to this registry's listener list.
+ */
+ def addRegistrationListener(listener: Actor) = {
+ registrationListeners.add(listener)
+ }
+
+ /**
+ * Removes the registration listener from this registry's listener list.
+ */
+ def removeRegistrationListener(listener: Actor) = {
+ registrationListeners.remove(listener)
+ }
+
+ private def foreachListener(f: (Actor) => Unit) {
+ val iterator = registrationListeners.iterator
+ while (iterator.hasNext) f(iterator.next)
+ }
}
+
+case class ActorRegistered(actor: Actor)
+case class ActorUnregistered(actor: Actor)
\ No newline at end of file
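Registration listeners are plain actors, so the new ActorRegistered/ActorUnregistered events arrive as ordinary messages. A minimal sketch:

    import se.scalablesolutions.akka.actor.{Actor, ActorRegistry, ActorRegistered, ActorUnregistered}

    class RegistryListener extends Actor {
      def receive = {
        case ActorRegistered(actor)   => println("registered: " + actor)
        case ActorUnregistered(actor) => println("unregistered: " + actor)
      }
    }

    val listener = new RegistryListener
    listener.start
    ActorRegistry.addRegistrationListener(listener)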
diff --git a/akka-core/src/main/scala/actor/BootableActorLoaderService.scala b/akka-core/src/main/scala/actor/BootableActorLoaderService.scala
index 1bacbf6f59..5c80620d80 100644
--- a/akka-core/src/main/scala/actor/BootableActorLoaderService.scala
+++ b/akka-core/src/main/scala/actor/BootableActorLoaderService.scala
@@ -7,8 +7,8 @@ package se.scalablesolutions.akka.actor
import java.io.File
import java.net.URLClassLoader
-import se.scalablesolutions.akka.util.{Bootable,Logging}
-import se.scalablesolutions.akka.Config._
+import se.scalablesolutions.akka.util.{Bootable, Logging}
+import se.scalablesolutions.akka.config.Config._
/**
* Handles all modules in the deploy directory (load and unload)
@@ -30,12 +30,8 @@ trait BootableActorLoaderService extends Bootable with Logging {
}
val toDeploy = for (f <- DEPLOY_DIR.listFiles().toArray.toList.asInstanceOf[List[File]]) yield f.toURL
log.info("Deploying applications from [%s]: [%s]", DEPLOY, toDeploy.toArray.toList)
- new URLClassLoader(toDeploy.toArray, ClassLoader.getSystemClassLoader)
- } else if (getClass.getClassLoader.getResourceAsStream("akka.conf") ne null) {
- getClass.getClassLoader
- } else throw new IllegalStateException(
- "AKKA_HOME is not defined and no 'akka.conf' can be found on the classpath, aborting")
- )
+ new URLClassLoader(toDeploy.toArray, getClass.getClassLoader)
+ } else getClass.getClassLoader)
}
abstract override def onLoad = {
@@ -47,4 +43,4 @@ trait BootableActorLoaderService extends Bootable with Logging {
}
abstract override def onUnload = ActorRegistry.shutdownAll
-}
\ No newline at end of file
+}
diff --git a/akka-core/src/main/scala/actor/Scheduler.scala b/akka-core/src/main/scala/actor/Scheduler.scala
index 8205db5843..be23149b61 100644
--- a/akka-core/src/main/scala/actor/Scheduler.scala
+++ b/akka-core/src/main/scala/actor/Scheduler.scala
@@ -17,7 +17,7 @@ import java.util.concurrent._
import se.scalablesolutions.akka.config.ScalaConfig._
import se.scalablesolutions.akka.config.{AllForOneStrategy, OneForOneStrategy, FaultHandlingStrategy}
-import se.scalablesolutions.akka.util.{Logging}
+import se.scalablesolutions.akka.util.Logging
import org.scala_tools.javautils.Imports._
diff --git a/akka-core/src/main/scala/config/Config.scala b/akka-core/src/main/scala/config/Config.scala
index e993573972..ecbdf33d81 100644
--- a/akka-core/src/main/scala/config/Config.scala
+++ b/akka-core/src/main/scala/config/Config.scala
@@ -4,231 +4,71 @@
package se.scalablesolutions.akka.config
-import se.scalablesolutions.akka.actor.Actor
-import se.scalablesolutions.akka.dispatch.MessageDispatcher
+import se.scalablesolutions.akka.util.Logging
-sealed abstract class FaultHandlingStrategy
-case class AllForOneStrategy(maxNrOfRetries: Int, withinTimeRange: Int) extends FaultHandlingStrategy
-case class OneForOneStrategy(maxNrOfRetries: Int, withinTimeRange: Int) extends FaultHandlingStrategy
-
-/**
- * Configuration classes - not to be used as messages.
- *
- * @author Jonas Bonér
- */
-object ScalaConfig {
- sealed abstract class ConfigElement
-
- abstract class Server extends ConfigElement
- abstract class FailOverScheme extends ConfigElement
- abstract class Scope extends ConfigElement
-
- case class SupervisorConfig(restartStrategy: RestartStrategy, worker: List[Server]) extends Server
-
- class Supervise(val actor: Actor, val lifeCycle: LifeCycle, _remoteAddress: RemoteAddress) extends Server {
- val remoteAddress: Option[RemoteAddress] = if (_remoteAddress eq null) None else Some(_remoteAddress)
- }
- object Supervise {
- def apply(actor: Actor, lifeCycle: LifeCycle, remoteAddress: RemoteAddress) = new Supervise(actor, lifeCycle, remoteAddress)
- def apply(actor: Actor, lifeCycle: LifeCycle) = new Supervise(actor, lifeCycle, null)
- def unapply(supervise: Supervise) = Some((supervise.actor, supervise.lifeCycle, supervise.remoteAddress))
- }
-
- case class RestartStrategy(
- scheme: FailOverScheme,
- maxNrOfRetries: Int,
- withinTimeRange: Int,
- trapExceptions: List[Class[_ <: Throwable]]) extends ConfigElement
-
- case object AllForOne extends FailOverScheme
- case object OneForOne extends FailOverScheme
-
- case class LifeCycle(scope: Scope, callbacks: Option[RestartCallbacks]) extends ConfigElement
- object LifeCycle {
- def apply(scope: Scope) = new LifeCycle(scope, None)
- }
- case class RestartCallbacks(preRestart: String, postRestart: String) {
- if ((preRestart eq null) || (postRestart eq null)) throw new IllegalArgumentException("Restart callback methods can't be null")
- }
-
- case object Permanent extends Scope
- case object Temporary extends Scope
-
- case class RemoteAddress(val hostname: String, val port: Int) extends ConfigElement
-
- class Component(_intf: Class[_],
- val target: Class[_],
- val lifeCycle: LifeCycle,
- val timeout: Int,
- val transactionRequired: Boolean,
- _dispatcher: MessageDispatcher, // optional
- _remoteAddress: RemoteAddress // optional
- ) extends Server {
- val intf: Option[Class[_]] = if (_intf eq null) None else Some(_intf)
- val dispatcher: Option[MessageDispatcher] = if (_dispatcher eq null) None else Some(_dispatcher)
- val remoteAddress: Option[RemoteAddress] = if (_remoteAddress eq null) None else Some(_remoteAddress)
- }
- object Component {
- def apply(intf: Class[_], target: Class[_], lifeCycle: LifeCycle, timeout: Int) =
- new Component(intf, target, lifeCycle, timeout, false, null, null)
-
- def apply(target: Class[_], lifeCycle: LifeCycle, timeout: Int) =
- new Component(null, target, lifeCycle, timeout, false, null, null)
-
- def apply(intf: Class[_], target: Class[_], lifeCycle: LifeCycle, timeout: Int, dispatcher: MessageDispatcher) =
- new Component(intf, target, lifeCycle, timeout, false, dispatcher, null)
-
- def apply(target: Class[_], lifeCycle: LifeCycle, timeout: Int, dispatcher: MessageDispatcher) =
- new Component(null, target, lifeCycle, timeout, false, dispatcher, null)
-
- def apply(intf: Class[_], target: Class[_], lifeCycle: LifeCycle, timeout: Int, remoteAddress: RemoteAddress) =
- new Component(intf, target, lifeCycle, timeout, false, null, remoteAddress)
-
- def apply(target: Class[_], lifeCycle: LifeCycle, timeout: Int, remoteAddress: RemoteAddress) =
- new Component(null, target, lifeCycle, timeout, false, null, remoteAddress)
-
- def apply(intf: Class[_], target: Class[_], lifeCycle: LifeCycle, timeout: Int, dispatcher: MessageDispatcher, remoteAddress: RemoteAddress) =
- new Component(intf, target, lifeCycle, timeout, false, dispatcher, remoteAddress)
-
- def apply(target: Class[_], lifeCycle: LifeCycle, timeout: Int, dispatcher: MessageDispatcher, remoteAddress: RemoteAddress) =
- new Component(null, target, lifeCycle, timeout, false, dispatcher, remoteAddress)
-
- def apply(intf: Class[_], target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean) =
- new Component(intf, target, lifeCycle, timeout, transactionRequired, null, null)
-
- def apply(target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean) =
- new Component(null, target, lifeCycle, timeout, transactionRequired, null, null)
-
- def apply(intf: Class[_], target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean, dispatcher: MessageDispatcher) =
- new Component(intf, target, lifeCycle, timeout, transactionRequired, dispatcher, null)
-
- def apply(target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean, dispatcher: MessageDispatcher) =
- new Component(null, target, lifeCycle, timeout, transactionRequired, dispatcher, null)
-
- def apply(intf: Class[_], target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean, remoteAddress: RemoteAddress) =
- new Component(intf, target, lifeCycle, timeout, transactionRequired, null, remoteAddress)
-
- def apply(target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean, remoteAddress: RemoteAddress) =
- new Component(null, target, lifeCycle, timeout, transactionRequired, null, remoteAddress)
-
- def apply(intf: Class[_], target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean, dispatcher: MessageDispatcher, remoteAddress: RemoteAddress) =
- new Component(intf, target, lifeCycle, timeout, transactionRequired, dispatcher, remoteAddress)
-
- def apply(target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean, dispatcher: MessageDispatcher, remoteAddress: RemoteAddress) =
- new Component(null, target, lifeCycle, timeout, transactionRequired, dispatcher, remoteAddress)
- }
-}
+import net.lag.configgy.{Configgy, ParseException}
/**
* @author Jonas Bonér
*/
-object JavaConfig {
- import scala.reflect.BeanProperty
+object Config extends Logging {
+ val VERSION = "0.7-SNAPSHOT"
- sealed abstract class ConfigElement
+ // Set Multiverse options for max speed
+ System.setProperty("org.multiverse.MuliverseConstants.sanityChecks", "false")
+ System.setProperty("org.multiverse.api.GlobalStmInstance.factorymethod", "org.multiverse.stms.alpha.AlphaStm.createFast")
- class RestartStrategy(
- @BeanProperty val scheme: FailOverScheme,
- @BeanProperty val maxNrOfRetries: Int,
- @BeanProperty val withinTimeRange: Int,
- @BeanProperty val trapExceptions: Array[Class[_ <: Throwable]]) extends ConfigElement {
- def transform = se.scalablesolutions.akka.config.ScalaConfig.RestartStrategy(
- scheme.transform, maxNrOfRetries, withinTimeRange, trapExceptions.toList)
+ val HOME = {
+ val systemHome = System.getenv("AKKA_HOME")
+ if (systemHome == null || systemHome.length == 0 || systemHome == ".") {
+ val optionHome = System.getProperty("akka.home", "")
+ if (optionHome.length != 0) Some(optionHome)
+ else None
+ } else Some(systemHome)
}
-
- class LifeCycle(@BeanProperty val scope: Scope, @BeanProperty val callbacks: RestartCallbacks) extends ConfigElement {
- def this(scope: Scope) = this(scope, null)
- def transform = {
- val callbackOption = if (callbacks eq null) None else Some(callbacks.transform)
- se.scalablesolutions.akka.config.ScalaConfig.LifeCycle(scope.transform, callbackOption)
+
+ val config = {
+ if (HOME.isDefined) {
+ try {
+ val configFile = HOME.get + "/config/akka.conf"
+ Configgy.configure(configFile)
+ log.info("AKKA_HOME is defined to [%s], config loaded from [%s].", HOME.get, configFile)
+ } catch {
+ case e: ParseException => throw new IllegalStateException(
+ "'akka.conf' config file can not be found in [" + HOME + "/config/akka.conf] aborting." +
+ "\n\tEither add it in the 'config' directory or add it to the classpath.")
+ }
+ } else if (System.getProperty("akka.config", "") != "") {
+ val configFile = System.getProperty("akka.config", "")
+ try {
+ Configgy.configure(configFile)
+ log.info("Config loaded from -Dakka.config=%s", configFile)
+ } catch {
+ case e: ParseException => throw new IllegalStateException(
+ "Config could not be loaded from -Dakka.config=" + configFile)
+ }
+ } else {
+ try {
+ Configgy.configureFromResource("akka.conf", getClass.getClassLoader)
+ log.info("Config loaded from the application classpath.")
+ } catch {
+ case e: ParseException => throw new IllegalStateException(
+ "\nCan't find 'akka.conf' configuration file." +
+ "\nOne of the three ways of locating the 'akka.conf' file needs to be defined:" +
+ "\n\t1. Define 'AKKA_HOME' environment variable to the root of the Akka distribution." +
+ "\n\t2. Define the '-Dakka.config=...' system property option." +
+ "\n\t3. Put the 'akka.conf' file on the classpath." +
+ "\nI have no way of finding the 'akka.conf' configuration file." +
+ "\nAborting.")
+ }
}
+ Configgy.config
}
- class RestartCallbacks(@BeanProperty val preRestart: String, @BeanProperty val postRestart: String) {
- def transform = se.scalablesolutions.akka.config.ScalaConfig.RestartCallbacks(preRestart, postRestart)
- }
+ val CONFIG_VERSION = config.getString("akka.version", "0")
+ if (VERSION != CONFIG_VERSION) throw new IllegalStateException(
+ "Akka JAR version [" + VERSION + "] is different than the provided config ('akka.conf') version [" + CONFIG_VERSION + "]")
+ val startTime = System.currentTimeMillis
- abstract class Scope extends ConfigElement {
- def transform: se.scalablesolutions.akka.config.ScalaConfig.Scope
- }
- class Permanent extends Scope {
- override def transform = se.scalablesolutions.akka.config.ScalaConfig.Permanent
- }
- class Temporary extends Scope {
- override def transform = se.scalablesolutions.akka.config.ScalaConfig.Temporary
- }
-
- abstract class FailOverScheme extends ConfigElement {
- def transform: se.scalablesolutions.akka.config.ScalaConfig.FailOverScheme
- }
- class AllForOne extends FailOverScheme {
- override def transform = se.scalablesolutions.akka.config.ScalaConfig.AllForOne
- }
- class OneForOne extends FailOverScheme {
- override def transform = se.scalablesolutions.akka.config.ScalaConfig.OneForOne
- }
-
- class RemoteAddress(@BeanProperty val hostname: String, @BeanProperty val port: Int)
-
- abstract class Server extends ConfigElement
- class Component(@BeanProperty val intf: Class[_],
- @BeanProperty val target: Class[_],
- @BeanProperty val lifeCycle: LifeCycle,
- @BeanProperty val timeout: Int,
- @BeanProperty val transactionRequired: Boolean, // optional
- @BeanProperty val dispatcher: MessageDispatcher, // optional
- @BeanProperty val remoteAddress: RemoteAddress // optional
- ) extends Server {
-
- def this(intf: Class[_], target: Class[_], lifeCycle: LifeCycle, timeout: Int) =
- this(intf, target, lifeCycle, timeout, false, null, null)
-
- def this(target: Class[_], lifeCycle: LifeCycle, timeout: Int) =
- this(null, target, lifeCycle, timeout, false, null, null)
-
- def this(intf: Class[_], target: Class[_], lifeCycle: LifeCycle, timeout: Int, remoteAddress: RemoteAddress) =
- this(intf, target, lifeCycle, timeout, false, null, remoteAddress)
-
- def this(target: Class[_], lifeCycle: LifeCycle, timeout: Int, remoteAddress: RemoteAddress) =
- this(null, target, lifeCycle, timeout, false, null, remoteAddress)
-
- def this(intf: Class[_], target: Class[_], lifeCycle: LifeCycle, timeout: Int, dispatcher: MessageDispatcher) =
- this(intf, target, lifeCycle, timeout, false, dispatcher, null)
-
- def this(target: Class[_], lifeCycle: LifeCycle, timeout: Int, dispatcher: MessageDispatcher) =
- this(null, target, lifeCycle, timeout, false, dispatcher, null)
-
- def this(target: Class[_], lifeCycle: LifeCycle, timeout: Int, dispatcher: MessageDispatcher, remoteAddress: RemoteAddress) =
- this(null, target, lifeCycle, timeout, false, dispatcher, remoteAddress)
-
- def this(intf: Class[_], target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean) =
- this(intf, target, lifeCycle, timeout, transactionRequired, null, null)
-
- def this(target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean) =
- this(null, target, lifeCycle, timeout, transactionRequired, null, null)
-
- def this(intf: Class[_], target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean, remoteAddress: RemoteAddress) =
- this(intf, target, lifeCycle, timeout, transactionRequired, null, remoteAddress)
-
- def this(target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean, remoteAddress: RemoteAddress) =
- this(null, target, lifeCycle, timeout, transactionRequired, null, remoteAddress)
-
- def this(intf: Class[_], target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean, dispatcher: MessageDispatcher) =
- this(intf, target, lifeCycle, timeout, transactionRequired, dispatcher, null)
-
- def this(target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean, dispatcher: MessageDispatcher) =
- this(null, target, lifeCycle, timeout, transactionRequired, dispatcher, null)
-
- def this(target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean, dispatcher: MessageDispatcher, remoteAddress: RemoteAddress) =
- this(null, target, lifeCycle, timeout, transactionRequired, dispatcher, remoteAddress)
-
- def transform =
- se.scalablesolutions.akka.config.ScalaConfig.Component(
- intf, target, lifeCycle.transform, timeout, transactionRequired, dispatcher,
- if (remoteAddress ne null) se.scalablesolutions.akka.config.ScalaConfig.RemoteAddress(remoteAddress.hostname, remoteAddress.port) else null)
-
- def newSupervised(actor: Actor) =
- se.scalablesolutions.akka.config.ScalaConfig.Supervise(actor, lifeCycle.transform)
- }
-
-}
\ No newline at end of file
+ def uptime = (System.currentTimeMillis - startTime) / 1000
+}
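With Config relocated to the config package, call sites follow the pattern already used throughout this change. A small sketch:

    import se.scalablesolutions.akka.config.Config._

    val host = config.getString("akka.remote.server.hostname", "localhost")
    val port = config.getInt("akka.remote.server.port", 9999)
    println("Akka " + VERSION + ", uptime " + uptime + "s, default remote node " + host + ":" + port)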
diff --git a/akka-core/src/main/scala/config/ConfiguratorRepository.scala b/akka-core/src/main/scala/config/ConfiguratorRepository.scala
index 9c12bf4b32..097259164b 100644
--- a/akka-core/src/main/scala/config/ConfiguratorRepository.scala
+++ b/akka-core/src/main/scala/config/ConfiguratorRepository.scala
@@ -6,7 +6,7 @@ package se.scalablesolutions.akka.config
import scala.collection.mutable.HashSet
-import util.Logging
+import se.scalablesolutions.akka.util.Logging
object ConfiguratorRepository extends Logging {
diff --git a/akka-core/src/main/scala/config/SupervisionConfig.scala b/akka-core/src/main/scala/config/SupervisionConfig.scala
new file mode 100644
index 0000000000..e993573972
--- /dev/null
+++ b/akka-core/src/main/scala/config/SupervisionConfig.scala
@@ -0,0 +1,234 @@
+/**
+ * Copyright (C) 2009-2010 Scalable Solutions AB
+ */
+
+package se.scalablesolutions.akka.config
+
+import se.scalablesolutions.akka.actor.Actor
+import se.scalablesolutions.akka.dispatch.MessageDispatcher
+
+sealed abstract class FaultHandlingStrategy
+case class AllForOneStrategy(maxNrOfRetries: Int, withinTimeRange: Int) extends FaultHandlingStrategy
+case class OneForOneStrategy(maxNrOfRetries: Int, withinTimeRange: Int) extends FaultHandlingStrategy
+
+/**
+ * Configuration classes - not to be used as messages.
+ *
+ * @author Jonas Bonér
+ */
+object ScalaConfig {
+ sealed abstract class ConfigElement
+
+ abstract class Server extends ConfigElement
+ abstract class FailOverScheme extends ConfigElement
+ abstract class Scope extends ConfigElement
+
+ case class SupervisorConfig(restartStrategy: RestartStrategy, worker: List[Server]) extends Server
+
+ class Supervise(val actor: Actor, val lifeCycle: LifeCycle, _remoteAddress: RemoteAddress) extends Server {
+ val remoteAddress: Option[RemoteAddress] = if (_remoteAddress eq null) None else Some(_remoteAddress)
+ }
+ object Supervise {
+ def apply(actor: Actor, lifeCycle: LifeCycle, remoteAddress: RemoteAddress) = new Supervise(actor, lifeCycle, remoteAddress)
+ def apply(actor: Actor, lifeCycle: LifeCycle) = new Supervise(actor, lifeCycle, null)
+ def unapply(supervise: Supervise) = Some((supervise.actor, supervise.lifeCycle, supervise.remoteAddress))
+ }
+
+ case class RestartStrategy(
+ scheme: FailOverScheme,
+ maxNrOfRetries: Int,
+ withinTimeRange: Int,
+ trapExceptions: List[Class[_ <: Throwable]]) extends ConfigElement
+
+ case object AllForOne extends FailOverScheme
+ case object OneForOne extends FailOverScheme
+
+ case class LifeCycle(scope: Scope, callbacks: Option[RestartCallbacks]) extends ConfigElement
+ object LifeCycle {
+ def apply(scope: Scope) = new LifeCycle(scope, None)
+ }
+ case class RestartCallbacks(preRestart: String, postRestart: String) {
+ if ((preRestart eq null) || (postRestart eq null)) throw new IllegalArgumentException("Restart callback methods can't be null")
+ }
+
+ case object Permanent extends Scope
+ case object Temporary extends Scope
+
+ case class RemoteAddress(val hostname: String, val port: Int) extends ConfigElement
+
+ class Component(_intf: Class[_],
+ val target: Class[_],
+ val lifeCycle: LifeCycle,
+ val timeout: Int,
+ val transactionRequired: Boolean,
+ _dispatcher: MessageDispatcher, // optional
+ _remoteAddress: RemoteAddress // optional
+ ) extends Server {
+ val intf: Option[Class[_]] = if (_intf eq null) None else Some(_intf)
+ val dispatcher: Option[MessageDispatcher] = if (_dispatcher eq null) None else Some(_dispatcher)
+ val remoteAddress: Option[RemoteAddress] = if (_remoteAddress eq null) None else Some(_remoteAddress)
+ }
+ object Component {
+ def apply(intf: Class[_], target: Class[_], lifeCycle: LifeCycle, timeout: Int) =
+ new Component(intf, target, lifeCycle, timeout, false, null, null)
+
+ def apply(target: Class[_], lifeCycle: LifeCycle, timeout: Int) =
+ new Component(null, target, lifeCycle, timeout, false, null, null)
+
+ def apply(intf: Class[_], target: Class[_], lifeCycle: LifeCycle, timeout: Int, dispatcher: MessageDispatcher) =
+ new Component(intf, target, lifeCycle, timeout, false, dispatcher, null)
+
+ def apply(target: Class[_], lifeCycle: LifeCycle, timeout: Int, dispatcher: MessageDispatcher) =
+ new Component(null, target, lifeCycle, timeout, false, dispatcher, null)
+
+ def apply(intf: Class[_], target: Class[_], lifeCycle: LifeCycle, timeout: Int, remoteAddress: RemoteAddress) =
+ new Component(intf, target, lifeCycle, timeout, false, null, remoteAddress)
+
+ def apply(target: Class[_], lifeCycle: LifeCycle, timeout: Int, remoteAddress: RemoteAddress) =
+ new Component(null, target, lifeCycle, timeout, false, null, remoteAddress)
+
+ def apply(intf: Class[_], target: Class[_], lifeCycle: LifeCycle, timeout: Int, dispatcher: MessageDispatcher, remoteAddress: RemoteAddress) =
+ new Component(intf, target, lifeCycle, timeout, false, dispatcher, remoteAddress)
+
+ def apply(target: Class[_], lifeCycle: LifeCycle, timeout: Int, dispatcher: MessageDispatcher, remoteAddress: RemoteAddress) =
+ new Component(null, target, lifeCycle, timeout, false, dispatcher, remoteAddress)
+
+ def apply(intf: Class[_], target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean) =
+ new Component(intf, target, lifeCycle, timeout, transactionRequired, null, null)
+
+ def apply(target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean) =
+ new Component(null, target, lifeCycle, timeout, transactionRequired, null, null)
+
+ def apply(intf: Class[_], target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean, dispatcher: MessageDispatcher) =
+ new Component(intf, target, lifeCycle, timeout, transactionRequired, dispatcher, null)
+
+ def apply(target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean, dispatcher: MessageDispatcher) =
+ new Component(null, target, lifeCycle, timeout, transactionRequired, dispatcher, null)
+
+ def apply(intf: Class[_], target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean, remoteAddress: RemoteAddress) =
+ new Component(intf, target, lifeCycle, timeout, transactionRequired, null, remoteAddress)
+
+ def apply(target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean, remoteAddress: RemoteAddress) =
+ new Component(null, target, lifeCycle, timeout, transactionRequired, null, remoteAddress)
+
+ def apply(intf: Class[_], target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean, dispatcher: MessageDispatcher, remoteAddress: RemoteAddress) =
+ new Component(intf, target, lifeCycle, timeout, transactionRequired, dispatcher, remoteAddress)
+
+ def apply(target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean, dispatcher: MessageDispatcher, remoteAddress: RemoteAddress) =
+ new Component(null, target, lifeCycle, timeout, transactionRequired, dispatcher, remoteAddress)
+ }
+}
+
+/**
+ * @author Jonas Bonér
+ */
+object JavaConfig {
+ import scala.reflect.BeanProperty
+
+ sealed abstract class ConfigElement
+
+ class RestartStrategy(
+ @BeanProperty val scheme: FailOverScheme,
+ @BeanProperty val maxNrOfRetries: Int,
+ @BeanProperty val withinTimeRange: Int,
+ @BeanProperty val trapExceptions: Array[Class[_ <: Throwable]]) extends ConfigElement {
+ def transform = se.scalablesolutions.akka.config.ScalaConfig.RestartStrategy(
+ scheme.transform, maxNrOfRetries, withinTimeRange, trapExceptions.toList)
+ }
+
+ class LifeCycle(@BeanProperty val scope: Scope, @BeanProperty val callbacks: RestartCallbacks) extends ConfigElement {
+ def this(scope: Scope) = this(scope, null)
+ def transform = {
+ val callbackOption = if (callbacks eq null) None else Some(callbacks.transform)
+ se.scalablesolutions.akka.config.ScalaConfig.LifeCycle(scope.transform, callbackOption)
+ }
+ }
+
+ class RestartCallbacks(@BeanProperty val preRestart: String, @BeanProperty val postRestart: String) {
+ def transform = se.scalablesolutions.akka.config.ScalaConfig.RestartCallbacks(preRestart, postRestart)
+ }
+
+ abstract class Scope extends ConfigElement {
+ def transform: se.scalablesolutions.akka.config.ScalaConfig.Scope
+ }
+ class Permanent extends Scope {
+ override def transform = se.scalablesolutions.akka.config.ScalaConfig.Permanent
+ }
+ class Temporary extends Scope {
+ override def transform = se.scalablesolutions.akka.config.ScalaConfig.Temporary
+ }
+
+ abstract class FailOverScheme extends ConfigElement {
+ def transform: se.scalablesolutions.akka.config.ScalaConfig.FailOverScheme
+ }
+ class AllForOne extends FailOverScheme {
+ override def transform = se.scalablesolutions.akka.config.ScalaConfig.AllForOne
+ }
+ class OneForOne extends FailOverScheme {
+ override def transform = se.scalablesolutions.akka.config.ScalaConfig.OneForOne
+ }
+
+ class RemoteAddress(@BeanProperty val hostname: String, @BeanProperty val port: Int)
+
+ abstract class Server extends ConfigElement
+ class Component(@BeanProperty val intf: Class[_],
+ @BeanProperty val target: Class[_],
+ @BeanProperty val lifeCycle: LifeCycle,
+ @BeanProperty val timeout: Int,
+ @BeanProperty val transactionRequired: Boolean, // optional
+ @BeanProperty val dispatcher: MessageDispatcher, // optional
+ @BeanProperty val remoteAddress: RemoteAddress // optional
+ ) extends Server {
+
+ def this(intf: Class[_], target: Class[_], lifeCycle: LifeCycle, timeout: Int) =
+ this(intf, target, lifeCycle, timeout, false, null, null)
+
+ def this(target: Class[_], lifeCycle: LifeCycle, timeout: Int) =
+ this(null, target, lifeCycle, timeout, false, null, null)
+
+ def this(intf: Class[_], target: Class[_], lifeCycle: LifeCycle, timeout: Int, remoteAddress: RemoteAddress) =
+ this(intf, target, lifeCycle, timeout, false, null, remoteAddress)
+
+ def this(target: Class[_], lifeCycle: LifeCycle, timeout: Int, remoteAddress: RemoteAddress) =
+ this(null, target, lifeCycle, timeout, false, null, remoteAddress)
+
+ def this(intf: Class[_], target: Class[_], lifeCycle: LifeCycle, timeout: Int, dispatcher: MessageDispatcher) =
+ this(intf, target, lifeCycle, timeout, false, dispatcher, null)
+
+ def this(target: Class[_], lifeCycle: LifeCycle, timeout: Int, dispatcher: MessageDispatcher) =
+ this(null, target, lifeCycle, timeout, false, dispatcher, null)
+
+ def this(target: Class[_], lifeCycle: LifeCycle, timeout: Int, dispatcher: MessageDispatcher, remoteAddress: RemoteAddress) =
+ this(null, target, lifeCycle, timeout, false, dispatcher, remoteAddress)
+
+ def this(intf: Class[_], target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean) =
+ this(intf, target, lifeCycle, timeout, transactionRequired, null, null)
+
+ def this(target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean) =
+ this(null, target, lifeCycle, timeout, transactionRequired, null, null)
+
+ def this(intf: Class[_], target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean, remoteAddress: RemoteAddress) =
+ this(intf, target, lifeCycle, timeout, transactionRequired, null, remoteAddress)
+
+ def this(target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean, remoteAddress: RemoteAddress) =
+ this(null, target, lifeCycle, timeout, transactionRequired, null, remoteAddress)
+
+ def this(intf: Class[_], target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean, dispatcher: MessageDispatcher) =
+ this(intf, target, lifeCycle, timeout, transactionRequired, dispatcher, null)
+
+ def this(target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean, dispatcher: MessageDispatcher) =
+ this(null, target, lifeCycle, timeout, transactionRequired, dispatcher, null)
+
+ def this(target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean, dispatcher: MessageDispatcher, remoteAddress: RemoteAddress) =
+ this(null, target, lifeCycle, timeout, transactionRequired, dispatcher, remoteAddress)
+
+ def transform =
+ se.scalablesolutions.akka.config.ScalaConfig.Component(
+ intf, target, lifeCycle.transform, timeout, transactionRequired, dispatcher,
+ if (remoteAddress ne null) se.scalablesolutions.akka.config.ScalaConfig.RemoteAddress(remoteAddress.hostname, remoteAddress.port) else null)
+
+ def newSupervised(actor: Actor) =
+ se.scalablesolutions.akka.config.ScalaConfig.Supervise(actor, lifeCycle.transform)
+ }
+
+}
\ No newline at end of file
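The ScalaConfig vocabulary moved into this file is what declarative supervision setups are built from. A sketch, where Ping and Pong are stand-in actors:

    import se.scalablesolutions.akka.actor.Actor
    import se.scalablesolutions.akka.config.ScalaConfig._

    // Ping and Pong are hypothetical placeholders
    class Ping extends Actor { def receive = { case m => println("ping: " + m) } }
    class Pong extends Actor { def receive = { case m => println("pong: " + m) } }

    val supervision = SupervisorConfig(
      RestartStrategy(AllForOne, 3, 5000, List(classOf[Exception])),
      Supervise(new Ping, LifeCycle(Permanent)) ::
      Supervise(new Pong, LifeCycle(Temporary)) :: Nil)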
diff --git a/akka-core/src/main/scala/dispatch/ExecutorBasedEventDrivenDispatcher.scala b/akka-core/src/main/scala/dispatch/ExecutorBasedEventDrivenDispatcher.scala
index e115800d4b..b48e7717cf 100644
--- a/akka-core/src/main/scala/dispatch/ExecutorBasedEventDrivenDispatcher.scala
+++ b/akka-core/src/main/scala/dispatch/ExecutorBasedEventDrivenDispatcher.scala
@@ -57,18 +57,29 @@ class ExecutorBasedEventDrivenDispatcher(_name: String) extends MessageDispatche
@volatile private var active: Boolean = false
val name: String = "event-driven:executor:dispatcher:" + _name
- init
-
+ init
+
def dispatch(invocation: MessageInvocation) = if (active) {
executor.execute(new Runnable() {
def run = {
- invocation.receiver.synchronized {
- var messageInvocation = invocation.receiver._mailbox.poll
- while (messageInvocation != null) {
- messageInvocation.invoke
- messageInvocation = invocation.receiver._mailbox.poll
+ var lockAcquiredOnce = false
+ // this do-while loop is required to prevent missing new messages between the end of the inner while
+ // loop and releasing the lock
+ do {
+ if (invocation.receiver._dispatcherLock.tryLock) {
+ lockAcquiredOnce = true
+ try {
+ // Only dispatch if we got the lock. Otherwise another thread is already dispatching.
+ var messageInvocation = invocation.receiver._mailbox.poll
+ while (messageInvocation != null) {
+ messageInvocation.invoke
+ messageInvocation = invocation.receiver._mailbox.poll
+ }
+ } finally {
+ invocation.receiver._dispatcherLock.unlock
+ }
}
- }
+ } while (lockAcquiredOnce && !invocation.receiver._mailbox.isEmpty)
}
})
} else throw new IllegalStateException("Can't submit invocations to dispatcher since it's not started")
@@ -88,4 +99,4 @@ class ExecutorBasedEventDrivenDispatcher(_name: String) extends MessageDispatche
"Can't build a new thread pool for a dispatcher that is already up and running")
private[akka] def init = withNewThreadPoolWithLinkedBlockingQueueWithUnboundedCapacity.buildThreadPool
-}
\ No newline at end of file
+}
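The tryLock/do-while shape above is the general fix for the missed-wakeup race in lock-guarded queue draining: a producer can enqueue after the consumer's last poll but before unlock. The same pattern, distilled to a plain queue and lock so it stands alone:

    import java.util.concurrent.ConcurrentLinkedQueue
    import java.util.concurrent.locks.ReentrantLock

    val lock    = new ReentrantLock
    val mailbox = new ConcurrentLinkedQueue[Runnable]

    def drain(): Unit = {
      var ranOnce = false
      do {
        if (lock.tryLock) { // only one thread drains at a time
          ranOnce = true
          try {
            var m = mailbox.poll
            while (m ne null) { m.run(); m = mailbox.poll }
          } finally lock.unlock
        }
        // re-check after unlock: an item added between the last poll and
        // the unlock would otherwise never be processed
      } while (ranOnce && !mailbox.isEmpty)
    }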
diff --git a/akka-core/src/main/scala/dispatch/Future.scala b/akka-core/src/main/scala/dispatch/Future.scala
index 0dcc0f850c..0bf9723e31 100644
--- a/akka-core/src/main/scala/dispatch/Future.scala
+++ b/akka-core/src/main/scala/dispatch/Future.scala
@@ -13,6 +13,7 @@ class FutureTimeoutException(message: String) extends RuntimeException(message)
object Futures {
/**
+ * FIXME document
*
* val future = Futures.future(1000) {
* ... // do stuff
diff --git a/akka-core/src/main/scala/dispatch/Reactor.scala b/akka-core/src/main/scala/dispatch/Reactor.scala
index bf8254c64a..627d27aeac 100644
--- a/akka-core/src/main/scala/dispatch/Reactor.scala
+++ b/akka-core/src/main/scala/dispatch/Reactor.scala
@@ -7,16 +7,17 @@ package se.scalablesolutions.akka.dispatch
import java.util.List
import se.scalablesolutions.akka.util.{HashCode, Logging}
-import se.scalablesolutions.akka.stm.Transaction
import se.scalablesolutions.akka.actor.Actor
import java.util.concurrent.ConcurrentHashMap
+import org.multiverse.commitbarriers.CountDownCommitBarrier
+
final class MessageInvocation(val receiver: Actor,
val message: Any,
val future: Option[CompletableFuture],
val sender: Option[Actor],
- val tx: Option[Transaction]) {
+ val transactionSet: Option[CountDownCommitBarrier]) {
if (receiver eq null) throw new IllegalArgumentException("receiver is null")
def invoke = receiver.invoke(this)
@@ -37,13 +38,13 @@ final class MessageInvocation(val receiver: Actor,
that.asInstanceOf[MessageInvocation].message == message
}
- override def toString(): String = synchronized {
+ override def toString = synchronized {
"MessageInvocation[" +
"\n\tmessage = " + message +
"\n\treceiver = " + receiver +
"\n\tsender = " + sender +
"\n\tfuture = " + future +
- "\n\ttx = " + tx +
+ "\n\ttransactionSet = " + transactionSet +
"\n]"
}
}
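The replaced tx field now carries a Multiverse CountDownCommitBarrier, which all parties to one transaction set commit against. Using only the operations this changeset relies on (and assuming the single-argument party-count constructor), the lifecycle is roughly:

    import org.multiverse.commitbarriers.CountDownCommitBarrier

    val txSet = new CountDownCommitBarrier(1) // the initiating actor is the first party
    txSet.incParties                          // one more party per receiving actor
    // each party then commits its transaction against the barrier, or the
    // whole set is aborted:
    // txSet.abort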
diff --git a/akka-core/src/main/scala/remote/BootableRemoteActorService.scala b/akka-core/src/main/scala/remote/BootableRemoteActorService.scala
index 1c31c3025c..8aaec0661b 100644
--- a/akka-core/src/main/scala/remote/BootableRemoteActorService.scala
+++ b/akka-core/src/main/scala/remote/BootableRemoteActorService.scala
@@ -5,8 +5,8 @@
package se.scalablesolutions.akka.remote
import se.scalablesolutions.akka.actor.BootableActorLoaderService
-import se.scalablesolutions.akka.util.{Bootable,Logging}
-import se.scalablesolutions.akka.Config.config
+import se.scalablesolutions.akka.util.{Bootable, Logging}
+import se.scalablesolutions.akka.config.Config.config
/**
* This bundle/service is responsible for booting up and shutting down the remote actors facility
@@ -23,22 +23,19 @@ trait BootableRemoteActorService extends Bootable with Logging {
def startRemoteService = remoteServerThread.start
abstract override def onLoad = {
+ super.onLoad //Initialize BootableActorLoaderService before remote service
if(config.getBool("akka.remote.server.service", true)){
- log.info("Starting up Cluster Service")
- Cluster.start
- super.onLoad //Initialize BootableActorLoaderService before remote service
+
+ if(config.getBool("akka.remote.cluster.service", true))
+ Cluster.start(self.applicationLoader)
+
log.info("Initializing Remote Actors Service...")
startRemoteService
log.info("Remote Actors Service initialized!")
}
- else
- super.onLoad
-
}
abstract override def onUnload = {
- super.onUnload
-
log.info("Shutting down Remote Actors Service")
RemoteNode.shutdown
@@ -50,6 +47,8 @@ trait BootableRemoteActorService extends Bootable with Logging {
Cluster.shutdown
log.info("Remote Actors Service has been shut down")
+
+ super.onUnload
}
-}
\ No newline at end of file
+}
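Note the reordering: super.onLoad now runs first, so the actor loader (whose applicationLoader is handed to Cluster.start) exists before the remote and cluster services boot, and onUnload tears down in reverse. The two services are now toggled independently, e.g. (illustrative akka.conf excerpt):

    akka {
      remote {
        server {
          service = on    # start the remote actors service
        }
        cluster {
          service = off   # but skip cluster membership
        }
      }
    }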
diff --git a/akka-core/src/main/scala/remote/Cluster.scala b/akka-core/src/main/scala/remote/Cluster.scala
index 4313cfe98c..4a1d6012a7 100644
--- a/akka-core/src/main/scala/remote/Cluster.scala
+++ b/akka-core/src/main/scala/remote/Cluster.scala
@@ -4,7 +4,7 @@
package se.scalablesolutions.akka.remote
-import se.scalablesolutions.akka.Config.config
+import se.scalablesolutions.akka.config.Config.config
import se.scalablesolutions.akka.config.ScalaConfig._
import se.scalablesolutions.akka.serialization.Serializer
import se.scalablesolutions.akka.actor.{Supervisor, SupervisorFactory, Actor, ActorRegistry}
@@ -17,17 +17,43 @@ import scala.collection.immutable.{Map, HashMap}
* @author Viktor Klang
*/
trait Cluster {
+
+ /**
+ * Specifies the cluster name
+ */
def name: String
+ /**
+ * Adds the specified hostname + port as a local node
+ * This information will be propagated to other nodes in the cluster
+ * and will be available at the other nodes through lookup and foreach
+ */
def registerLocalNode(hostname: String, port: Int): Unit
+ /**
+ * Removes the specified hostname + port from the local node
+ * This information will be propagated to other nodes in the cluster
+ * and will no longer be available at the other nodes through lookup and foreach
+ */
def deregisterLocalNode(hostname: String, port: Int): Unit
+ /**
+ * Sends the message to all Actors of the specified type on all other nodes in the cluster
+ */
def relayMessage(to: Class[_ <: Actor], msg: AnyRef): Unit
+ /**
+ * Traverses all known remote addresses available at all other nodes in the cluster
+ * and applies the given PartialFunction to the first address at which it is defined
+ * The order of application is undefined and may vary
+ */
def lookup[T](pf: PartialFunction[RemoteAddress, T]): Option[T]
-
- def foreach(f : (RemoteAddress) => Unit) : Unit
+
+ /**
+ * Applies the specified function to all known remote addresses on all other nodes in the cluster
+ * The order of application is undefined and may vary
+ */
+ def foreach(f: (RemoteAddress) => Unit): Unit
}
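lookup and foreach are the two read patterns over the cluster view, first-match and exhaustive. A usage sketch against this trait (RemoteAddress is the ScalaConfig case class):

    import se.scalablesolutions.akka.config.ScalaConfig.RemoteAddress
    import se.scalablesolutions.akka.remote.Cluster

    // first node that exposes port 9999, if any
    val hit: Option[RemoteAddress] = Cluster.lookup { case ra @ RemoteAddress(_, 9999) => ra }

    // visit every known remote address
    Cluster.foreach(ra => println(ra.hostname + ":" + ra.port))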
/**
@@ -37,6 +63,10 @@ trait Cluster {
*/
trait ClusterActor extends Actor with Cluster {
val name = config.getString("akka.remote.cluster.name") getOrElse "default"
+
+ @volatile protected var serializer : Serializer = _
+
+ private[remote] def setSerializer(s : Serializer) : Unit = serializer = s
}
/**
@@ -44,20 +74,20 @@ trait ClusterActor extends Actor with Cluster {
*
* @author Viktor Klang
*/
-private[remote] object ClusterActor {
+private[akka] object ClusterActor {
sealed trait ClusterMessage
- private[remote] case class RelayedMessage(actorClassFQN: String, msg: AnyRef) extends ClusterMessage
- private[remote] case class Message[ADDR_T](sender : ADDR_T,msg : Array[Byte])
- private[remote] case object PapersPlease extends ClusterMessage
- private[remote] case class Papers(addresses: List[RemoteAddress]) extends ClusterMessage
- private[remote] case object Block extends ClusterMessage
- private[remote] case object Unblock extends ClusterMessage
- private[remote] case class View[ADDR_T](othersPresent : Set[ADDR_T]) extends ClusterMessage
- private[remote] case class Zombie[ADDR_T](address: ADDR_T) extends ClusterMessage
- private[remote] case class RegisterLocalNode(server: RemoteAddress) extends ClusterMessage
- private[remote] case class DeregisterLocalNode(server: RemoteAddress) extends ClusterMessage
- private[remote] case class Node(endpoints: List[RemoteAddress])
+ private[akka] case class RelayedMessage(actorClassFQN: String, msg: AnyRef) extends ClusterMessage
+ private[akka] case class Message[ADDR_T](sender: ADDR_T, msg: Array[Byte])
+ private[akka] case object PapersPlease extends ClusterMessage
+ private[akka] case class Papers(addresses: List[RemoteAddress]) extends ClusterMessage
+ private[akka] case object Block extends ClusterMessage
+ private[akka] case object Unblock extends ClusterMessage
+ private[akka] case class View[ADDR_T](othersPresent: Set[ADDR_T]) extends ClusterMessage
+ private[akka] case class Zombie[ADDR_T](address: ADDR_T) extends ClusterMessage
+ private[akka] case class RegisterLocalNode(server: RemoteAddress) extends ClusterMessage
+ private[akka] case class DeregisterLocalNode(server: RemoteAddress) extends ClusterMessage
+ private[akka] case class Node(endpoints: List[RemoteAddress])
}
/**
@@ -67,72 +97,70 @@ private[remote] object ClusterActor {
*/
abstract class BasicClusterActor extends ClusterActor {
import ClusterActor._
-
type ADDR_T
-
@volatile private var local: Node = Node(Nil)
@volatile private var remotes: Map[ADDR_T, Node] = Map()
override def init = {
- remotes = new HashMap[ADDR_T, Node]
+ remotes = new HashMap[ADDR_T, Node]
}
override def shutdown = {
- remotes = Map()
+ remotes = Map()
}
def receive = {
- case v : View[ADDR_T] => {
+ case v: View[ADDR_T] => {
// Not present in the cluster anymore = presumably zombies
// Nodes we have no prior knowledge existed = unknowns
val zombies = Set[ADDR_T]() ++ remotes.keySet -- v.othersPresent
val unknown = v.othersPresent -- remotes.keySet
log debug ("Updating view")
- log debug ("Other memebers: [%s]",v.othersPresent)
- log debug ("Zombies: [%s]",zombies)
- log debug ("Unknowns: [%s]",unknown)
+ log debug ("Other memebers: [%s]", v.othersPresent)
+ log debug ("Zombies: [%s]", zombies)
+ log debug ("Unknowns: [%s]", unknown)
// Tell the zombies and unknowns to provide papers and prematurely treat the zombies as dead
broadcast(zombies ++ unknown, PapersPlease)
remotes = remotes -- zombies
}
- case z : Zombie[ADDR_T] => { //Ask the presumed zombie for papers and prematurely treat it as dead
+ case z: Zombie[ADDR_T] => { //Ask the presumed zombie for papers and prematurely treat it as dead
log debug ("Killing Zombie Node: %s", z.address)
broadcast(z.address :: Nil, PapersPlease)
remotes = remotes - z.address
}
- case rm @ RelayedMessage(_, _) => {
+ case rm@RelayedMessage(_, _) => {
log debug ("Relaying message: %s", rm)
broadcast(rm)
}
- case m : Message[ADDR_T] => {
- val (src,msg) = (m.sender,m.msg)
- (Cluster.serializer in (msg, None)) match {
+ case m: Message[ADDR_T] => {
+ val (src, msg) = (m.sender, m.msg)
+ (serializer in (msg, None)) match {
- case PapersPlease => {
- log debug ("Asked for papers by %s", src)
- broadcast(src :: Nil, Papers(local.endpoints))
+ case PapersPlease => {
+ log debug ("Asked for papers by %s", src)
+ broadcast(src :: Nil, Papers(local.endpoints))
- if (remotes.get(src).isEmpty) // If we were asked for papers from someone we don't know, ask them!
- broadcast(src :: Nil, PapersPlease)
- }
-
- case Papers(x) => remotes = remotes + (src -> Node(x))
-
- case RelayedMessage(c, m) => ActorRegistry.actorsFor(c).foreach(_ send m)
-
- case unknown => log debug ("Unknown message: %s", unknown.toString)
+ if (remotes.get(src).isEmpty) // If we were asked for papers from someone we don't know, ask them!
+ broadcast(src :: Nil, PapersPlease)
}
+
+ case Papers(x) => remotes = remotes + (src -> Node(x))
+
+ case RelayedMessage(c, m) => ActorRegistry.actorsFor(c).foreach(_ send m)
+
+ case unknown => log debug ("Unknown message: %s", unknown.toString)
+ }
}
case RegisterLocalNode(s) => {
log debug ("RegisterLocalNode: %s", s)
- local = Node(local.endpoints + s)
+ local = Node(s :: local.endpoints)
broadcast(Papers(local.endpoints))
}
@@ -146,20 +174,20 @@ abstract class BasicClusterActor extends ClusterActor {
/**
* Implement this in a subclass to add node-to-node messaging
*/
- protected def toOneNode(dest : ADDR_T, msg : Array[Byte]) : Unit
+ protected def toOneNode(dest: ADDR_T, msg: Array[Byte]): Unit
/**
* Implement this in a subclass to add node-to-many-nodes messaging
*/
- protected def toAllNodes(msg : Array[Byte]) : Unit
+ protected def toAllNodes(msg: Array[Byte]): Unit
/**
* Sends the specified message to the given recipients using the serializer
* that's been set in the akka-conf
*/
protected def broadcast[T <: AnyRef](recipients: Iterable[ADDR_T], msg: T): Unit = {
- lazy val m = Cluster.serializer out msg
- for (r <- recipients) toOneNode(r,m)
+ lazy val m = serializer out msg
+ for (r <- recipients) toOneNode(r, m)
}
/**
@@ -167,18 +195,18 @@ abstract class BasicClusterActor extends ClusterActor {
* that's been set in the akka-conf
*/
protected def broadcast[T <: AnyRef](msg: T): Unit =
- if (!remotes.isEmpty) toAllNodes(Cluster.serializer out msg)
+ if (!remotes.isEmpty) toAllNodes(serializer out msg)
/**
* Applies the given PartialFunction to all known RemoteAddresses
*/
def lookup[T](handleRemoteAddress: PartialFunction[RemoteAddress, T]): Option[T] =
remotes.values.toList.flatMap(_.endpoints).find(handleRemoteAddress isDefinedAt _).map(handleRemoteAddress)
-
+
/**
* Applies the given function to all remote addresses known
*/
- def foreach(f : (RemoteAddress) => Unit) : Unit = remotes.values.toList.flatMap(_.endpoints).foreach(f)
+ def foreach(f: (RemoteAddress) => Unit): Unit = remotes.values.toList.flatMap(_.endpoints).foreach(f)
/**
* Registers a local endpoint
@@ -205,28 +233,31 @@ abstract class BasicClusterActor extends ClusterActor {
* Loads a specified ClusterActor and delegates to that instance.
*/
object Cluster extends Cluster with Logging {
- @volatile private[remote] var clusterActor: Option[ClusterActor] = None
- @volatile private[remote] var supervisor: Option[Supervisor] = None
-
- private[remote] lazy val serializer: Serializer = {
- val className = config.getString("akka.remote.cluster.serializer", Serializer.Java.getClass.getName)
- Class.forName(className).newInstance.asInstanceOf[Serializer]
- }
+ lazy val DEFAULT_SERIALIZER_CLASS_NAME = Serializer.Java.getClass.getName
- private[remote] def createClusterActor : Option[ClusterActor] = {
+ @volatile private[remote] var clusterActor: Option[ClusterActor] = None
+
+ private[remote] def createClusterActor(loader: ClassLoader): Option[ClusterActor] = {
val name = config.getString("akka.remote.cluster.actor")
-
+ if (name.isEmpty) throw new IllegalArgumentException(
+ "Can't start cluster since the 'akka.remote.cluster.actor' configuration option is not defined")
+
+ val serializer = Class.forName(config.getString("akka.remote.cluster.serializer", DEFAULT_SERIALIZER_CLASS_NAME)).newInstance.asInstanceOf[Serializer]
+ serializer.classLoader = Some(loader)
try {
- name map { fqn =>
- Class.forName(fqn).newInstance.asInstanceOf[ClusterActor]
+ name map {
+ fqn =>
+ val a = Class.forName(fqn).newInstance.asInstanceOf[ClusterActor]
+ a setSerializer serializer
+ a
}
}
catch {
- case e => log.error(e,"Couldn't load Cluster provider: [%s]",name.getOrElse("Not specified")); None
+ case e => log.error(e, "Couldn't load Cluster provider: [%s]", name.getOrElse("Not specified")); None
}
}
- private[remote] def createSupervisor(actor : ClusterActor) : Option[Supervisor] = {
+ private[akka] def createSupervisor(actor: ClusterActor): Option[Supervisor] = {
val sup = SupervisorFactory(
SupervisorConfig(
RestartStrategy(OneForOne, 5, 1000, List(classOf[Exception])),
@@ -245,23 +276,28 @@ object Cluster extends Cluster with Logging {
def deregisterLocalNode(hostname: String, port: Int): Unit = clusterActor.foreach(_.deregisterLocalNode(hostname, port))
def relayMessage(to: Class[_ <: Actor], msg: AnyRef): Unit = clusterActor.foreach(_.relayMessage(to, msg))
-
- def foreach(f : (RemoteAddress) => Unit) : Unit = clusterActor.foreach(_.foreach(f))
- def start : Unit = synchronized {
- if(supervisor.isEmpty) {
- for(actor <- createClusterActor;
- sup <- createSupervisor(actor)) {
- clusterActor = Some(actor)
- supervisor = Some(sup)
- sup.start
+ def foreach(f: (RemoteAddress) => Unit): Unit = clusterActor.foreach(_.foreach(f))
+
+ def start: Unit = start(None)
+
+ def start(serializerClassLoader: Option[ClassLoader]): Unit = synchronized {
+ log.info("Starting up Cluster Service...")
+ if (clusterActor.isEmpty) {
+ for{ actor <- createClusterActor(serializerClassLoader getOrElse getClass.getClassLoader)
+ sup <- createSupervisor(actor) } {
+ clusterActor = Some(actor)
+ sup.start
}
}
}
- def shutdown : Unit = synchronized {
- supervisor.foreach(_.stop)
- supervisor = None
+ def shutdown: Unit = synchronized {
+ log.info("Shutting down Cluster Service...")
+ for{
+ c <- clusterActor
+ s <- c._supervisor
+ } s.stop
clusterActor = None
}
}
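For reference, a minimal transport under the BasicClusterActor contract above might look like the following sketch. It is wired in via the 'akka.remote.cluster.actor' configuration option and gets its Serializer injected through setSerializer by Cluster.createClusterActor; InMemoryTransport is a hypothetical stand-in for a real group-communication layer such as the JGroupsClusterActor referenced in akka-reference.conf below.

import se.scalablesolutions.akka.remote.BasicClusterActor

// Hypothetical transport; wire a real messaging layer in here.
object InMemoryTransport {
  def send(dest: String, msg: Array[Byte]): Unit = ()
  def broadcastToAll(msg: Array[Byte]): Unit = ()
}

class InMemoryClusterActor extends BasicClusterActor {
  type ADDR_T = String  // node addresses are plain names in this sketch

  // Node-to-node messaging, used by broadcast(recipients, msg)
  protected def toOneNode(dest: String, msg: Array[Byte]): Unit =
    InMemoryTransport.send(dest, msg)

  // Node-to-many-nodes messaging, used by broadcast(msg)
  protected def toAllNodes(msg: Array[Byte]): Unit =
    InMemoryTransport.broadcastToAll(msg)
}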
diff --git a/akka-core/src/main/scala/remote/RemoteClient.scala b/akka-core/src/main/scala/remote/RemoteClient.scala
index 0887ebcd82..ec3d837c01 100644
--- a/akka-core/src/main/scala/remote/RemoteClient.scala
+++ b/akka-core/src/main/scala/remote/RemoteClient.scala
@@ -8,7 +8,7 @@ import se.scalablesolutions.akka.remote.protobuf.RemoteProtocol.{RemoteRequest,
import se.scalablesolutions.akka.actor.{Exit, Actor}
import se.scalablesolutions.akka.dispatch.{DefaultCompletableFuture, CompletableFuture}
import se.scalablesolutions.akka.util.{UUID, Logging}
-import se.scalablesolutions.akka.Config.config
+import se.scalablesolutions.akka.config.Config.config
import org.jboss.netty.channel._
import group.DefaultChannelGroup
diff --git a/akka-core/src/main/scala/remote/RemoteProtocolBuilder.scala b/akka-core/src/main/scala/remote/RemoteProtocolBuilder.scala
index 287168140a..bfeec1c34e 100644
--- a/akka-core/src/main/scala/remote/RemoteProtocolBuilder.scala
+++ b/akka-core/src/main/scala/remote/RemoteProtocolBuilder.scala
@@ -18,19 +18,17 @@ object RemoteProtocolBuilder {
private var SERIALIZER_PROTOBUF: Serializer.Protobuf = Serializer.Protobuf
- def setClassLoader(classLoader: ClassLoader) = {
- SERIALIZER_JAVA = new Serializer.Java
- SERIALIZER_JAVA_JSON = new Serializer.JavaJSON
- SERIALIZER_SCALA_JSON = new Serializer.ScalaJSON
- SERIALIZER_JAVA.setClassLoader(classLoader)
- SERIALIZER_JAVA_JSON.setClassLoader(classLoader)
- SERIALIZER_SCALA_JSON.setClassLoader(classLoader)
+ def setClassLoader(cl: ClassLoader) = {
+ SERIALIZER_JAVA.classLoader = Some(cl)
+ SERIALIZER_JAVA_JSON.classLoader = Some(cl)
+ SERIALIZER_SCALA_JSON.classLoader = Some(cl)
}
def getMessage(request: RemoteRequest): Any = {
request.getProtocol match {
case SerializationProtocol.SBINARY =>
- val renderer = Class.forName(new String(request.getMessageManifest.toByteArray)).newInstance.asInstanceOf[SBinary[_ <: AnyRef]]
+ val renderer = Class.forName(
+ new String(request.getMessageManifest.toByteArray)).newInstance.asInstanceOf[SBinary[_ <: AnyRef]]
renderer.fromBytes(request.getMessage.toByteArray)
case SerializationProtocol.SCALA_JSON =>
val manifest = SERIALIZER_JAVA.in(request.getMessageManifest.toByteArray, None).asInstanceOf[String]
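The reworked setClassLoader above now delegates to the serializers' public classLoader field; a client that deploys application classes in its own loader can, as a sketch (the package of RemoteProtocolBuilder is assumed from its file location):

import se.scalablesolutions.akka.remote.RemoteProtocolBuilder

// Make remote deserialization resolve classes via the application loader.
RemoteProtocolBuilder.setClassLoader(getClass.getClassLoader)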
diff --git a/akka-core/src/main/scala/remote/RemoteServer.scala b/akka-core/src/main/scala/remote/RemoteServer.scala
index 02cf98bcd2..8a40049fea 100644
--- a/akka-core/src/main/scala/remote/RemoteServer.scala
+++ b/akka-core/src/main/scala/remote/RemoteServer.scala
@@ -12,7 +12,7 @@ import java.util.{Map => JMap}
import se.scalablesolutions.akka.actor._
import se.scalablesolutions.akka.util._
import se.scalablesolutions.akka.remote.protobuf.RemoteProtocol.{RemoteReply, RemoteRequest}
-import se.scalablesolutions.akka.Config.config
+import se.scalablesolutions.akka.config.Config.config
import org.jboss.netty.bootstrap.ServerBootstrap
import org.jboss.netty.channel._
@@ -58,7 +58,7 @@ object RemoteNode extends RemoteServer
*/
object RemoteServer {
val HOSTNAME = config.getString("akka.remote.server.hostname", "localhost")
- val PORT = config.getInt("akka.remote.server.port", 9966)
+ val PORT = config.getInt("akka.remote.server.port", 9999)
val CONNECTION_TIMEOUT_MILLIS = config.getInt("akka.remote.server.connection-timeout", 1000)
diff --git a/akka-core/src/main/scala/serialization/Serializable.scala b/akka-core/src/main/scala/serialization/Serializable.scala
index b9a3cf5927..b5998cfb2e 100644
--- a/akka-core/src/main/scala/serialization/Serializable.scala
+++ b/akka-core/src/main/scala/serialization/Serializable.scala
@@ -5,10 +5,15 @@
package se.scalablesolutions.akka.serialization
import org.codehaus.jackson.map.ObjectMapper
+
import com.google.protobuf.Message
-import reflect.Manifest
+
+import scala.reflect.Manifest
+
import sbinary.DefaultProtocol
+
import java.io.{StringWriter, ByteArrayOutputStream, ObjectOutputStream}
+
import sjson.json.{Serializer=>SJSONSerializer}
object SerializationProtocol {
diff --git a/akka-core/src/main/scala/serialization/Serializer.scala b/akka-core/src/main/scala/serialization/Serializer.scala
index 3eb9315126..c878548711 100644
--- a/akka-core/src/main/scala/serialization/Serializer.scala
+++ b/akka-core/src/main/scala/serialization/Serializer.scala
@@ -18,8 +18,12 @@ import sjson.json.{Serializer => SJSONSerializer}
* @author Jonas Bonér
*/
trait Serializer {
+ var classLoader: Option[ClassLoader] = None
+
def deepClone(obj: AnyRef): AnyRef
+
def out(obj: AnyRef): Array[Byte]
+
def in(bytes: Array[Byte], clazz: Option[Class[_]]): AnyRef
}
@@ -51,11 +55,7 @@ object Serializer {
* @author Jonas Bonér
*/
object Java extends Java
- class Java extends Serializer {
- private var classLoader: Option[ClassLoader] = None
-
- def setClassLoader(cl: ClassLoader) = classLoader = Some(cl)
-
+ trait Java extends Serializer {
def deepClone(obj: AnyRef): AnyRef = in(out(obj), None)
def out(obj: AnyRef): Array[Byte] = {
@@ -67,8 +67,9 @@ object Serializer {
}
def in(bytes: Array[Byte], clazz: Option[Class[_]]): AnyRef = {
- val in = if (classLoader.isDefined) new ClassLoaderObjectInputStream(classLoader.get, new ByteArrayInputStream(bytes))
- else new ObjectInputStream(new ByteArrayInputStream(bytes))
+ val in =
+ if (classLoader.isDefined) new ClassLoaderObjectInputStream(classLoader.get, new ByteArrayInputStream(bytes))
+ else new ObjectInputStream(new ByteArrayInputStream(bytes))
val obj = in.readObject
in.close
obj
@@ -79,18 +80,21 @@ object Serializer {
* @author Jonas Bonér
*/
object Protobuf extends Protobuf
- class Protobuf extends Serializer {
+ trait Protobuf extends Serializer {
def deepClone(obj: AnyRef): AnyRef = in(out(obj), Some(obj.getClass))
def out(obj: AnyRef): Array[Byte] = {
- if (!obj.isInstanceOf[Message]) throw new IllegalArgumentException("Can't serialize a non-protobuf message using protobuf [" + obj + "]")
+ if (!obj.isInstanceOf[Message]) throw new IllegalArgumentException(
+ "Can't serialize a non-protobuf message using protobuf [" + obj + "]")
obj.asInstanceOf[Message].toByteArray
}
def in(bytes: Array[Byte], clazz: Option[Class[_]]): AnyRef = {
- if (!clazz.isDefined) throw new IllegalArgumentException("Need a protobuf message class to be able to serialize bytes using protobuf")
+ if (!clazz.isDefined) throw new IllegalArgumentException(
+ "Need a protobuf message class to be able to serialize bytes using protobuf")
// TODO: should we cache this method lookup?
- val message = clazz.get.getDeclaredMethod("getDefaultInstance", EMPTY_CLASS_ARRAY: _*).invoke(null, EMPTY_ANY_REF_ARRAY: _*).asInstanceOf[Message]
+ val message = clazz.get.getDeclaredMethod(
+ "getDefaultInstance", EMPTY_CLASS_ARRAY: _*).invoke(null, EMPTY_ANY_REF_ARRAY: _*).asInstanceOf[Message]
message.toBuilder().mergeFrom(bytes).build
}
@@ -104,13 +108,9 @@ object Serializer {
* @author Jonas Bonér
*/
object JavaJSON extends JavaJSON
- class JavaJSON extends Serializer {
+ trait JavaJSON extends Serializer {
private val mapper = new ObjectMapper
- private var classLoader: Option[ClassLoader] = None
-
- def setClassLoader(cl: ClassLoader) = classLoader = Some(cl)
-
def deepClone(obj: AnyRef): AnyRef = in(out(obj), Some(obj.getClass))
def out(obj: AnyRef): Array[Byte] = {
@@ -122,9 +122,11 @@ object Serializer {
}
def in(bytes: Array[Byte], clazz: Option[Class[_]]): AnyRef = {
- if (!clazz.isDefined) throw new IllegalArgumentException("Can't deserialize JSON to instance if no class is provided")
- val in = if (classLoader.isDefined) new ClassLoaderObjectInputStream(classLoader.get, new ByteArrayInputStream(bytes))
- else new ObjectInputStream(new ByteArrayInputStream(bytes))
+ if (!clazz.isDefined) throw new IllegalArgumentException(
+ "Can't deserialize JSON to instance if no class is provided")
+ val in =
+ if (classLoader.isDefined) new ClassLoaderObjectInputStream(classLoader.get, new ByteArrayInputStream(bytes))
+ else new ObjectInputStream(new ByteArrayInputStream(bytes))
val obj = mapper.readValue(in, clazz.get).asInstanceOf[AnyRef]
in.close
obj
@@ -140,13 +142,9 @@ object Serializer {
* @author Jonas Bonér
*/
object ScalaJSON extends ScalaJSON
- class ScalaJSON extends Serializer {
+ trait ScalaJSON extends Serializer {
def deepClone(obj: AnyRef): AnyRef = in(out(obj), None)
- private var classLoader: Option[ClassLoader] = None
-
- def setClassLoader(cl: ClassLoader) = classLoader = Some(cl)
-
def out(obj: AnyRef): Array[Byte] = SJSONSerializer.SJSON.out(obj)
// FIXME set ClassLoader on SJSONSerializer.SJSON
@@ -166,7 +164,7 @@ object Serializer {
* @author Jonas Bonér
*/
object SBinary extends SBinary
- class SBinary {
+ trait SBinary {
import sbinary.DefaultProtocol._
def deepClone[T <: AnyRef](obj: T)(implicit w : Writes[T], r : Reads[T]): T = in[T](out[T](obj), None)
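With the serializers turned into traits backed by companion objects, the class loader is now configured through the public classLoader field instead of a setClassLoader method. A usage sketch, assuming a @serializable payload class of our own (MyMessage is hypothetical):

import se.scalablesolutions.akka.serialization.Serializer

@serializable case class MyMessage(payload: String)

// Round-trip through the Java serializer; the class loader set here is
// consulted by the ClassLoaderObjectInputStream on the read side.
val bytes: Array[Byte] = Serializer.Java.out(MyMessage("hello"))
Serializer.Java.classLoader = Some(classOf[MyMessage].getClassLoader)
val msg = Serializer.Java.in(bytes, None).asInstanceOf[MyMessage]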
diff --git a/akka-core/src/main/scala/stm/DataFlowVariable.scala b/akka-core/src/main/scala/stm/DataFlowVariable.scala
index daed4ec55f..cb1b828db1 100644
--- a/akka-core/src/main/scala/stm/DataFlowVariable.scala
+++ b/akka-core/src/main/scala/stm/DataFlowVariable.scala
@@ -2,7 +2,7 @@
* Copyright (C) 2009-2010 Scalable Solutions AB
*/
-package se.scalablesolutions.akka.state
+package se.scalablesolutions.akka.stm
import java.util.concurrent.atomic.AtomicReference
import java.util.concurrent.{ConcurrentLinkedQueue, LinkedBlockingQueue}
diff --git a/akka-core/src/main/scala/stm/HashTrie.scala b/akka-core/src/main/scala/stm/HashTrie.scala
index 02b7ad2145..fcb35baff3 100644
--- a/akka-core/src/main/scala/stm/HashTrie.scala
+++ b/akka-core/src/main/scala/stm/HashTrie.scala
@@ -32,7 +32,7 @@
POSSIBILITY OF SUCH DAMAGE.
**/
-package se.scalablesolutions.akka.collection
+package se.scalablesolutions.akka.stm
trait PersistentDataStructure
@@ -77,7 +77,7 @@ object HashTrie {
// nodes
@serializable
-private[collection] sealed trait Node[K, +V] {
+private[stm] sealed trait Node[K, +V] {
val size: Int
def apply(key: K, hash: Int): Option[V]
@@ -90,7 +90,7 @@ private[collection] sealed trait Node[K, +V] {
}
@serializable
-private[collection] class EmptyNode[K] extends Node[K, Nothing] {
+private[stm] class EmptyNode[K] extends Node[K, Nothing] {
val size = 0
def apply(key: K, hash: Int) = None
@@ -106,12 +106,12 @@ private[collection] class EmptyNode[K] extends Node[K, Nothing] {
}
}
-private[collection] abstract class SingleNode[K, +V] extends Node[K, V] {
+private[stm] abstract class SingleNode[K, +V] extends Node[K, V] {
val hash: Int
}
-private[collection] class LeafNode[K, +V](key: K, val hash: Int, value: V) extends SingleNode[K, V] {
+private[stm] class LeafNode[K, +V](key: K, val hash: Int, value: V) extends SingleNode[K, V] {
val size = 1
def apply(key: K, hash: Int) = if (this.key == key) Some(value) else None
@@ -141,7 +141,7 @@ private[collection] class LeafNode[K, +V](key: K, val hash: Int, value: V) exten
}
-private[collection] class CollisionNode[K, +V](val hash: Int, bucket: List[(K, V)]) extends SingleNode[K, V] {
+private[stm] class CollisionNode[K, +V](val hash: Int, bucket: List[(K, V)]) extends SingleNode[K, V] {
lazy val size = bucket.length
def this(hash: Int, pairs: (K, V)*) = this(hash, pairs.toList)
@@ -185,7 +185,7 @@ private[collection] class CollisionNode[K, +V](val hash: Int, bucket: List[(K, V
override def toString = "CollisionNode(" + bucket.toString + ")"
}
-private[collection] class BitmappedNode[K, +V](shift: Int)(table: Array[Node[K, V]], bits: Int) extends Node[K, V] {
+private[stm] class BitmappedNode[K, +V](shift: Int)(table: Array[Node[K, V]], bits: Int) extends Node[K, V] {
lazy val size = {
val sizes = for {
n <- table
@@ -284,7 +284,7 @@ private[collection] class BitmappedNode[K, +V](shift: Int)(table: Array[Node[K,
}
-private[collection] object BitmappedNode {
+private[stm] object BitmappedNode {
def apply[K, V](shift: Int)(node: SingleNode[K, V], key: K, hash: Int, value: V) = {
val table = new Array[Node[K, V]](Math.max((hash >>> shift) & 0x01f, (node.hash >>> shift) & 0x01f) + 1)
@@ -312,7 +312,7 @@ private[collection] object BitmappedNode {
}
-private[collection] class FullNode[K, +V](shift: Int)(table: Array[Node[K, V]]) extends Node[K, V] {
+private[stm] class FullNode[K, +V](shift: Int)(table: Array[Node[K, V]]) extends Node[K, V] {
lazy val size = table.foldLeft(0) { _ + _.size }
def apply(key: K, hash: Int) = table((hash >>> shift) & 0x01f)(key, hash)
diff --git a/akka-core/src/main/scala/stm/ResultOrFailure.scala b/akka-core/src/main/scala/stm/ResultOrFailure.scala
index 51ce6ddf68..ced5572104 100644
--- a/akka-core/src/main/scala/stm/ResultOrFailure.scala
+++ b/akka-core/src/main/scala/stm/ResultOrFailure.scala
@@ -2,9 +2,7 @@
* Copyright (C) 2009-2010 Scalable Solutions AB
*/
-package se.scalablesolutions.akka.util
-
-import stm.Transaction
+package se.scalablesolutions.akka.stm
/**
* Reference that can hold either a typed value or an exception.
diff --git a/akka-core/src/main/scala/stm/Transaction.scala b/akka-core/src/main/scala/stm/Transaction.scala
index 1637b4c906..a7184e969d 100644
--- a/akka-core/src/main/scala/stm/Transaction.scala
+++ b/akka-core/src/main/scala/stm/Transaction.scala
@@ -6,16 +6,18 @@ package se.scalablesolutions.akka.stm
import java.util.concurrent.atomic.AtomicLong
import java.util.concurrent.atomic.AtomicInteger
+import java.util.concurrent.TimeUnit
+
+import scala.collection.mutable.HashMap
-import se.scalablesolutions.akka.state.Committable
import se.scalablesolutions.akka.util.Logging
import org.multiverse.api.{Transaction => MultiverseTransaction}
import org.multiverse.api.GlobalStmInstance.getGlobalStmInstance
import org.multiverse.api.ThreadLocalTransaction._
-import org.multiverse.templates.OrElseTemplate
-
-import scala.collection.mutable.HashMap
+import org.multiverse.templates.{TransactionTemplate, OrElseTemplate}
+import org.multiverse.utils.backoff.ExponentialBackoffPolicy
+import org.multiverse.stms.alpha.AlphaStm
class NoTransactionInScopeException extends RuntimeException
class TransactionRetryException(message: String) extends RuntimeException(message)
@@ -30,8 +32,8 @@ class TransactionRetryException(message: String) extends RuntimeException(messag
* Here are some examples (assuming implicit transaction family name in scope):
*
* import se.scalablesolutions.akka.stm.Transaction._
- *
- * atomic {
+ *
+ * atomic {
* .. // do something within a transaction
* }
*
@@ -39,8 +41,8 @@ class TransactionRetryException(message: String) extends RuntimeException(messag
* Example of atomic transaction management using atomic block with retry count:
*
* import se.scalablesolutions.akka.stm.Transaction._
- *
- * atomic(maxNrOfRetries) {
+ *
+ * atomic(maxNrOfRetries) {
* .. // do something within a transaction
* }
*
@@ -49,10 +51,10 @@ class TransactionRetryException(message: String) extends RuntimeException(messag
* Which is a good way to reduce contention and transaction collisions.
*
* import se.scalablesolutions.akka.stm.Transaction._
- *
- * atomically {
+ *
+ * atomically {
* .. // try to do something
- * } orElse {
+ * } orElse {
* .. // if transaction clashes try to do something else to minimize contention
* }
*
* import se.scalablesolutions.akka.stm.Transaction._
- * for (tx <- Transaction) {
+ * for (tx <- Transaction) {
* ... // do transactional stuff
* }
*
- * val result = for (tx <- Transaction) yield {
+ * val result = for (tx <- Transaction) yield {
* ... // do transactional stuff yielding a result
* }
*
@@ -78,17 +80,17 @@ class TransactionRetryException(message: String) extends RuntimeException(messag
*
* // You can use them together with Transaction in a for comprehension since
* // TransactionalRef is also monadic
- * for {
+ * for {
* tx <- Transaction
* ref <- refs
* } {
* ... // use the ref inside a transaction
* }
*
- * val result = for {
+ * val result = for {
* tx <- Transaction
* ref <- refs
- * } yield {
+ * } yield {
* ... // use the ref inside a transaction, yield a result
* }
*
- *
- * It could also be that the transaction is retried (e.g. caused by optimistic locking failures). This is also a task
- * for the template. In the future this retry behavior will be customizable.
- *
- * If a transaction already is available on the TransactionThreadLocal, no new transaction is started and essentially
- * the whole AtomicTemplate is ignored.
- *
- * If no transaction is available on the TransactionThreadLocal, a new one will be created and used during the execution
- * of the AtomicTemplate and will be removed once the AtomicTemplate finishes.
- *
- * All uncaught throwable's lead to a rollback of the transaction.
- *
- * AtomicTemplates are not thread-safe to use.
- *
- * AtomicTemplates can completely work without threadlocals. See the
- * {@link AtomicTemplate#AtomicTemplate(org.multiverse.api.Stm, String, boolean, boolean, int)} for more information.
- *
- * @author Peter Veentjer
- */
-public abstract class AtomicTemplate {
-
- private final static Logger logger = Logger.getLogger(AtomicTemplate.class.getName());
-
- private final Stm stm;
- private final boolean ignoreThreadLocalTransaction;
- private final int retryCount;
- private final boolean readonly;
- private int attemptCount;
- private final String familyName;
-
- /**
- * Creates a new AtomicTemplate that uses the STM stored in the GlobalStm and works with the {@link
- * org.multiverse.utils.ThreadLocalTransaction}.
- */
- public AtomicTemplate() {
- this(getGlobalStmInstance());
- }
-
- public AtomicTemplate(boolean readonly) {
- this(getGlobalStmInstance(), null, false, readonly, Integer.MAX_VALUE);
- }
-
- /**
- * Creates a new AtomicTemplate using the provided stm. The transaction used is stored/retrieved from the {@link
- * org.multiverse.utils.ThreadLocalTransaction}.
- *
- * @param stm the stm to use for transactions.
- * @throws NullPointerException if stm is null.
- */
- public AtomicTemplate(Stm stm) {
- this(stm, null, false, false, Integer.MAX_VALUE);
- }
-
- public AtomicTemplate(String familyName, boolean readonly, int retryCount) {
- this(getGlobalStmInstance(), familyName, false, readonly, retryCount);
- }
-
- /**
- * Creates a new AtomicTemplate that uses the provided STM. This method is provided to make Multiverse easy to
- * integrate with environment that don't want to depend on threadlocals.
- *
- * @param stm the stm to use for transactions.
- * @param ignoreThreadLocalTransaction true if this Template should completely ignore the ThreadLocalTransaction.
- * This is useful for using the AtomicTemplate in other environments that don't
- * want to depend on threadlocals but do want to use the AtomicTemplate.
- * @throws NullPointerException if stm is null.
- */
- public AtomicTemplate(Stm stm, String familyName, boolean ignoreThreadLocalTransaction, boolean readonly,
- int retryCount) {
- if (stm == null) {
- throw new NullPointerException();
- }
- if (retryCount < 0) {
- throw new IllegalArgumentException();
- }
- this.stm = stm;
- this.ignoreThreadLocalTransaction = ignoreThreadLocalTransaction;
- this.readonly = readonly;
- this.retryCount = retryCount;
- this.familyName = familyName;
- }
-
- public String getFamilyName() {
- return familyName;
- }
-
- /**
- * Returns the current attempt. Value will always be larger than zero and increases every time the transaction needs
- * to be retried.
- *
- * @return the current attempt count.
- */
- public final int getAttemptCount() {
- return attemptCount;
- }
-
- /**
- * Returns the number of retries that this AtomicTemplate is allowed to do. The returned value will always be equal
- * to or larger than 0.
- *
- * @return the number of retries.
- */
- public final int getRetryCount() {
- return retryCount;
- }
-
- /**
- * Returns the {@link Stm} used by this AtomicTemplate to execute transactions on.
- *
- * @return the Stm used by this AtomicTemplate.
- */
- public final Stm getStm() {
- return stm;
- }
-
- /**
- * Check if this AtomicTemplate ignores the ThreadLocalTransaction.
- *
- * @return true if this AtomicTemplate ignores the ThreadLocalTransaction, false otherwise.
- */
- public final boolean isIgnoreThreadLocalTransaction() {
- return ignoreThreadLocalTransaction;
- }
-
- /**
- * Checks if this AtomicTemplate executes readonly transactions.
- *
- * @return true if it executes readonly transactions, false otherwise.
- */
- public final boolean isReadonly() {
- return readonly;
- }
-
- /**
- * This method can be overridden to do pre-start tasks.
- */
- public void preStart() {
- }
-
- /**
- * This method can be overridden to do post-start tasks.
- *
- * @param t the transaction used for this execution.
- */
- public void postStart(Transaction t) {
- }
-
- /**
- * This method can be overridden to do pre-commit tasks.
- */
- public void preCommit() {
- }
-
- /**
- * This method can be overridden to do post-commit tasks.
- */
- public void postCommit() {
- }
-
- /**
- * This is the method that needs to be implemented.
- *
- * @param t the transaction used for this execution.
- * @return the result of the execution.
- *
- * @throws Exception the Exception thrown
- */
- public abstract E execute(Transaction t) throws Exception;
-
- /**
- * Executes the template.
- *
- * @return the result of the {@link #execute(org.multiverse.api.Transaction)} method.
- *
- * @throws InvisibleCheckedException if a checked exception was thrown while executing the {@link
- * #execute(org.multiverse.api.Transaction)} method.
- * @throws AbortedException if the transaction was explicitly aborted.
- * @throws TooManyRetriesException if the template retried the transaction too many times. The cause of the last
- * failure (also an exception) is included as cause. So you have some idea where
- * to look for problems
- */
- public final E execute() {
- try {
- return executeChecked();
- } catch (Exception ex) {
- if (ex instanceof RuntimeException) {
- throw (RuntimeException) ex;
- } else {
- throw new AtomicTemplate.InvisibleCheckedException(ex);
- }
- }
- }
-
- /**
- * Executes the Template and rethrows the checked exception instead of wrapping it in a InvisibleCheckedException.
- *
- * @return the result
- *
- * @throws Exception the Exception thrown inside the {@link #execute(org.multiverse.api.Transaction)}
- * method.
- * @throws AbortedException if the transaction was explicitly aborted.
- * @throws TooManyRetriesException if the template retried the transaction too many times. The cause of the last
- * failure (also an exception) is included as cause. So you have some idea where to
- * look for problems
- */
- public final E executeChecked() throws Exception {
- preStart();
- Transaction t = getTransaction();
- if (noUsableTransaction(t)) {
- t = startTransaction();
- setTransaction(t);
- postStart(t);
- try {
- attemptCount = 1;
- Exception lastRetryCause = null;
- while (attemptCount - 1 <= retryCount) {
- boolean abort = true;
- boolean reset = false;
- try {
- E result = execute(t);
- if (t.getStatus().equals(TransactionStatus.aborted)) {
- String msg = format("Transaction with familyname %s is aborted", t.getFamilyName());
- throw new AbortedException(msg);
- }
- preCommit();
- t.commit();
- abort = false;
- reset = false;
- postCommit();
- return result;
- } catch (RetryError e) {
- Latch latch = new CheapLatch();
- t.abortAndRegisterRetryLatch(latch);
- latch.awaitUninterruptible();
- //since the abort is already done, no need to do it again.
- abort = false;
- } catch (CommitFailureException ex) {
- lastRetryCause = ex;
- reset = true;
- //ignore, just retry the transaction
- } catch (LoadException ex) {
- lastRetryCause = ex;
- reset = true;
- //ignore, just retry the transaction
- } finally {
- if (abort) {
- t.abort();
- if (reset) {
- t = t.abortAndReturnRestarted();
- setTransaction(t);
- }
- }
- }
- attemptCount++;
- }
-
- throw new TooManyRetriesException("Too many retries", lastRetryCause);
- } finally {
- setTransaction(null);
- }
- } else {
- return execute(t);
- }
- }
-
- private Transaction startTransaction() {
- return readonly ? stm.startReadOnlyTransaction(familyName) : stm.startUpdateTransaction(familyName);
- }
-
- private boolean noUsableTransaction(Transaction t) {
- return t == null || t.getStatus() != TransactionStatus.active;
- }
-
- /**
- * Gets the current Transaction stored in the TransactionThreadLocal.
- *
- * If the ignoreThreadLocalTransaction is set, the threadlocal stuff is completely ignored.
- *
- * @return the found transaction, or null if none is found.
- */
- private Transaction getTransaction() {
- return ignoreThreadLocalTransaction ? null : getThreadLocalTransaction();
- }
-
- /**
- * Stores the transaction in the TransactionThreadLocal.
- *
- * This call is ignored if the ignoreThreadLocalTransaction is true.
- *
- * @param t the transaction to set (is allowed to be null).
- */
- private void setTransaction(Transaction t) {
- if (!ignoreThreadLocalTransaction) {
- setThreadLocalTransaction(t);
- }
- }
-
- public static class InvisibleCheckedException extends RuntimeException {
-
- public InvisibleCheckedException(Exception cause) {
- super(cause);
- }
-
- @Override
- public Exception getCause() {
- return (Exception) super.getCause();
- }
- }
-}
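The AtomicTemplate deleted above is superseded by Multiverse's TransactionTemplate, which Transaction.scala now imports. Assuming TransactionTemplate keeps the execute(Transaction)/execute() shape of the AtomicTemplate it replaces, the Scala-side atomic block reduces to a sketch like:

import org.multiverse.api.{Transaction => MultiverseTransaction}
import org.multiverse.templates.TransactionTemplate

// Run 'body' inside a Multiverse transaction; retries on commit/load
// failures are handled by the template, as AtomicTemplate did before.
def atomic[T](body: => T): T =
  new TransactionTemplate[T]() {
    def execute(tx: MultiverseTransaction): T = body
  }.execute()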
diff --git a/akka-util/pom.xml b/akka-util/pom.xml
deleted file mode 100644
index 9b22090ee9..0000000000
--- a/akka-util/pom.xml
+++ /dev/null
@@ -1,39 +0,0 @@
-
- 4.0.0
-
- akka-util
- Akka Util Module
-
- jar
-
-
- akka
- se.scalablesolutions.akka
- 0.7-SNAPSHOT
-
-
-
-
- org.scala-lang
- scala-library
- ${scala.version}
-
-
- org.codehaus.aspectwerkz
- aspectwerkz-nodeps-jdk5
- 2.1
-
-
- org.codehaus.aspectwerkz
- aspectwerkz-jdk5
- 2.1
-
-
- net.lag
- configgy
- 1.4.7
-
-
-
-
diff --git a/akka-util/src/main/scala/Bootable.scala b/akka-util/src/main/scala/Bootable.scala
index a46a131f00..172be3fd43 100644
--- a/akka-util/src/main/scala/Bootable.scala
+++ b/akka-util/src/main/scala/Bootable.scala
@@ -5,6 +5,6 @@
package se.scalablesolutions.akka.util
trait Bootable {
- def onLoad : Unit = ()
- def onUnload : Unit = ()
+ def onLoad {}
+ def onUnload {}
}
\ No newline at end of file
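Boot classes listed under the 'boot' option in akka-reference.conf below implement this trait; a sketch of one (what it actually starts is left out):

import se.scalablesolutions.akka.util.Bootable

class Boot extends Bootable {
  override def onLoad   { /* start supervisors, actors, services here */ }
  override def onUnload { /* stop them again */ }
}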
diff --git a/akka-util/src/main/scala/Config.scala b/akka-util/src/main/scala/Config.scala
deleted file mode 100644
index f25b08ee46..0000000000
--- a/akka-util/src/main/scala/Config.scala
+++ /dev/null
@@ -1,74 +0,0 @@
-/**
- * Copyright (C) 2009-2010 Scalable Solutions AB
- */
-
-package se.scalablesolutions.akka
-
-import util.Logging
-
-import net.lag.configgy.{Configgy, ParseException}
-
-/**
- * @author Jonas Bonér
- */
-object Config extends Logging {
- val VERSION = "0.7-SNAPSHOT"
-
- // Set Multiverse options for max speed
- System.setProperty("org.multiverse.MuliverseConstants.sanityChecks", "false")
- System.setProperty("org.multiverse.api.GlobalStmInstance.factorymethod", "org.multiverse.stms.alpha.AlphaStm.createFast")
-
- val HOME = {
- val systemHome = System.getenv("AKKA_HOME")
- if (systemHome == null || systemHome.length == 0 || systemHome == ".") {
- val optionHome = System.getProperty("akka.home", "")
- if (optionHome.length != 0) Some(optionHome)
- else None
- } else Some(systemHome)
- }
-
- val config = {
- if (HOME.isDefined) {
- try {
- val configFile = HOME.get + "/config/akka.conf"
- Configgy.configure(configFile)
- log.info("AKKA_HOME is defined to [%s], config loaded from [%s].", HOME.get, configFile)
- } catch {
- case e: ParseException => throw new IllegalStateException(
- "'akka.conf' config file can not be found in [" + HOME + "/config/akka.conf] aborting." +
- "\n\tEither add it in the 'config' directory or add it to the classpath.")
- }
- } else if (System.getProperty("akka.config", "") != "") {
- val configFile = System.getProperty("akka.config", "")
- try {
- Configgy.configure(configFile)
- log.info("Config loaded from -Dakka.config=%s", configFile)
- } catch {
- case e: ParseException => throw new IllegalStateException(
- "Config could not be loaded from -Dakka.config=" + configFile)
- }
- } else {
- try {
- Configgy.configureFromResource("akka.conf", getClass.getClassLoader)
- log.info("Config loaded from the application classpath.")
- } catch {
- case e: ParseException => throw new IllegalStateException(
- "\nCan't find 'akka.conf' configuration file." +
- "\nOne of the three ways of locating the 'akka.conf' file needs to be defined:" +
- "\n\t1. Define 'AKKA_HOME' environment variable to the root of the Akka distribution." +
- "\n\t2. Define the '-Dakka.config=...' system property option." +
- "\n\t3. Put the 'akka.conf' file on the classpath." +
- "\nI have no way of finding the 'akka.conf' configuration file." +
- "\nAborting.")
- }
- }
- Configgy.config
- }
-
- val CONFIG_VERSION = config.getString("akka.version", "0")
- if (VERSION != CONFIG_VERSION) throw new IllegalStateException(
- "Akka JAR version [" + VERSION + "] is different than the provided config ('akka.conf') version [" + CONFIG_VERSION + "]")
- val startTime = System.currentTimeMillis
-
- def uptime = (System.currentTimeMillis - startTime) / 1000
-}
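The Config object itself survives this deletion; it is presumably relocated to the new se.scalablesolutions.akka.config package, as the rewritten imports in RemoteClient.scala and RemoteServer.scala above show. Usage is unchanged:

import se.scalablesolutions.akka.config.Config.config

// Defaults are the second argument, as in the getString/getInt calls
// elsewhere in this diff.
val hostname = config.getString("akka.remote.server.hostname", "localhost")
val port     = config.getInt("akka.remote.server.port", 9999)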
diff --git a/akka-util/src/main/scala/Helpers.scala b/akka-util/src/main/scala/Helpers.scala
index b7e5ff3b75..55abf6e7ac 100644
--- a/akka-util/src/main/scala/Helpers.scala
+++ b/akka-util/src/main/scala/Helpers.scala
@@ -40,7 +40,6 @@ object Helpers extends Logging {
}
// ================================================
- @serializable
class ReadWriteLock {
private val rwl = new ReentrantReadWriteLock
private val readLock = rwl.readLock
diff --git a/akka-util/src/main/scala/Logging.scala b/akka-util/src/main/scala/Logging.scala
index a6b89b86b2..b988c73f22 100644
--- a/akka-util/src/main/scala/Logging.scala
+++ b/akka-util/src/main/scala/Logging.scala
@@ -6,10 +6,10 @@ package se.scalablesolutions.akka.util
import net.lag.logging.Logger
-import java.io.StringWriter;
-import java.io.PrintWriter;
-import java.net.InetAddress;
-import java.net.UnknownHostException;
+import java.io.StringWriter
+import java.io.PrintWriter
+import java.net.InetAddress
+import java.net.UnknownHostException
/**
* Base trait for all classes that want to be able to use the logging infrastructure.
@@ -30,6 +30,7 @@ trait Logging {
*
* @author Jonas Bonér
*/
+ // FIXME make use of LoggableException
class LoggableException extends Exception with Logging {
private val uniqueId = getExceptionID
private var originalException: Option[Exception] = None
diff --git a/akka.iml b/akka.iml
index 2f07a75716..74542e8e48 100644
--- a/akka.iml
+++ b/akka.iml
@@ -2,6 +2,23 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/changes.xml b/changes.xml
deleted file mode 100644
index 90a9e31c88..0000000000
--- a/changes.xml
+++ /dev/null
@@ -1,86 +0,0 @@
-
-
-
-
-
-
- Akka Release Notes
- Jonas Bonér
-
-
-
- Clustered Comet using Akka remote actors and clustered membership API
- Cluster membership API and implementation based on JGroups
- Security module for HTTP-based authentication and authorization
- Support for using Scala XML tags in RESTful Actors (scala-jersey)
- Support for Comet Actors using Atmosphere
- MongoDB as Akka storage backend
- Redis as Akka storage backend
- Transparent JSON serialization of Scala objects based on SJSON
- Kerberos/SPNEGO support for Security module
- Implicit sender for remote actors: Remote actors are able to use reply to answer a request
- Support for using the Lift Web framework with Actors
- Rewritten STM, now integrated with Multiverse STM
- Added STM API for atomic {..} and run {..} orElse {..}
- Added STM retry
- Complete rewrite of the persistence transaction management, now based on Unit of Work and Multiverse STM
- Monadic API to TransactionalRef (use it in for-comprehension)
- Lightweight actor syntax using one of the Actor.actor(..) methods. F.e: 'val a = actor { case _ => .. }'
- Rewritten event-based dispatcher which improved performance by 10x, now substantially faster than event-driven Scala Actors
- New Scala JSON parser based on sjson
- Added zlib compression to remote actors
- Added implicit sender reference for fire-forget ('!') message sends
- Smoother web app integration; just add akka.conf to the classpath (WEB-INF/classes), no need for AKKA_HOME or -Dakka.conf=..
- Modularization of distribution into a thin core (actors, remoting and STM) and the rest in submodules
- Added 'forward' to Actor, forwards message but keeps original sender address
- JSON serialization for Java objects (using Jackson)
- JSON serialization for Scala objects (using SJSON)
- Added implementation for remote actor reconnect upon failure
- Protobuf serialization for Java and Scala objects
- SBinary serialization for Scala objects
- Protobuf as remote protocol
- AMQP integration; abstracted as actors in a supervisor hierarchy. Impl AMQP 0.9.1
- Updated Cassandra integration and CassandraSession API to v0.4
- Added CassandraSession API (with socket pooling) wrapping Cassandra's Thrift API in Scala and Java APIs
- CassandraStorage now works with an external Cassandra cluster
- ActorRegistry for retrieving Actor instances by class name and by id
- SchedulerActor for scheduling periodic tasks
- Now start up kernel with 'java -jar dist/akka-0.6.jar'
- Added mailing list: akka-user@googlegroups.com
- Improved and restructured documentation
- New URL: http://akkasource.org
- New and much improved docs
- Enhanced trapping of failures: 'trapExit = List(classOf[..], classOf[..])'
- Upgraded to Netty 3.2, Protobuf 2.2, ScalaTest 1.0, Jersey 1.1.3, Atmosphere 0.4.1, Cassandra 0.4.1, Configgy 1.4
- Lowered actor memory footprint; now an actor consumes ~600 bytes, which means that you can create 6.5 million actors on 4 G RAM
- Removed concurrent mode
- Remote actors are now defined by their UUID (not class name)
- Fixed dispatcher bugs
- Cleaned up Maven scripts and distribution in general
- Fixed many many bugs and minor issues
- Fixed inconsistencies and uglyness in Actors API
- Removed embedded Cassandra mode
- Removed the !? method in Actor (synchronous message send) since it's evil. Use !! with time-out instead.
- Removed startup scripts and lib dir
- Removed the 'Transient' life-cycle scope since it is too close to 'Temporary' in semantics.
- Removed 'Transient' Actors and restart timeout
-
-
-
-
\ No newline at end of file
diff --git a/config/akka-reference.conf b/config/akka-reference.conf
index 749b599e0b..7e93604521 100644
--- a/config/akka-reference.conf
+++ b/config/akka-reference.conf
@@ -19,8 +19,9 @@
# FQN to the class doing initial active object/actor
# supervisor bootstrap, should be defined in default constructor
- boot = ["sample.java.Boot",
- "sample.scala.Boot",
+ boot = ["sample.camel.Boot",
+ "sample.java.Boot",
+ "sample.scala.Boot",
"se.scalablesolutions.akka.security.samples.Boot"]
@@ -30,8 +31,10 @@
service = on
- max-nr-of-retries = 100
- distributed = off # not implemented yet
+ fair = on # should transactions be fair or non-fair (non-fair yields better performance)
+ max-nr-of-retries = 1000 # max nr of retries of a failing transaction before giving up
+ timeout = 10000 # transaction timeout; if transaction has not committed within the timeout then it is aborted
+ distributed = off # not implemented yet
@@ -47,9 +50,10 @@
zlib-compression-level = 6 # Options: 0-9 (1 being fastest and 9 being the most compressed), default is 6
- name = "default" # The name of the cluster
- #actor = "se.scalablesolutions.akka.remote.JGroupsClusterActor" # FQN of an implementation of ClusterActor
- serializer = "se.scalablesolutions.akka.serialization.Serializer$Java" # FQN of the serializer class
+ service = on
+ name = "default" # The name of the cluster
+ actor = "se.scalablesolutions.akka.cluster.jgroups.JGroupsClusterActor" # FQN of an implementation of ClusterActor
+ serializer = "se.scalablesolutions.akka.serialization.Serializer$Java$" # FQN of the serializer class
diff --git a/config/akka.conf b/config/akka.conf
index 94f630089a..84b9bfbbcf 100644
--- a/config/akka.conf
+++ b/config/akka.conf
@@ -1,4 +1,4 @@
-# This config import the Akka reference configuration.
+# This config imports the Akka reference configuration.
include "akka-reference.conf"
# In this file you can override any option defined in the 'akka-reference.conf' file.
diff --git a/deploy/.keep b/deploy/.keep
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/embedded-repo/com/redis/redisclient/1.1/redisclient-1.1.jar b/embedded-repo/com/redis/redisclient/1.1/redisclient-1.1.jar
deleted file mode 100644
index a269f15f7a..0000000000
Binary files a/embedded-repo/com/redis/redisclient/1.1/redisclient-1.1.jar and /dev/null differ
diff --git a/embedded-repo/com/redis/redisclient/1.1/redisclient-1.1.pom b/embedded-repo/com/redis/redisclient/1.1/redisclient-1.1.pom
deleted file mode 100755
index 16dd81402a..0000000000
--- a/embedded-repo/com/redis/redisclient/1.1/redisclient-1.1.pom
+++ /dev/null
@@ -1,8 +0,0 @@
-
-
- 4.0.0
- com.redis
- redisclient
- 1.1
- jar
-
diff --git a/embedded-repo/com/redis/redisclient/1.2-SNAPSHOT/redisclient-1.2-SNAPSHOT.jar b/embedded-repo/com/redis/redisclient/1.2-SNAPSHOT/redisclient-1.2-SNAPSHOT.jar
new file mode 100644
index 0000000000..88815a75d9
Binary files /dev/null and b/embedded-repo/com/redis/redisclient/1.2-SNAPSHOT/redisclient-1.2-SNAPSHOT.jar differ
diff --git a/project/build.properties b/project/build.properties
new file mode 100644
index 0000000000..9f7e717580
--- /dev/null
+++ b/project/build.properties
@@ -0,0 +1,7 @@
+project.organization=se.scalablesolutions.akka
+project.name=akka
+project.version=0.7-SNAPSHOT
+scala.version=2.7.7
+sbt.version=0.7.1
+def.scala.version=2.7.7
+build.scala.versions=2.7.7
diff --git a/project/build/AkkaProject.scala b/project/build/AkkaProject.scala
new file mode 100644
index 0000000000..a891ce1668
--- /dev/null
+++ b/project/build/AkkaProject.scala
@@ -0,0 +1,387 @@
+/*-------------------------------------------------------------------------------
+ Copyright (C) 2009-2010 Scalable Solutions AB
+
+ ----------------------------------------------------
+ -------- sbt buildfile for the Akka project --------
+ ----------------------------------------------------
+
+ Akka implements a unique hybrid of:
+ * Actors , which gives you:
+ * Simple and high-level abstractions for concurrency and parallelism.
+ * Asynchronous, non-blocking and highly performant event-driven programming
+ model.
+ * Very lightweight event-driven processes (create ~6.5 million actors on
+ 4 G RAM).
+ * Supervision hierarchies with let-it-crash semantics. For writing highly
+ fault-tolerant systems that never stop, systems that self-heal.
+ * Software Transactional Memory (STM). (Distributed transactions coming soon).
+ * Transactors: combine actors and STM into transactional actors. Allows you to
+ compose atomic message flows with automatic rollback and retry.
+ * Remoting: highly performant distributed actors with remote supervision and
+ error management.
+ * Cluster membership management.
+
+ Akka also has a set of add-on modules:
+ * Persistence: A set of pluggable back-end storage modules that works in sync
+ with the STM.
+ * Cassandra distributed and highly scalable database.
+ * MongoDB document database.
+ * Redis data structures database (upcoming)
+ * REST (JAX-RS): Expose actors as REST services.
+ * Comet: Expose actors as Comet services.
+ * Security: Digest and Kerberos based security.
+ * Microkernel: Run Akka as a stand-alone kernel.
+
+-------------------------------------------------------------------------------*/
+
+import sbt._
+import java.io.File
+import java.util.jar.Attributes
+
+class AkkaParent(info: ProjectInfo) extends DefaultProject(info) {
+
+ // ------------------------------------------------------------
+ // project versions
+ val JERSEY_VERSION = "1.1.5"
+ val ATMO_VERSION = "0.5.4"
+ val CASSANDRA_VERSION = "0.5.0"
+
+ // ------------------------------------------------------------
+ lazy val akkaHome = {
+ val home = System.getenv("AKKA_HOME")
+ if (home == null) throw new Error("You need to set the $AKKA_HOME environment variable to the root of the Akka distribution")
+ home
+ }
+ lazy val deployPath = Path.fromFile(new java.io.File(akkaHome + "/deploy"))
+ lazy val distPath = Path.fromFile(new java.io.File(akkaHome + "/dist"))
+
+ lazy val dist = zipTask(allArtifacts, "dist", distName) dependsOn (`package`) describedAs("Zips up the distribution.")
+
+ def distName = "%s_%s-%s.zip".format(name, defScalaVersion.value, version)
+
+ // ------------------------------------------------------------
+ // repositories
+ val embeddedrepo = "embedded repo" at new File(akkaHome, "embedded-repo").toURI.toString
+ val sunjdmk = "sunjdmk" at "http://wp5.e-taxonomy.eu/cdmlib/mavenrepo"
+ val databinder = "DataBinder" at "http://databinder.net/repo"
+ val configgy = "Configgy" at "http://www.lag.net/repo"
+ val codehaus = "Codehaus" at "http://repository.codehaus.org"
+ val codehaus_snapshots = "Codehaus Snapshots" at "http://snapshots.repository.codehaus.org"
+ val jboss = "jBoss" at "http://repository.jboss.org/maven2"
+ val guiceyfruit = "GuiceyFruit" at "http://guiceyfruit.googlecode.com/svn/repo/releases/"
+ val google = "google" at "http://google-maven-repository.googlecode.com/svn/repository"
+ val m2 = "m2" at "http://download.java.net/maven/2"
+
+ // ------------------------------------------------------------
+ // project definitions
+ lazy val akka_java_util = project("akka-util-java", "akka-util-java", new AkkaJavaUtilProject(_))
+ lazy val akka_util = project("akka-util", "akka-util", new AkkaUtilProject(_))
+ lazy val akka_core = project("akka-core", "akka-core", new AkkaCoreProject(_), akka_util, akka_java_util)
+ lazy val akka_amqp = project("akka-amqp", "akka-amqp", new AkkaAMQPProject(_), akka_core)
+ lazy val akka_rest = project("akka-rest", "akka-rest", new AkkaRestProject(_), akka_core)
+ lazy val akka_comet = project("akka-comet", "akka-comet", new AkkaCometProject(_), akka_rest)
+ lazy val akka_camel = project("akka-camel", "akka-camel", new AkkaCamelProject(_), akka_core)
+ lazy val akka_patterns = project("akka-patterns", "akka-patterns", new AkkaPatternsProject(_), akka_core)
+ lazy val akka_security = project("akka-security", "akka-security", new AkkaSecurityProject(_), akka_core)
+ lazy val akka_persistence = project("akka-persistence", "akka-persistence", new AkkaPersistenceParentProject(_))
+ lazy val akka_cluster = project("akka-cluster", "akka-cluster", new AkkaClusterParentProject(_))
+ lazy val akka_kernel = project("akka-kernel", "akka-kernel", new AkkaKernelProject(_),
+ akka_core, akka_rest, akka_persistence, akka_cluster, akka_amqp, akka_security, akka_comet, akka_camel, akka_patterns)
+
+ // functional tests in java
+ lazy val akka_fun_test = project("akka-fun-test-java", "akka-fun-test-java", new AkkaFunTestProject(_), akka_kernel)
+
+ // examples
+ lazy val akka_samples = project("akka-samples", "akka-samples", new AkkaSamplesParentProject(_))
+
+ // ------------------------------------------------------------
+ // create executable jar
+ override def mainClass = Some("se.scalablesolutions.akka.kernel.Main")
+
+ override def packageOptions =
+ manifestClassPath.map(cp => ManifestAttributes((Attributes.Name.CLASS_PATH, cp))).toList :::
+ getMainClass(false).map(MainClass(_)).toList
+
+ // create a manifest with all akka jars and dependency jars on classpath
+ override def manifestClassPath = Some(allArtifacts.getFiles
+ .filter(_.getName.endsWith(".jar"))
+ .map("lib_managed/scala_%s/compile/".format(defScalaVersion.value) + _.getName)
+ .mkString(" ") +
+ " dist/akka-util_%s-%s.jar".format(defScalaVersion.value, version) +
+ " dist/akka-util-java_%s-%s.jar".format(defScalaVersion.value, version) +
+ " dist/akka-core_%s-%s.jar".format(defScalaVersion.value, version) +
+ " dist/akka-cluster-shoal_%s-%s.jar".format(defScalaVersion.value, version) +
+ " dist/akka-cluster-jgroups_%s-%s.jar".format(defScalaVersion.value, version) +
+ " dist/akka-rest_%s-%s.jar".format(defScalaVersion.value, version) +
+ " dist/akka-comet_%s-%s.jar".format(defScalaVersion.value, version) +
+ " dist/akka-camel_%s-%s.jar".format(defScalaVersion.value, version) +
+ " dist/akka-security_%s-%s.jar".format(defScalaVersion.value, version) +
+ " dist/akka-amqp_%s-%s.jar".format(defScalaVersion.value, version) +
+ " dist/akka-patterns_%s-%s.jar".format(defScalaVersion.value, version) +
+ " dist/akka-persistence-common_%s-%s.jar".format(defScalaVersion.value, version) +
+ " dist/akka-persistence-redis_%s-%s.jar".format(defScalaVersion.value, version) +
+ " dist/akka-persistence-mongo_%s-%s.jar".format(defScalaVersion.value, version) +
+ " dist/akka-persistence-cassandra_%s-%s.jar".format(defScalaVersion.value, version) +
+ " dist/akka-kernel_%s-%s.jar".format(defScalaVersion.value, version)
+ )
+
+ // ------------------------------------------------------------
+ // publishing
+ override def managedStyle = ManagedStyle.Maven
+ val publishTo = Resolver.file("maven-local", Path.userHome / ".m2" / "repository" asFile)
+
+ // Credentials(Path.userHome / ".akka_publish_credentials", log)
+ val sourceArtifact = Artifact(artifactID, "src", "jar", Some("sources"), Nil, None)
+ //val docsArtifact = Artifact(artifactID, "docs", "jar", Some("javadoc"), Nil, None)
+
+ override def packageDocsJar = defaultJarPath("-javadoc.jar")
+ override def packageSrcJar = defaultJarPath("-sources.jar")
+ override def packageToPublishActions = super.packageToPublishActions ++ Seq(packageDocs, packageSrc)
+
+ override def pomExtra =
+ 2009
+ http://akkasource.org
+
+ Scalable Solutions AB
+ http://scalablesolutions.se
+
+
+
+ Apache 2
+ http://www.apache.org/licenses/LICENSE-2.0.txt
+ repo
+
+
+
+ // ------------------------------------------------------------
+ // subprojects
+ class AkkaCoreProject(info: ProjectInfo) extends DefaultProject(info) {
+ val netty = "org.jboss.netty" % "netty" % "3.2.0.BETA1" % "compile"
+ val commons_io = "commons-io" % "commons-io" % "1.4" % "compile"
+ val dispatch_json = "net.databinder" % "dispatch-json_2.7.7" % "0.6.4" % "compile"
+ val dispatch_http = "net.databinder" % "dispatch-http_2.7.7" % "0.6.4" % "compile"
+ val sjson = "sjson.json" % "sjson" % "0.4" % "compile"
+ val sbinary = "sbinary" % "sbinary" % "0.3" % "compile"
+ val jackson = "org.codehaus.jackson" % "jackson-mapper-asl" % "1.2.1" % "compile"
+ val jackson_core = "org.codehaus.jackson" % "jackson-core-asl" % "1.2.1" % "compile"
+ val voldemort = "voldemort.store.compress" % "h2-lzf" % "1.0" % "compile"
+ val javautils = "org.scala-tools" % "javautils" % "2.7.4-0.1" % "compile"
+ // testing
+ val scalatest = "org.scalatest" % "scalatest" % "1.0" % "test"
+ val junit = "junit" % "junit" % "4.5" % "test"
+ lazy val dist = deployTask(info, distPath) dependsOn(`package`) describedAs("Deploying")
+ }
+
+ class AkkaUtilProject(info: ProjectInfo) extends DefaultProject(info) {
+ val werkz = "org.codehaus.aspectwerkz" % "aspectwerkz-nodeps-jdk5" % "2.1" % "compile"
+ val werkz_core = "org.codehaus.aspectwerkz" % "aspectwerkz-jdk5" % "2.1" % "compile"
+ val configgy = "net.lag" % "configgy" % "1.4.7" % "compile"
+ lazy val dist = deployTask(info, distPath) dependsOn(`package`) describedAs("Deploying")
+ }
+
+ class AkkaJavaUtilProject(info: ProjectInfo) extends DefaultProject(info) {
+ val guicey = "org.guiceyfruit" % "guice-core" % "2.0-beta-4" % "compile"
+ val protobuf = "com.google.protobuf" % "protobuf-java" % "2.2.0" % "compile"
+ val multiverse = "org.multiverse" % "multiverse-alpha" % "0.4-SNAPSHOT" % "compile"
+ lazy val dist = deployTask(info, distPath) dependsOn(`package`) describedAs("Deploying")
+ }
+
+ class AkkaAMQPProject(info: ProjectInfo) extends DefaultProject(info) {
+ val rabbit = "com.rabbitmq" % "amqp-client" % "1.7.2"
+ lazy val dist = deployTask(info, distPath) dependsOn(`package`) describedAs("Deploying")
+ }
+
+ class AkkaRestProject(info: ProjectInfo) extends DefaultProject(info) {
+ val servlet = "javax.servlet" % "servlet-api" % "2.5" % "compile"
+ val jersey = "com.sun.jersey" % "jersey-core" % JERSEY_VERSION % "compile"
+ val jersey_server = "com.sun.jersey" % "jersey-server" % JERSEY_VERSION % "compile"
+ val jersey_json = "com.sun.jersey" % "jersey-json" % JERSEY_VERSION % "compile"
+ val jersey_contrib = "com.sun.jersey.contribs" % "jersey-scala" % JERSEY_VERSION % "compile"
+ val jsr311 = "javax.ws.rs" % "jsr311-api" % "1.1" % "compile"
+ lazy val dist = deployTask(info, distPath) dependsOn(`package`) describedAs("Deploying")
+ }
+
+ class AkkaCometProject(info: ProjectInfo) extends DefaultProject(info) {
+ val grizzly = "com.sun.grizzly" % "grizzly-comet-webserver" % "1.9.18-i" % "compile"
+ val servlet = "javax.servlet" % "servlet-api" % "2.5" % "compile"
+ val atmo = "org.atmosphere" % "atmosphere-annotations" % ATMO_VERSION % "compile"
+ val atmo_jersey = "org.atmosphere" % "atmosphere-jersey" % ATMO_VERSION % "compile"
+ val atmo_runtime = "org.atmosphere" % "atmosphere-runtime" % ATMO_VERSION % "compile"
+ lazy val dist = deployTask(info, distPath) dependsOn(`package`) describedAs("Deploying")
+ }
+
+ class AkkaCamelProject(info: ProjectInfo) extends DefaultProject(info) {
+ val camel_core = "org.apache.camel" % "camel-core" % "2.2.0" % "compile"
+ lazy val dist = deployTask(info, distPath) dependsOn(`package`) describedAs("Deploying")
+ }
+
+ class AkkaPatternsProject(info: ProjectInfo) extends DefaultProject(info) {
+ // testing
+ val scalatest = "org.scalatest" % "scalatest" % "1.0" % "test"
+ val junit = "junit" % "junit" % "4.5" % "test"
+ lazy val dist = deployTask(info, distPath) dependsOn(`package`) describedAs("Deploying")
+ }
+
+ class AkkaSecurityProject(info: ProjectInfo) extends DefaultProject(info) {
+ val annotation = "javax.annotation" % "jsr250-api" % "1.0"
+ val jersey_server = "com.sun.jersey" % "jersey-server" % JERSEY_VERSION % "compile"
+ val jsr311 = "javax.ws.rs" % "jsr311-api" % "1.1" % "compile"
+ val lift_util = "net.liftweb" % "lift-util" % "1.1-M6" % "compile"
+ // testing
+ val scalatest = "org.scalatest" % "scalatest" % "1.0" % "test"
+ val junit = "junit" % "junit" % "4.5" % "test"
+ val mockito = "org.mockito" % "mockito-all" % "1.8.1" % "test"
+ lazy val dist = deployTask(info, distPath) dependsOn(`package`) describedAs("Deploying")
+ }
+
+ class AkkaPersistenceCommonProject(info: ProjectInfo) extends DefaultProject(info) {
+ val thrift = "com.facebook" % "thrift" % "1.0" % "compile"
+ val commons_pool = "commons-pool" % "commons-pool" % "1.5.1" % "compile"
+ lazy val dist = deployTask(info, distPath) dependsOn(`package`) describedAs("Deploying")
+ }
+
+ class AkkaRedisProject(info: ProjectInfo) extends DefaultProject(info) {
+ val redis = "com.redis" % "redisclient" % "1.2-SNAPSHOT" % "compile"
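+ // only run classes named *Test (assumption: the remaining specs are integration tests that need a live Redis instance)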
+ override def testOptions = TestFilter((name: String) => name.endsWith("Test")) :: Nil
+ lazy val dist = deployTask(info, distPath) dependsOn(`package`) describedAs("Deploying")
+ }
+
+ class AkkaMongoProject(info: ProjectInfo) extends DefaultProject(info) {
+ val mongo = "org.mongodb" % "mongo-java-driver" % "1.1" % "compile"
+ override def testOptions = TestFilter((name: String) => name.endsWith("Test")) :: Nil
+ lazy val dist = deployTask(info, distPath) dependsOn(`package`) describedAs("Deploying")
+ }
+
+ class AkkaCassandraProject(info: ProjectInfo) extends DefaultProject(info) {
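+ // the test-scope jars below are presumably what an embedded Cassandra needs at test time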
+ val cassandra = "org.apache.cassandra" % "cassandra" % CASSANDRA_VERSION % "compile"
+ val high_scale = "org.apache.cassandra" % "high-scale-lib" % CASSANDRA_VERSION % "test"
+ val cassandra_clhm = "org.apache.cassandra" % "clhm-production" % CASSANDRA_VERSION % "test"
+ val commons_coll = "commons-collections" % "commons-collections" % "3.2.1" % "test"
+ val google_coll = "com.google.collections" % "google-collections" % "1.0" % "test"
+ val slf4j = "org.slf4j" % "slf4j-api" % "1.5.8" % "test"
+ val slf4j_log4j = "org.slf4j" % "slf4j-log4j12" % "1.5.8" % "test"
+ val log4j = "log4j" % "log4j" % "1.2.15" % "test"
+ override def testOptions = TestFilter((name: String) => name.endsWith("Test")) :: Nil
+ lazy val dist = deployTask(info, distPath) dependsOn(`package`) describedAs("Deploying")
+ }
+
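+ // persistence backends grouped under one parent; each submodule builds against akka-persistence-common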
+ class AkkaPersistenceParentProject(info: ProjectInfo) extends ParentProject(info) {
+ lazy val akka_persistence_common = project("akka-persistence-common", "akka-persistence-common", new AkkaPersistenceCommonProject(_), akka_core)
+ lazy val akka_persistence_redis = project("akka-persistence-redis", "akka-persistence-redis", new AkkaRedisProject(_), akka_persistence_common)
+ lazy val akka_persistence_mongo = project("akka-persistence-mongo", "akka-persistence-mongo", new AkkaMongoProject(_), akka_persistence_common)
+ lazy val akka_persistence_cassandra = project("akka-persistence-cassandra", "akka-persistence-cassandra", new AkkaCassandraProject(_), akka_persistence_common)
+ }
+
+ class AkkaJgroupsProject(info: ProjectInfo) extends DefaultProject(info) {
+ val jgroups = "jgroups" % "jgroups" % "2.8.0.CR7" % "compile"
+ lazy val dist = deployTask(info, distPath) dependsOn(`package`) describedAs("Deploying")
+ }
+
+ class AkkaShoalProject(info: ProjectInfo) extends DefaultProject(info) {
+ val shoal = "shoal-jxta" % "shoal" % "1.1-20090818" % "compile"
+ val shoal_extra = "shoal-jxta" % "jxta" % "1.1-20090818" % "compile"
+ lazy val dist = deployTask(info, distPath) dependsOn(`package`) describedAs("Deploying")
+ }
+
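+ // one cluster submodule per group-membership backend (JGroups and Shoal)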
+ class AkkaClusterParentProject(info: ProjectInfo) extends ParentProject(info) {
+ lazy val akka_cluster_jgroups = project("akka-cluster-jgroups", "akka-cluster-jgroups", new AkkaJgroupsProject(_), akka_core)
+ lazy val akka_cluster_shoal = project("akka-cluster-shoal", "akka-cluster-shoal", new AkkaShoalProject(_), akka_core)
+ }
+
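+ // the kernel module declares no library dependencies of its own; it only packages and deploys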
+ class AkkaKernelProject(info: ProjectInfo) extends DefaultProject(info) {
+ lazy val dist = deployTask(info, distPath) dependsOn(`package`) describedAs("Deploying")
+ }
+
+ // examples
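+ // note: the sample projects deploy to deployPath (presumably where the microkernel picks up applications) rather than distPath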
+ class AkkaFunTestProject(info: ProjectInfo) extends DefaultProject(info) {
+ val protobuf = "com.google.protobuf" % "protobuf-java" % "2.2.0"
+ val grizzly = "com.sun.grizzly" % "grizzly-comet-webserver" % "1.9.18-i" % "compile"
+ val jersey_server = "com.sun.jersey" % "jersey-server" % JERSEY_VERSION % "compile"
+ val jersey_json = "com.sun.jersey" % "jersey-json" % JERSEY_VERSION % "compile"
+ val jersey_atom = "com.sun.jersey" % "jersey-atom" % JERSEY_VERSION % "compile"
+ // testing
+ val junit = "junit" % "junit" % "4.5" % "test"
+ val jmock = "org.jmock" % "jmock" % "2.4.0" % "test"
+ }
+
+ class AkkaSampleChatProject(info: ProjectInfo) extends DefaultProject(info) {
+ lazy val dist = deployTask(info, deployPath) dependsOn(`package`) describedAs("Deploying")
+ }
+
+ class AkkaSampleLiftProject(info: ProjectInfo) extends DefaultProject(info) {
+ val lift = "net.liftweb" % "lift-webkit" % "1.1-M6" % "compile"
+ val lift_util = "net.liftweb" % "lift-util" % "1.1-M6" % "compile"
+ val servlet = "javax.servlet" % "servlet-api" % "2.5" % "compile"
+ // testing
+ val jetty = "org.mortbay.jetty" % "jetty" % "6.1.22" % "test"
+ val junit = "junit" % "junit" % "4.5" % "test"
+ lazy val dist = deployTask(info, deployPath) dependsOn(`package`) describedAs("Deploying")
+ }
+
+ class AkkaSampleRestJavaProject(info: ProjectInfo) extends DefaultProject(info) {
+ lazy val dist = deployTask(info, deployPath) dependsOn(`package`) describedAs("Deploying")
+ }
+
+ class AkkaSampleRestScalaProject(info: ProjectInfo) extends DefaultProject(info) {
+ val jsr311 = "javax.ws.rs" % "jsr311-api" % "1.1" % "compile"
+ lazy val dist = deployTask(info, deployPath) dependsOn(`package`) describedAs("Deploying")
+ }
+
+ class AkkaSampleCamelProject(info: ProjectInfo) extends DefaultProject(info) {
+ val camel_jetty = "org.apache.camel" % "camel-jetty" % "2.2.0" % "compile"
+ val camel_jms = "org.apache.camel" % "camel-jms" % "2.2.0" % "compile"
+ val activemq_core = "org.apache.activemq" % "activemq-core" % "5.3.0" % "compile"
+ lazy val dist = deployTask(info, deployPath) dependsOn(`package`) describedAs("Deploying")
+ }
+
+ class AkkaSampleSecurityProject(info: ProjectInfo) extends DefaultProject(info) {
+ val jsr311 = "javax.ws.rs" % "jsr311-api" % "1.1" % "compile"
+ val jsr250 = "javax.annotation" % "jsr250-api" % "1.0"
+ lazy val dist = deployTask(info, deployPath) dependsOn(`package`) describedAs("Deploying")
+ }
+
+ class AkkaSamplesParentProject(info: ProjectInfo) extends ParentProject(info) {
+ lazy val akka_sample_chat = project("akka-sample-chat", "akka-sample-chat", new AkkaSampleChatProject(_), akka_kernel)
+ lazy val akka_sample_lift = project("akka-sample-lift", "akka-sample-lift", new AkkaSampleLiftProject(_), akka_kernel)
+ lazy val akka_sample_rest_java = project("akka-sample-rest-java", "akka-sample-rest-java", new AkkaSampleRestJavaProject(_), akka_kernel)
+ lazy val akka_sample_rest_scala = project("akka-sample-rest-scala", "akka-sample-rest-scala", new AkkaSampleRestScalaProject(_), akka_kernel)
+ lazy val akka_sample_camel = project("akka-sample-camel", "akka-sample-camel", new AkkaSampleCamelProject(_), akka_kernel)
+ lazy val akka_sample_security = project("akka-sample-security", "akka-sample-security", new AkkaSampleSecurityProject(_), akka_kernel)
+ }
+
+ // ------------------------------------------------------------
+ // helper functions
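+ // removeDupEntries keeps one entry per relative path; building a Map means later duplicates win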
+ def removeDupEntries(paths: PathFinder) =
+ Path.lazyPathFinder {
+ val mapped = paths.get map { p => (p.relativePath, p) }
+ (Map() ++ mapped).values.toList
+ }
+
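+ // everything that goes into a distribution: the module jar, resources, scala jars,
+ // *.conf files and all compile-scope dependency jars (minus duplicate scala-library jars)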
+ def allArtifacts = {
+ (removeDupEntries(runClasspath filter ClasspathUtilities.isArchive) +++
+ ((outputPath ##) / defaultJarName) +++
+ mainResources +++
+ mainDependencies.scalaJars +++
+ descendents(info.projectPath, "*.conf") +++
+ descendents(info.projectPath / "dist", "*.jar") +++
+ descendents(info.projectPath / "deploy", "*.jar") +++
+ descendents(path("lib") ##, "*.jar") +++
+ descendents(configurationPath(Configurations.Compile) ##, "*.jar"))
+ .filter(jar =>
+ !jar.toString.endsWith("scala-library-2.7.5.jar") && // remove redundant scala libs
+ !jar.toString.endsWith("scala-library-2.7.6.jar"))
+ }
+
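+ // copies the jar built by `package` into toDir; this is what the per-module `dist` tasks run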
+ def deployTask(info: ProjectInfo, toDir: Path) = task {
+ val projectPath = info.projectPath.toString
+ val moduleName = projectPath.substring(projectPath.lastIndexOf(java.io.File.separator) + 1)
+ // FIXME need to find out a way to grab these paths from the sbt system
+ val JAR_FILE_NAME = moduleName + "_%s-%s.jar".format(defScalaVersion.value, version)
+ val JAR_FILE_PATH = projectPath + "/target/scala_%s/".format(defScalaVersion.value) + JAR_FILE_NAME
+
+ val from = Path.fromFile(new java.io.File(JAR_FILE_PATH))
+ val to = Path.fromFile(new java.io.File(toDir + "/" + JAR_FILE_NAME))
+ log.info("Deploying " + to)
+ FileUtilities.copyFile(from, to, log)
+ }
+}
diff --git a/scripts/run_akka.sh b/scripts/run_akka.sh
new file mode 100755
index 0000000000..c07397adeb
--- /dev/null
+++ b/scripts/run_akka.sh
@@ -0,0 +1,18 @@
+#!/bin/bash
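+# Usage: run_akka.sh <dist-name> [jvm-args...]; requires AKKA_HOME to be set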
+cd "$AKKA_HOME"
+VERSION=akka_2.7.7-0.7-SNAPSHOT
+TARGET_DIR=dist/$1
+shift 1
+VMARGS=("$@")
+
+if [ -d "$TARGET_DIR" ]; then
+ cd "$TARGET_DIR"
+else
+ unzip dist/${VERSION}.zip -d "$TARGET_DIR"
+ cd "$TARGET_DIR"
+fi
+
+export AKKA_HOME=`pwd`
+# JVM arguments must come before -jar, or java treats them as the jar name
+java "${VMARGS[@]}" -jar ${VERSION}.jar
\ No newline at end of file