diff --git a/.gitignore b/.gitignore index 22379bef4c..69dd6d55c9 100755 --- a/.gitignore +++ b/.gitignore @@ -1,5 +1,9 @@ *~ *# +project/boot/* +*/project/build/target +*/project/boot +lib_managed etags TAGS reports
diff --git a/akka-amqp/pom.xml b/akka-amqp/pom.xml deleted file mode 100644 index aa569958a6..0000000000 --- a/akka-amqp/pom.xml +++ /dev/null @@ -1,29 +0,0 @@ - - 4.0.0 - - akka-amqp - Akka AMQP Module - - jar - - - akka - se.scalablesolutions.akka - 0.7-SNAPSHOT - - - - - akka-core - ${project.groupId} - ${project.version} - - - com.rabbitmq - amqp-client - 1.7.0 - - - -
diff --git a/akka-camel/src/main/resources/META-INF/services/org/apache/camel/component/actor b/akka-camel/src/main/resources/META-INF/services/org/apache/camel/component/actor new file mode 100644 index 0000000000..a2141db8a9 --- /dev/null +++ b/akka-camel/src/main/resources/META-INF/services/org/apache/camel/component/actor @@ -0,0 +1 @@ +class=se.scalablesolutions.akka.camel.component.ActorComponent \ No newline at end of file
diff --git a/akka-camel/src/main/scala/CamelContextLifecycle.scala b/akka-camel/src/main/scala/CamelContextLifecycle.scala new file mode 100644 index 0000000000..b9a696207c --- /dev/null +++ b/akka-camel/src/main/scala/CamelContextLifecycle.scala @@ -0,0 +1,95 @@ +/** + * Copyright (C) 2009-2010 Scalable Solutions AB + */ + +package se.scalablesolutions.akka.camel + +import org.apache.camel.{ProducerTemplate, CamelContext} +import org.apache.camel.impl.DefaultCamelContext + +import se.scalablesolutions.akka.util.Logging + +/** + * Defines the lifecycle of a CamelContext. Allowed state transitions are + * init -> start -> stop -> init -> ... etc. + * + * @author Martin Krasser + */ +trait CamelContextLifecycle extends Logging { + // TODO: enforce correct state transitions + // valid: init -> start -> stop -> init ... + + private var _context: CamelContext = _ + private var _template: ProducerTemplate = _ + + private var _initialized = false + private var _started = false + + /** + * Returns the managed CamelContext. + */ + protected def context: CamelContext = _context + + /** + * Returns the managed ProducerTemplate. + */ + protected def template: ProducerTemplate = _template + + /** + * Sets the managed CamelContext. + */ + protected def context_= (context: CamelContext) { _context = context } + + /** + * Sets the managed ProducerTemplate. + */ + protected def template_= (template: ProducerTemplate) { _template = template } + + def initialized = _initialized + def started = _started + + /** + * Starts the CamelContext and ProducerTemplate. + */ + def start = { + context.start + template.start + _started = true + log.info("Camel context started") + } + + /** + * Stops the CamelContext and ProducerTemplate. + */ + def stop = { + template.stop + context.stop + _initialized = false + _started = false + log.info("Camel context stopped") + } + + /** + * Initializes this lifecycle object with a DefaultCamelContext. + */ + def init: Unit = init(new DefaultCamelContext) + + /** + * Initializes this lifecycle object with the given CamelContext. + */ + def init(context: CamelContext) { + this.context = context + this.template = context.createProducerTemplate + _initialized = true + log.info("Camel context initialized") + } +}
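An illustrative sketch of driving this lifecycle through the CamelContextManager object defined just below (the demo object is hypothetical, not part of the patch):

    import se.scalablesolutions.akka.camel.CamelContextManager

    object LifecycleSketch {
      def main(args: Array[String]) {
        CamelContextManager.init    // or init(someCustomCamelContext); valid transitions: init -> start -> stop -> init ...
        CamelContextManager.start   // starts the managed CamelContext and ProducerTemplate
        // ... exchange messages via CamelContextManager.template ...
        CamelContextManager.stop    // a stopped context may be re-initialized with init
      }
    }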
+ +/** + * Makes a global CamelContext and ProducerTemplate accessible to applications. The lifecycle + * of these objects is managed by se.scalablesolutions.akka.camel.service.CamelService. + */ +object CamelContextManager extends CamelContextLifecycle { + override def context: CamelContext = super.context + override def template: ProducerTemplate = super.template +} \ No newline at end of file
diff --git a/akka-camel/src/main/scala/Consumer.scala b/akka-camel/src/main/scala/Consumer.scala new file mode 100644 index 0000000000..27ec98b25d --- /dev/null +++ b/akka-camel/src/main/scala/Consumer.scala @@ -0,0 +1,20 @@ +/** + * Copyright (C) 2009-2010 Scalable Solutions AB + */ + +package se.scalablesolutions.akka.camel + +import se.scalablesolutions.akka.actor.Actor + +/** + * Mixed in by Actor implementations that consume messages from Camel endpoints. + * + * @author Martin Krasser + */ +trait Consumer { self: Actor => + + /** + * Returns the Camel endpoint URI to consume messages from. + */ + def endpointUri: String +} \ No newline at end of file
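For illustration, a minimal consumer actor might look as follows (class name and endpoint URI are hypothetical; the pattern mirrors the test consumers further down in this patch):

    import se.scalablesolutions.akka.actor.Actor
    import se.scalablesolutions.akka.camel.{Consumer, Message}

    class EchoConsumer extends Actor with Consumer {
      def endpointUri = "direct:echo"   // hypothetical endpoint
      protected def receive = {
        case msg: Message => reply("received %s" format msg.body)
      }
    }

    // (new EchoConsumer).start — once started, a running CamelService publishes it at direct:echo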
diff --git a/akka-camel/src/main/scala/Message.scala b/akka-camel/src/main/scala/Message.scala new file mode 100644 index 0000000000..8e0156c669 --- /dev/null +++ b/akka-camel/src/main/scala/Message.scala @@ -0,0 +1,249 @@ +/** + * Copyright (C) 2009-2010 Scalable Solutions AB + */ + +package se.scalablesolutions.akka.camel + +import org.apache.camel.{Exchange, Message => CamelMessage} +import org.apache.camel.util.ExchangeHelper + +import scala.collection.jcl.{Map => MapWrapper} + +/** + * An immutable representation of a Camel message. Actor classes that mix in + * se.scalablesolutions.akka.camel.Producer or + * se.scalablesolutions.akka.camel.Consumer use this message type for communication. + * + * @author Martin Krasser + */ +case class Message(val body: Any, val headers: Map[String, Any]) { + /** + * Creates a message with a body and an empty header map. + */ + def this(body: Any) = this(body, Map.empty) + + /** + * Returns the body of the message converted to the type given by the clazz + * argument. Conversion is done using Camel's type converter. The type converter is obtained + * from the CamelContext managed by CamelContextManager. Applications have to ensure proper + * initialization of CamelContextManager. + * + * @see CamelContextManager. + */ + def bodyAs[T](clazz: Class[T]): T = + CamelContextManager.context.getTypeConverter.mandatoryConvertTo[T](clazz, body) + + /** + * Returns those headers from this message whose name is contained in names. + */ + def headers(names: Set[String]): Map[String, Any] = headers.filter(names contains _._1) + + /** + * Creates a new Message with the body transformed by the given transformer function. + */ + def transformBody[A](transformer: A => Any): Message = setBody(transformer(body.asInstanceOf[A])) + + /** + * Creates a new Message with the body converted to type clazz. + * + * @see Message#bodyAs(Class) + */ + def setBodyAs[T](clazz: Class[T]): Message = setBody(bodyAs(clazz)) + + /** + * Creates a new Message with a new body. + */ + def setBody(body: Any) = new Message(body, this.headers) + + /** + * Creates a new Message with new headers. + */ + def setHeaders(headers: Map[String, Any]) = new Message(this.body, headers) + + /** + * Creates a new Message with the headers argument added to the existing headers. + */ + def addHeaders(headers: Map[String, Any]) = new Message(this.body, this.headers ++ headers) + + /** + * Creates a new Message with the header argument added to the existing headers. + */ + def addHeader(header: (String, Any)) = new Message(this.body, this.headers + header) + + /** + * Creates a new Message where the header with name headerName is removed from + * the existing headers. + */ + def removeHeader(headerName: String) = new Message(this.body, this.headers - headerName) +}
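A short sketch of how these immutable operations compose (values are hypothetical; results shown in comments match the semantics exercised by MessageTest later in this patch):

    import se.scalablesolutions.akka.camel.Message

    object MessageApiSketch {
      val m  = Message("a", Map("A" -> "1"))
      val m1 = m.setBody("b")                    // Message("b", Map("A" -> "1"))
      val m2 = m.addHeader("B" -> "2")           // Message("a", Map("A" -> "1", "B" -> "2"))
      val m3 = m.transformBody[String](_ + "b")  // Message("ab", Map("A" -> "1"))
      val h  = m.headers(Set("A"))               // Map("A" -> "1")
    }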
+ +/** + * Companion object of Message class. + * + * @author Martin Krasser + */ +object Message { + + /** + * Message header to correlate request with response messages. Applications that send + * messages to a Producer actor may want to set this header on the request message + * so that it can be correlated with an asynchronous response. Messages sent to Consumer + * actors have this header already set. + */ + val MessageExchangeId = "MessageExchangeId".intern + + /** + * Creates a new Message with body as message body and an empty header map. + */ + def apply(body: Any) = new Message(body) + + /** + * Creates a canonical form of the given message msg. If msg is of type + * Message then msg is returned, otherwise msg is set as the body of a + * newly created Message object. + */ + def canonicalize(msg: Any) = msg match { + case mobj: Message => mobj + case body => new Message(body) + } +} + +/** + * An immutable representation of a failed Camel exchange. It contains the failure cause + * obtained from Exchange.getException and the headers from either the Exchange.getIn + * message or Exchange.getOut message, depending on the exchange pattern. + * + * @author Martin Krasser + */ +case class Failure(val cause: Exception, val headers: Map[String, Any]) + +/** + * Adapter for converting an org.apache.camel.Exchange to and from Message and Failure objects. + * + * @author Martin Krasser + */ +class CamelExchangeAdapter(exchange: Exchange) { + + import CamelMessageConversion.toMessageAdapter + + /** + * Sets Exchange.getIn from the given Message object. + */ + def fromRequestMessage(msg: Message): Exchange = { requestMessage.fromMessage(msg); exchange } + + /** + * Depending on the exchange pattern, sets Exchange.getIn or Exchange.getOut from the given + * Message object. If the exchange is out-capable then Exchange.getOut is set, otherwise + * Exchange.getIn. + */ + def fromResponseMessage(msg: Message): Exchange = { responseMessage.fromMessage(msg); exchange } + + /** + * Sets Exchange.getException from the given Failure message. Headers of the Failure message + * are ignored. + */ + def fromFailureMessage(msg: Failure): Exchange = { exchange.setException(msg.cause); exchange } + + /** + * Creates a Message object from Exchange.getIn. + */ + def toRequestMessage: Message = toRequestMessage(Map.empty) + + /** + * Depending on the exchange pattern, creates a Message object from Exchange.getIn or Exchange.getOut. + * If the exchange is out-capable then Exchange.getOut is used, otherwise Exchange.getIn. + */ + def toResponseMessage: Message = toResponseMessage(Map.empty) + + /** + * Creates a Failure object from the adapted Exchange. + * + * @see Failure + */ + def toFailureMessage: Failure = toFailureMessage(Map.empty) + + /** + * Creates a Message object from Exchange.getIn. + * + * @param headers additional headers to set on the created Message in addition to those + * in the Camel message. + */ + def toRequestMessage(headers: Map[String, Any]): Message = requestMessage.toMessage(headers) + + /** + * Depending on the exchange pattern, creates a Message object from Exchange.getIn or Exchange.getOut. + * If the exchange is out-capable then Exchange.getOut is used, otherwise Exchange.getIn. + * + * @param headers additional headers to set on the created Message in addition to those + * in the Camel message. + */ + def toResponseMessage(headers: Map[String, Any]): Message = responseMessage.toMessage(headers) + + /** + * Creates a Failure object from the adapted Exchange. + * + * @param headers additional headers to set on the created Failure in addition to those + * in the Camel message. + * + * @see Failure + */ + def toFailureMessage(headers: Map[String, Any]): Failure = + Failure(exchange.getException, headers ++ responseMessage.toMessage.headers) + + private def requestMessage = exchange.getIn + + private def responseMessage = ExchangeHelper.getResultMessage(exchange) + +} + +/** + * Adapter for converting an org.apache.camel.Message to and from Message objects. + * + * @author Martin Krasser + */ +class CamelMessageAdapter(val cm: CamelMessage) { + /** + * Sets the adapted Camel message from the given Message object. + */ + def fromMessage(m: Message): CamelMessage = { + cm.setBody(m.body) + for (h <- m.headers) cm.getHeaders.put(h._1, h._2.asInstanceOf[AnyRef]) + cm + } + + /** + * Creates a new Message object from the adapted Camel message. + */ + def toMessage: Message = toMessage(Map.empty) + + /** + * Creates a new Message object from the adapted Camel message. + * + * @param headers additional headers to set on the created Message in addition to those + * in the Camel message. + */ + def toMessage(headers: Map[String, Any]): Message = Message(cm.getBody, cmHeaders(headers, cm)) + + private def cmHeaders(headers: Map[String, Any], cm: CamelMessage) = + headers ++ MapWrapper[String, AnyRef](cm.getHeaders).elements +} + +/** + * Defines conversion methods to CamelExchangeAdapter and CamelMessageAdapter. + * Imported by applications that want to implicitly use the conversion methods of + * CamelExchangeAdapter and CamelMessageAdapter. + */ +object CamelMessageConversion { + + /** + * Creates a CamelExchangeAdapter for the given Camel exchange. + */ + implicit def toExchangeAdapter(ce: Exchange): CamelExchangeAdapter = + new CamelExchangeAdapter(ce) + + /** + * Creates a CamelMessageAdapter for the given Camel message. + */ + implicit def toMessageAdapter(cm: CamelMessage): CamelMessageAdapter = + new CamelMessageAdapter(cm) +} \ No newline at end of file
diff --git a/akka-camel/src/main/scala/Producer.scala b/akka-camel/src/main/scala/Producer.scala new file mode 100644 index 0000000000..f4cafa2b2e --- /dev/null +++ b/akka-camel/src/main/scala/Producer.scala @@ -0,0 +1,211 @@ +/** + * Copyright (C) 2009-2010 Scalable Solutions AB + */ + +package se.scalablesolutions.akka.camel + +import CamelMessageConversion.toExchangeAdapter + +import org.apache.camel.{Processor, ExchangePattern, Exchange, ProducerTemplate} +import org.apache.camel.impl.DefaultExchange +import org.apache.camel.spi.Synchronization + +import se.scalablesolutions.akka.actor.Actor +import se.scalablesolutions.akka.dispatch.CompletableFuture +import se.scalablesolutions.akka.util.Logging + +/** + * Mixed in by Actor implementations that produce messages to Camel endpoints. + * + * @author Martin Krasser + */ +trait Producer { self: Actor => + + private val headersToCopyDefault = Set(Message.MessageExchangeId) + + /** + * If set to true (default), communication with the Camel endpoint is done via the Camel + * Async API. Camel then processes the + * message in a separate thread. If set to false, the actor thread is blocked until Camel + * has finished processing the produced message. + */ + def async: Boolean = true + + /** + * If set to false (default), this producer expects a response message from the Camel endpoint.
+ * If set to true, this producer communicates with the Camel endpoint with an in-only message + * exchange pattern (fire and forget). + */ + def oneway: Boolean = false + + /** + * Returns the Camel endpoint URI to produce messages to. + */ + def endpointUri: String + + /** + * Returns the names of message headers to copy from a request message to a response message. + * By default only the Message.MessageExchangeId is copied. Applications may override this to + * define an application-specific set of message headers to copy. + */ + def headersToCopy: Set[String] = headersToCopyDefault + + /** + * Returns the producer template from the CamelContextManager. Applications either have to ensure + * proper initialization of CamelContextManager or override this method. + * + * @see CamelContextManager. + */ + protected def template: ProducerTemplate = CamelContextManager.template + + /** + * Initiates a one-way (in-only) message exchange to the Camel endpoint given by + * endpointUri. This method blocks until Camel finishes processing + * the message exchange. + * + * @param msg: the message to produce. The message is converted to its canonical + * representation via Message.canonicalize. + */ + protected def produceOneway(msg: Any): Unit = + template.send(endpointUri, createInOnlyExchange.fromRequestMessage(Message.canonicalize(msg))) + + /** + * Initiates a one-way (in-only) message exchange to the Camel endpoint given by + * endpointUri. This method triggers asynchronous processing of the + * message exchange by Camel. + * + * @param msg: the message to produce. The message is converted to its canonical + * representation via Message.canonicalize. + */ + protected def produceOnewayAsync(msg: Any): Unit = + template.asyncSend( + endpointUri, createInOnlyExchange.fromRequestMessage(Message.canonicalize(msg))) + + /** + * Initiates a two-way (in-out) message exchange to the Camel endpoint given by + * endpointUri. This method blocks until Camel finishes processing + * the message exchange. + * + * @param msg: the message to produce. The message is converted to its canonical + * representation via Message.canonicalize. + * @return either a response Message or a Failure object. + */ + protected def produce(msg: Any): Any = { + val cmsg = Message.canonicalize(msg) + val requestProcessor = new Processor() { + def process(exchange: Exchange) = exchange.fromRequestMessage(cmsg) + } + val result = template.request(endpointUri, requestProcessor) + if (result.isFailed) result.toFailureMessage(cmsg.headers(headersToCopy)) + else result.toResponseMessage(cmsg.headers(headersToCopy)) + } + + /** + * Initiates a two-way (in-out) message exchange to the Camel endpoint given by + * endpointUri. This method triggers asynchronous processing of the + * message exchange by Camel. The response message is returned asynchronously to + * the original sender (or sender future). + * + * @param msg: the message to produce. The message is converted to its canonical + * representation via Message.canonicalize. + * @return either a response Message or a Failure object. + * @see ProducerResponseSender + */ + protected def produceAsync(msg: Any): Unit = { + val cmsg = Message.canonicalize(msg) + val sync = new ProducerResponseSender( + cmsg.headers(headersToCopy), this.sender, this.senderFuture, this) + template.asyncCallback(endpointUri, createInOutExchange.fromRequestMessage(cmsg), sync) + } + + /** + * Default implementation for Actor.receive. Implementors may choose to + * def receive = produce. 
This partial function calls one of + * the protected produce methods depending on the return values of + * oneway and async. + */ + protected def produce: PartialFunction[Any, Unit] = { + case msg => { + if ( oneway && !async) produceOneway(msg) + else if ( oneway && async) produceOnewayAsync(msg) + else if (!oneway && !async) reply(produce(msg)) + else /*(!oneway && async)*/ produceAsync(msg) + } + } + + /** + * Creates a new in-only Exchange. + */ + protected def createInOnlyExchange: Exchange = createExchange(ExchangePattern.InOnly) + + /** + * Creates a new in-out Exchange. + */ + protected def createInOutExchange: Exchange = createExchange(ExchangePattern.InOut) + + /** + * Creates a new Exchange with given pattern from the CamelContext managed by + * CamelContextManager. Applications either have to ensure proper initialization + * of CamelContextManager or override this method. + * + * @see CamelContextManager. + */ + protected def createExchange(pattern: ExchangePattern): Exchange = + new DefaultExchange(CamelContextManager.context, pattern) +} + +/** + * Synchronization object that sends responses asynchronously to initial senders. This + * class is used by Producer for asynchronous two-way messaging with a Camel endpoint. + * + * @author Martin Krasser + */ +class ProducerResponseSender( + headers: Map[String, Any], + sender: Option[Actor], + senderFuture: Option[CompletableFuture], + producer: Actor) extends Synchronization with Logging { + + implicit val producerActor = Some(producer) // the response sender + + /** + * Replies with a Failure message, created from the given exchange, to sender (or + * senderFuture if applicable). + */ + def onFailure(exchange: Exchange) = reply(exchange.toFailureMessage(headers)) + + /** + * Replies with a response Message, created from the given exchange, to sender (or + * senderFuture if applicable). + */ + def onComplete(exchange: Exchange) = reply(exchange.toResponseMessage(headers)) + + private def reply(message: Any) = { + sender match { + case Some(actor) => actor ! message + case None => senderFuture match { + case Some(future) => future.completeWithResult(message) + case None => log.warning("No destination for sending response") + } + } + } +} + +/** + * A one-way producer. + * + * @author Martin Krasser + */ +trait Oneway extends Producer { self: Actor => + override def oneway = true +} + +/** + * A synchronous producer. + * + * @author Martin Krasser + */ +trait Sync extends Producer { self: Actor => + override def async = false +} +
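As an illustration, a producer actor is declared by mixing Producer into an Actor and delegating receive to produce (class name and endpoint URI are hypothetical; the pattern matches ProducerFeatureTest below):

    import se.scalablesolutions.akka.actor.Actor
    import se.scalablesolutions.akka.camel.{Message, Oneway, Producer, Sync}

    class OrderProducer extends Actor with Producer {
      def endpointUri = "direct:orders"   // hypothetical endpoint
      protected def receive = produce     // dispatches on oneway/async as described above
    }

    // interaction styles:
    //   new OrderProducer                — asynchronous two-way (the defaults)
    //   new OrderProducer with Sync      — blocking two-way
    //   new OrderProducer with Oneway    — fire-and-forget
    // after producer.start, producer !! Message("test") yields Some(response Message) or a Failure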
diff --git a/akka-camel/src/main/scala/component/ActorComponent.scala b/akka-camel/src/main/scala/component/ActorComponent.scala new file mode 100644 index 0000000000..763f9dd017 --- /dev/null +++ b/akka-camel/src/main/scala/component/ActorComponent.scala @@ -0,0 +1,152 @@ +/** + * Copyright (C) 2009-2010 Scalable Solutions AB + */ + +package se.scalablesolutions.akka.camel.component + +import java.lang.{RuntimeException, String} +import java.util.{Map => JavaMap} +import java.util.concurrent.TimeoutException + +import org.apache.camel.{Exchange, Consumer, Processor} +import org.apache.camel.impl.{DefaultProducer, DefaultEndpoint, DefaultComponent} + +import se.scalablesolutions.akka.actor.{ActorRegistry, Actor} +import se.scalablesolutions.akka.camel.{Failure, CamelMessageConversion, Message} + +/** + * Camel component for sending messages to and receiving replies from actors. + * + * @see se.scalablesolutions.akka.camel.component.ActorEndpoint + * @see se.scalablesolutions.akka.camel.component.ActorProducer + * + * @author Martin Krasser + */ +class ActorComponent extends DefaultComponent { + def createEndpoint(uri: String, remaining: String, parameters: JavaMap[String, Object]): ActorEndpoint = { + val idAndUuid = idAndUuidPair(remaining) + new ActorEndpoint(uri, this, idAndUuid._1, idAndUuid._2) + } + + private def idAndUuidPair(remaining: String): Tuple2[Option[String], Option[String]] = { + remaining split ":" toList match { + case id :: Nil => (Some(id), None) + case "id" :: id :: Nil => (Some(id), None) + case "uuid" :: uuid :: Nil => (None, Some(uuid)) + case _ => throw new IllegalArgumentException( + "invalid path format: %s - should be <actorid> or id:<actorid> or uuid:<actoruuid>" format remaining) + } + } +}
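For example, once an actor is started and the global context is initialized, messages can be sent through the producer template using any of the three path formats parsed above (actor name and payload are hypothetical):

    import se.scalablesolutions.akka.camel.CamelContextManager

    object ActorUriSketch {
      def send(uuid: String) {
        CamelContextManager.template.sendBody("actor:myActorId", "hello")     // short id form
        CamelContextManager.template.sendBody("actor:id:myActorId", "hello")  // explicit id form
        CamelContextManager.template.sendBody("actor:uuid:%s" format uuid, "hello")
      }
    }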
+ +/** + * Camel endpoint for referencing an actor. The actor reference is given by the endpoint URI. + * An actor can be referenced by its Actor.getId or its Actor.uuid. + * Supported endpoint URI formats are + * actor:<actorid>, + * actor:id:<actorid> and + * actor:uuid:<actoruuid>. + * + * @see se.scalablesolutions.akka.camel.component.ActorComponent + * @see se.scalablesolutions.akka.camel.component.ActorProducer + * + * @author Martin Krasser + */ +class ActorEndpoint(uri: String, + comp: ActorComponent, + val id: Option[String], + val uuid: Option[String]) extends DefaultEndpoint(uri, comp) { + + /** + * @throws UnsupportedOperationException + */ + def createConsumer(processor: Processor): Consumer = + throw new UnsupportedOperationException("actor consumer not supported yet") + + /** + * Creates a new ActorProducer instance initialized with this endpoint. + */ + def createProducer: ActorProducer = new ActorProducer(this) + + /** + * Returns true. + */ + def isSingleton: Boolean = true +} + +/** + * Sends the in-message of an exchange to an actor. If the exchange pattern is out-capable, + * the producer waits for a reply (using the !! operator), otherwise the ! operator is used + * for sending the message. + * + * @see se.scalablesolutions.akka.camel.component.ActorComponent + * @see se.scalablesolutions.akka.camel.component.ActorEndpoint + * + * @author Martin Krasser + */ +class ActorProducer(val ep: ActorEndpoint) extends DefaultProducer(ep) { + import CamelMessageConversion.toExchangeAdapter + + implicit val sender = None + + /** + * Depending on the exchange pattern, this method either calls processInOut or + * processInOnly for interacting with an actor. This method looks up the actor + * from the ActorRegistry according to this producer's endpoint URI. + * + * @param exchange represents the message exchange with the actor. + */ + def process(exchange: Exchange) { + val actor = target getOrElse (throw new ActorNotRegisteredException(ep.getEndpointUri)) + if (exchange.getPattern.isOutCapable) processInOut(exchange, actor) + else processInOnly(exchange, actor) + } + + /** + * Sends the exchange in-message to the given actor using the ! operator. The message + * sent to the actor is of type se.scalablesolutions.akka.camel.Message. + */ + protected def processInOnly(exchange: Exchange, actor: Actor): Unit = + actor ! exchange.toRequestMessage(Map(Message.MessageExchangeId -> exchange.getExchangeId)) + + /** + * Sends the exchange in-message to the given actor using the !! operator. The exchange + * out-message is populated from the actor's reply message. The message sent to the + * actor is of type se.scalablesolutions.akka.camel.Message. + */ + protected def processInOut(exchange: Exchange, actor: Actor) { + val header = Map(Message.MessageExchangeId -> exchange.getExchangeId) + val result: Any = actor !! exchange.toRequestMessage(header) + + result match { + case Some(msg: Failure) => exchange.fromFailureMessage(msg) + case Some(msg) => exchange.fromResponseMessage(Message.canonicalize(msg)) + case None => { + throw new TimeoutException("timeout (%d ms) while waiting for response from %s" + format (actor.timeout, ep.getEndpointUri)) + } + } + } + + private def target: Option[Actor] = + if (ep.id.isDefined) targetById(ep.id.get) + else targetByUuid(ep.uuid.get) + + private def targetById(id: String) = ActorRegistry.actorsFor(id) match { + case Nil => None + case actor :: Nil => Some(actor) + case actors => Some(actors.first) + } + + private def targetByUuid(uuid: String) = ActorRegistry.actorFor(uuid) +} + +/** + * Thrown to indicate that an actor referenced by an endpoint URI cannot be + * found in the ActorRegistry. + * + * @author Martin Krasser + */ +class ActorNotRegisteredException(uri: String) extends RuntimeException { + override def getMessage = "%s not registered" format uri +} \ No newline at end of file
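A sketch of the two-way case from the Camel side (hypothetical helper; cf. ActorComponentFeatureTest below): an out-capable exchange makes ActorProducer use !!, and a missing reply within the actor's timeout surfaces as a TimeoutException, wrapped by Camel in a RuntimeCamelException at the template level:

    import se.scalablesolutions.akka.camel.CamelContextManager

    object TwoWaySketch {
      def request(id: String): AnyRef =
        // waits for the actor's reply, which becomes the exchange out-message
        CamelContextManager.template.requestBody("actor:id:%s" format id, "Martin")
    }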
diff --git a/akka-camel/src/main/scala/service/CamelService.scala b/akka-camel/src/main/scala/service/CamelService.scala new file mode 100644 index 0000000000..86b4f2dc23 --- /dev/null +++ b/akka-camel/src/main/scala/service/CamelService.scala @@ -0,0 +1,89 @@ +/** + * Copyright (C) 2009-2010 Scalable Solutions AB + */ + +package se.scalablesolutions.akka.camel.service + +import se.scalablesolutions.akka.actor.ActorRegistry +import se.scalablesolutions.akka.camel.CamelContextManager +import se.scalablesolutions.akka.util.{Bootable, Logging} + +/** + * Used by applications (and the Kernel) to publish consumer actors via Camel + * endpoints and to manage the life cycle of a global CamelContext which can + * be accessed via se.scalablesolutions.akka.camel.CamelContextManager. + * + * @author Martin Krasser + */ +trait CamelService extends Bootable with Logging { + + import se.scalablesolutions.akka.actor.Actor.Sender.Self + import CamelContextManager._ + + private[camel] val consumerPublisher = new ConsumerPublisher + private[camel] val publishRequestor = new PublishRequestor(consumerPublisher) + + /** + * Starts the CamelService. Any started actor that is a consumer actor will be + * published as a Camel endpoint. Consumer actors that are started after this method returns will + * be published as well. Actor publishing is done asynchronously. + */ + abstract override def onLoad = { + super.onLoad + + // Only init and start if not already done by application + if (!initialized) init + if (!started) start + + // Camel should cache input streams + context.setStreamCaching(true) + + // start actor that exposes consumer actors via Camel endpoints + consumerPublisher.start + + // add listener for actor registration events + ActorRegistry.addRegistrationListener(publishRequestor.start) + + // publish already registered consumer actors + for (publish <- Publish.forConsumers(ActorRegistry.actors)) consumerPublisher ! publish + } + + /** + * Stops the CamelService. + */ + abstract override def onUnload = { + ActorRegistry.removeRegistrationListener(publishRequestor) + publishRequestor.stop + consumerPublisher.stop + stop + super.onUnload + } + + /** + * Starts the CamelService. + * + * @see onLoad + */ + def load = onLoad + + /** + * Stops the CamelService. + * + * @see onUnload + */ + def unload = onUnload +} + +/** + * CamelService companion object used by standalone applications to create their own + * CamelService instance. + * + * @author Martin Krasser + */ +object CamelService { + + /** + * Creates a new CamelService instance. + */ + def newInstance: CamelService = new CamelService {} +}
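Standalone applications would bootstrap this roughly as follows (hypothetical main object; cf. CamelServiceFeatureTest later in this patch):

    import se.scalablesolutions.akka.camel.service.CamelService

    object CamelServiceSketch {
      def main(args: Array[String]) {
        val service = CamelService.newInstance
        service.load    // inits/starts the global CamelContext and publishes consumer actors
        // ... consumer actors started from now on are published as well ...
        service.unload  // stops publishing and the global CamelContext
      }
    }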
diff --git a/akka-camel/src/main/scala/service/ConsumerPublisher.scala b/akka-camel/src/main/scala/service/ConsumerPublisher.scala new file mode 100644 index 0000000000..a6509e2694 --- /dev/null +++ b/akka-camel/src/main/scala/service/ConsumerPublisher.scala @@ -0,0 +1,135 @@ +/** + * Copyright (C) 2009-2010 Scalable Solutions AB + */ +package se.scalablesolutions.akka.camel.service + +import java.io.InputStream +import java.util.concurrent.CountDownLatch + +import org.apache.camel.builder.RouteBuilder + +import se.scalablesolutions.akka.actor.{ActorUnregistered, ActorRegistered, Actor} +import se.scalablesolutions.akka.actor.annotation.consume +import se.scalablesolutions.akka.camel.{Consumer, CamelContextManager} +import se.scalablesolutions.akka.util.Logging + +/** + * Actor that publishes consumer actors as Camel endpoints at the CamelContext managed + * by se.scalablesolutions.akka.camel.CamelContextManager. It accepts messages of type + * se.scalablesolutions.akka.camel.service.Publish. + * + * @author Martin Krasser + */ +class ConsumerPublisher extends Actor with Logging { + @volatile private var latch = new CountDownLatch(0) + + /** + * Adds a route for the actor identified by a Publish message to the global CamelContext. + */ + protected def receive = { + case p: Publish => publish(new ConsumerRoute(p.endpointUri, p.id, p.uuid)) + case _ => { /* ignore */} + } + + /** + * Sets the number of expected Publish messages received by this actor. Used for testing + * only. + */ + private[camel] def expectPublishCount(count: Int): Unit = latch = new CountDownLatch(count) + + /** + * Waits for the number of expected Publish messages to arrive. Used for testing only. + */ + private[camel] def awaitPublish = latch.await + + private def publish(route: ConsumerRoute) { + CamelContextManager.context.addRoutes(route) + log.info("published actor via endpoint %s" format route.endpointUri) + latch.countDown // needed for testing only. + } +} + +/** + * Defines the route to a consumer actor. + * + * @param endpointUri endpoint URI of the consumer actor + * @param id actor identifier + * @param uuid true if id refers to Actor.uuid, false if + * id refers to Actor.getId. + * + * @author Martin Krasser + */ +class ConsumerRoute(val endpointUri: String, id: String, uuid: Boolean) extends RouteBuilder { + // TODO: make conversions configurable + private val bodyConversions = Map( + "file" -> classOf[InputStream] + ) + + def configure = { + val schema = endpointUri take endpointUri.indexOf(":") // e.g. "http" from "http://whatever/..." + bodyConversions.get(schema) match { + case Some(clazz) => from(endpointUri).convertBodyTo(clazz).to(actorUri) + case None => from(endpointUri).to(actorUri) + } + } + + private def actorUri = (if (uuid) "actor:uuid:%s" else "actor:id:%s") format id +} + +/** + * A registration listener that publishes consumer actors (and ignores other actors). + * + * @author Martin Krasser + */ +class PublishRequestor(consumerPublisher: Actor) extends Actor { + protected def receive = { + case ActorUnregistered(actor) => { /* ignore */ } + case ActorRegistered(actor) => Publish.forConsumer(actor) match { + case Some(publish) => consumerPublisher ! publish + case None => { /* ignore */ } + } + } +} + +/** + * Request message for publishing a consumer actor. + * + * @param endpointUri endpoint URI of the consumer actor + * @param id actor identifier + * @param uuid true if id refers to Actor.uuid, false if + * id refers to Actor.getId. + * + * @author Martin Krasser + */ +case class Publish(endpointUri: String, id: String, uuid: Boolean) + +/** + * @author Martin Krasser + */ +object Publish { + + /** + * Creates a list of Publish request messages for all consumer actors in the actors + * list. + */ + def forConsumers(actors: List[Actor]): List[Publish] = + for (actor <- actors; pub = forConsumer(actor); if pub.isDefined) yield pub.get + + /** + * Creates a Publish request message if actor is a consumer actor. + */ + def forConsumer(actor: Actor): Option[Publish] = + forConsumeAnnotated(actor) orElse forConsumerType(actor) + + private def forConsumeAnnotated(actor: Actor): Option[Publish] = { + val annotation = actor.getClass.getAnnotation(classOf[consume]) + if (annotation eq null) None + else if (actor._remoteAddress.isDefined) None // do not publish proxies + else Some(Publish(annotation.value, actor.getId, false)) + } + + private def forConsumerType(actor: Actor): Option[Publish] = + if (!actor.isInstanceOf[Consumer]) None + else if (actor._remoteAddress.isDefined) None + else Some(Publish(actor.asInstanceOf[Consumer].endpointUri, actor.uuid, true)) +}
diff --git a/akka-camel/src/test/scala/CamelContextLifecycleTest.scala b/akka-camel/src/test/scala/CamelContextLifecycleTest.scala new file mode 100644 index 0000000000..b61db27878 --- /dev/null +++ b/akka-camel/src/test/scala/CamelContextLifecycleTest.scala @@ -0,0 +1,23 @@ +package se.scalablesolutions.akka.camel + +import org.apache.camel.impl.{DefaultProducerTemplate, DefaultCamelContext} +import org.junit.Test +import org.scalatest.junit.JUnitSuite + +class CamelContextLifecycleTest extends JUnitSuite with CamelContextLifecycle { + @Test def shouldManageCustomCamelContext { + assert(context === null) + assert(template === null) + init(new TestCamelContext) + assert(!context.asInstanceOf[TestCamelContext].isStarted) + assert(!template.asInstanceOf[DefaultProducerTemplate].isStarted) + start + assert(context.asInstanceOf[TestCamelContext].isStarted) + assert(template.asInstanceOf[DefaultProducerTemplate].isStarted) + stop + assert(!context.asInstanceOf[TestCamelContext].isStarted) + assert(!template.asInstanceOf[DefaultProducerTemplate].isStarted) + } + + class TestCamelContext extends DefaultCamelContext +} \ No newline at end of file
diff --git a/akka-camel/src/test/scala/CamelExchangeAdapterTest.scala b/akka-camel/src/test/scala/CamelExchangeAdapterTest.scala new file mode 100644 index 0000000000..cb3e0bde29 --- /dev/null +++ b/akka-camel/src/test/scala/CamelExchangeAdapterTest.scala @@ -0,0 +1,109 @@ +package se.scalablesolutions.akka.camel + +import org.apache.camel.impl.{DefaultCamelContext, DefaultExchange} +import org.apache.camel.ExchangePattern +import org.junit.Test +import org.scalatest.junit.JUnitSuite + +class CamelExchangeAdapterTest extends JUnitSuite { + import CamelMessageConversion.toExchangeAdapter + + @Test def shouldSetInMessageFromRequestMessage = { + val e1 = sampleInOnly.fromRequestMessage(Message("x")) + assert(e1.getIn.getBody === "x") + val e2 = sampleInOut.fromRequestMessage(Message("y")) + assert(e2.getIn.getBody === "y") + } + + @Test def shouldSetOutMessageFromResponseMessage = { + val e1 = sampleInOut.fromResponseMessage(Message("y")) +
assert(e1.getOut.getBody === "y") + } + + @Test def shouldSetInMessageFromResponseMessage = { + val e1 = sampleInOnly.fromResponseMessage(Message("x")) + assert(e1.getIn.getBody === "x") + } + + @Test def shouldSetExceptionFromFailureMessage = { + val e1 = sampleInOnly.fromFailureMessage(Failure(new Exception("test1"), Map.empty)) + assert(e1.getException.getMessage === "test1") + val e2 = sampleInOut.fromFailureMessage(Failure(new Exception("test2"), Map.empty)) + assert(e2.getException.getMessage === "test2") + } + + @Test def shouldCreateRequestMessageFromInMessage = { + val m = sampleInOnly.toRequestMessage + assert(m === Message("test-in", Map("key-in" -> "val-in"))) + } + + @Test def shouldCreateResponseMessageFromInMessage = { + val m = sampleInOnly.toResponseMessage + assert(m === Message("test-in", Map("key-in" -> "val-in"))) + } + + @Test def shouldCreateResponseMessageFromOutMessage = { + val m = sampleInOut.toResponseMessage + assert(m === Message("test-out", Map("key-out" -> "val-out"))) + } + + @Test def shouldCreateFailureMessageFromExceptionAndInMessage = { + val e1 = sampleInOnly + e1.setException(new Exception("test1")) + assert(e1.toFailureMessage.cause.getMessage === "test1") + assert(e1.toFailureMessage.headers("key-in") === "val-in") + } + + @Test def shouldCreateFailureMessageFromExceptionAndOutMessage = { + val e1 = sampleInOut + e1.setException(new Exception("test2")) + assert(e1.toFailureMessage.cause.getMessage === "test2") + assert(e1.toFailureMessage.headers("key-out") === "val-out") + } + + @Test def shouldCreateRequestMessageFromInMessageWithAdditionalHeader = { + val m = sampleInOnly.toRequestMessage(Map("x" -> "y")) + assert(m === Message("test-in", Map("key-in" -> "val-in", "x" -> "y"))) + } + + @Test def shouldCreateResponseMessageFromInMessageWithAdditionalHeader = { + val m = sampleInOnly.toResponseMessage(Map("x" -> "y")) + assert(m === Message("test-in", Map("key-in" -> "val-in", "x" -> "y"))) + } + + @Test def shouldCreateResponseMessageFromOutMessageWithAdditionalHeader = { + val m = sampleInOut.toResponseMessage(Map("x" -> "y")) + assert(m === Message("test-out", Map("key-out" -> "val-out", "x" -> "y"))) + } + + @Test def shouldCreateFailureMessageFromExceptionAndInMessageWithAdditionalHeader = { + val e1 = sampleInOnly + e1.setException(new Exception("test1")) + assert(e1.toFailureMessage.cause.getMessage === "test1") + val headers = e1.toFailureMessage(Map("x" -> "y")).headers + assert(headers("key-in") === "val-in") + assert(headers("x") === "y") + } + + @Test def shouldCreateFailureMessageFromExceptionAndOutMessageWithAdditionalHeader = { + val e1 = sampleInOut + e1.setException(new Exception("test2")) + assert(e1.toFailureMessage.cause.getMessage === "test2") + val headers = e1.toFailureMessage(Map("x" -> "y")).headers + assert(headers("key-out") === "val-out") + assert(headers("x") === "y") + } + + private def sampleInOnly = sampleExchange(ExchangePattern.InOnly) + private def sampleInOut = sampleExchange(ExchangePattern.InOut) + + private def sampleExchange(pattern: ExchangePattern) = { + val exchange = new DefaultExchange(new DefaultCamelContext) + exchange.getIn.setBody("test-in") + exchange.getOut.setBody("test-out") + exchange.getIn.setHeader("key-in", "val-in") + exchange.getOut.setHeader("key-out", "val-out") + exchange.setPattern(pattern) + exchange + } +} \ No newline at end of file diff --git a/akka-camel/src/test/scala/CamelMessageAdapterTest.scala b/akka-camel/src/test/scala/CamelMessageAdapterTest.scala new file mode 100644 index 
0000000000..5f63d1b91c --- /dev/null +++ b/akka-camel/src/test/scala/CamelMessageAdapterTest.scala @@ -0,0 +1,38 @@ +package se.scalablesolutions.akka.camel + +import org.apache.camel.impl.DefaultMessage +import org.junit.Test +import org.scalatest.junit.JUnitSuite + +class CamelMessageAdapterTest extends JUnitSuite { + import CamelMessageConversion.toMessageAdapter + + @Test def shouldOverwriteBodyAndAddHeader = { + val cm = sampleMessage.fromMessage(Message("blah", Map("key" -> "baz"))) + assert(cm.getBody === "blah") + assert(cm.getHeader("foo") === "bar") + assert(cm.getHeader("key") === "baz") + } + + @Test def shouldCreateMessageWithBodyAndHeader = { + val m = sampleMessage.toMessage + assert(m.body === "test") + assert(m.headers("foo") === "bar") + } + + @Test def shouldCreateMessageWithBodyAndHeaderAndCustomHeader = { + val m = sampleMessage.toMessage(Map("key" -> "baz")) + assert(m.body === "test") + assert(m.headers("foo") === "bar") + assert(m.headers("key") === "baz") + } + + private[camel] def sampleMessage = { + val message = new DefaultMessage + message.setBody("test") + message.setHeader("foo", "bar") + message + } + + +} \ No newline at end of file diff --git a/akka-camel/src/test/scala/MessageTest.scala b/akka-camel/src/test/scala/MessageTest.scala new file mode 100644 index 0000000000..797d48ee57 --- /dev/null +++ b/akka-camel/src/test/scala/MessageTest.scala @@ -0,0 +1,73 @@ +package se.scalablesolutions.akka.camel + +import java.io.InputStream + +import org.apache.camel.NoTypeConversionAvailableException +import org.junit.Assert._ +import org.junit.Test + +import org.scalatest.BeforeAndAfterAll +import org.scalatest.junit.JUnitSuite + + +class MessageTest extends JUnitSuite with BeforeAndAfterAll { + override protected def beforeAll = CamelContextManager.init + + @Test def shouldConvertDoubleBodyToString = { + assertEquals("1.4", Message(1.4, null).bodyAs(classOf[String])) + } + + @Test def shouldThrowExceptionWhenConvertingDoubleBodyToInputStream { + intercept[NoTypeConversionAvailableException] { + Message(1.4, null).bodyAs(classOf[InputStream]) + } + } + + @Test def shouldReturnSubsetOfHeaders = { + val message = Message("test" , Map("A" -> "1", "B" -> "2")) + assertEquals(Map("B" -> "2"), message.headers(Set("B"))) + } + + @Test def shouldTransformBodyAndPreserveHeaders = { + assertEquals( + Message("ab", Map("A" -> "1")), + Message("a" , Map("A" -> "1")).transformBody[String](body => body + "b")) + } + + @Test def shouldConvertBodyAndPreserveHeaders = { + assertEquals( + Message("1.4", Map("A" -> "1")), + Message(1.4 , Map("A" -> "1")).setBodyAs(classOf[String])) + } + + @Test def shouldSetBodyAndPreserveHeaders = { + assertEquals( + Message("test2" , Map("A" -> "1")), + Message("test1" , Map("A" -> "1")).setBody("test2")) + } + + @Test def shouldSetHeadersAndPreserveBody = { + assertEquals( + Message("test1" , Map("C" -> "3")), + Message("test1" , Map("A" -> "1")).setHeaders(Map("C" -> "3"))) + + } + + @Test def shouldAddHeaderAndPreserveBodyAndHeaders = { + assertEquals( + Message("test1" , Map("A" -> "1", "B" -> "2")), + Message("test1" , Map("A" -> "1")).addHeader("B" -> "2")) + } + + @Test def shouldAddHeadersAndPreserveBodyAndHeaders = { + assertEquals( + Message("test1" , Map("A" -> "1", "B" -> "2")), + Message("test1" , Map("A" -> "1")).addHeaders(Map("B" -> "2"))) + } + + @Test def shouldRemoveHeadersAndPreserveBodyAndRemainingHeaders = { + assertEquals( + Message("test1" , Map("A" -> "1")), + Message("test1" , Map("A" -> "1", "B" -> 
"2")).removeHeader("B")) + } +} \ No newline at end of file diff --git a/akka-camel/src/test/scala/ProducerFeatureTest.scala b/akka-camel/src/test/scala/ProducerFeatureTest.scala new file mode 100644 index 0000000000..c203615a73 --- /dev/null +++ b/akka-camel/src/test/scala/ProducerFeatureTest.scala @@ -0,0 +1,137 @@ +package se.scalablesolutions.akka.camel + +import org.apache.camel.{Exchange, Processor} +import org.apache.camel.builder.RouteBuilder +import org.apache.camel.component.mock.MockEndpoint +import org.scalatest.{GivenWhenThen, BeforeAndAfterEach, BeforeAndAfterAll, FeatureSpec} + +import se.scalablesolutions.akka.actor.{Actor, ActorRegistry} +import se.scalablesolutions.akka.actor.Actor.Sender.Self + +class ProducerFeatureTest extends FeatureSpec with BeforeAndAfterAll with BeforeAndAfterEach with GivenWhenThen { + override protected def beforeAll = { + ActorRegistry.shutdownAll + CamelContextManager.init + CamelContextManager.context.addRoutes(new TestRoute) + CamelContextManager.start + } + + override protected def afterAll = CamelContextManager.stop + + override protected def afterEach = { + mockEndpoint.reset + ActorRegistry.shutdownAll + } + + feature("Produce a message to a Camel endpoint") { + + scenario("produce message sync and receive response") { + given("a registered synchronous two-way producer for endpoint direct:producer-test-2") + val producer = new TestProducer("direct:producer-test-2") with Sync + producer.start + + when("a test message is sent to the producer") + val message = Message("test", Map(Message.MessageExchangeId -> "123")) + val result = producer !! message + + then("the expected result message should be returned including a correlation identifier") + val expected = Message("received test", Map(Message.MessageExchangeId -> "123")) + assert(result === Some(expected)) + } + + scenario("produce message async and receive response") { + given("a registered asynchronous two-way producer for endpoint direct:producer-test-2") + val producer = new TestProducer("direct:producer-test-2") + producer.start + + when("a test message is sent to the producer") + val message = Message("test", Map(Message.MessageExchangeId -> "123")) + val result = producer !! 
message + + then("the expected result message should be returned including a correlation identifier") + val expected = Message("received test", Map(Message.MessageExchangeId -> "123")) + assert(result === Some(expected)) + } + + scenario("produce message sync and receive failure") { + given("a registered synchronous two-way producer for endpoint direct:producer-test-2") + val producer = new TestProducer("direct:producer-test-2") with Sync + producer.start + + when("a fail message is sent to the producer") + val message = Message("fail", Map(Message.MessageExchangeId -> "123")) + val result = producer.!![Failure](message) + + then("the expected failure message should be returned including a correlation identifier") + val expectedFailureText = result.get.cause.getMessage + val expectedHeaders = result.get.headers + assert(expectedFailureText === "failure") + assert(expectedHeaders === Map(Message.MessageExchangeId -> "123")) + } + + scenario("produce message async and receive failure") { + given("a registered asynchronous two-way producer for endpoint direct:producer-test-2") + val producer = new TestProducer("direct:producer-test-2") + producer.start + + when("a fail message is sent to the producer") + val message = Message("fail", Map(Message.MessageExchangeId -> "123")) + val result = producer.!![Failure](message) + + then("the expected failure message should be returned including a correlation identifier") + val expectedFailureText = result.get.cause.getMessage + val expectedHeaders = result.get.headers + assert(expectedFailureText === "failure") + assert(expectedHeaders === Map(Message.MessageExchangeId -> "123")) + } + + scenario("produce message sync oneway") { + given("a registered synchronous one-way producer for endpoint direct:producer-test-1") + val producer = new TestProducer("direct:producer-test-1") with Sync with Oneway + producer.start + + when("a test message is sent to the producer") + mockEndpoint.expectedBodiesReceived("test") + producer ! Message("test") + + then("the expected message should have been sent to mock:mock") + mockEndpoint.assertIsSatisfied + } + + scenario("produce message async oneway") { + given("a registered asynchronous one-way producer for endpoint direct:producer-test-1") + val producer = new TestProducer("direct:producer-test-1") with Oneway + producer.start + + when("a test message is sent to the producer") + mockEndpoint.expectedBodiesReceived("test") + producer ! 
Message("test") + + then("the expected message should have been sent to mock:mock") + mockEndpoint.assertIsSatisfied + } + } + + private def mockEndpoint = CamelContextManager.context.getEndpoint("mock:mock", classOf[MockEndpoint]) + + class TestProducer(uri: String) extends Actor with Producer { + def endpointUri = uri + def receive = produce + } + + class TestRoute extends RouteBuilder { + def configure { + // for one-way messaging tests + from("direct:producer-test-1").to("mock:mock") + // for two-way messaging tests + from("direct:producer-test-2").process(new Processor() { + def process(exchange: Exchange) = { + exchange.getIn.getBody match { + case "fail" => throw new Exception("failure") + case body => exchange.getOut.setBody("received %s" format body) + } + } + }) + } + } +} \ No newline at end of file diff --git a/akka-camel/src/test/scala/component/ActorComponentFeatureTest.scala b/akka-camel/src/test/scala/component/ActorComponentFeatureTest.scala new file mode 100644 index 0000000000..396fceacb8 --- /dev/null +++ b/akka-camel/src/test/scala/component/ActorComponentFeatureTest.scala @@ -0,0 +1,62 @@ +package se.scalablesolutions.akka.camel.component + +import org.apache.camel.RuntimeCamelException +import org.scalatest.{BeforeAndAfterEach, BeforeAndAfterAll, FeatureSpec} + +import se.scalablesolutions.akka.actor.ActorRegistry +import se.scalablesolutions.akka.camel.support.{Respond, Countdown, Tester, Retain} +import se.scalablesolutions.akka.camel.{Message, CamelContextManager} + +class ActorComponentFeatureTest extends FeatureSpec with BeforeAndAfterAll with BeforeAndAfterEach { + override protected def beforeAll = { + ActorRegistry.shutdownAll + CamelContextManager.init + CamelContextManager.start + } + + override protected def afterAll = CamelContextManager.stop + + override protected def afterEach = ActorRegistry.shutdownAll + + feature("Communicate with an actor from a Camel application using actor endpoint URIs") { + import CamelContextManager.template + + scenario("one-way communication using actor id") { + val actor = new Tester with Retain with Countdown[Message] + actor.start + template.sendBody("actor:%s" format actor.getId, "Martin") + assert(actor.waitFor) + assert(actor.body === "Martin") + } + + scenario("one-way communication using actor uuid") { + val actor = new Tester with Retain with Countdown[Message] + actor.start + template.sendBody("actor:uuid:%s" format actor.uuid, "Martin") + assert(actor.waitFor) + assert(actor.body === "Martin") + } + + scenario("two-way communication using actor id") { + val actor = new Tester with Respond + actor.start + assert(template.requestBody("actor:%s" format actor.getId, "Martin") === "Hello Martin") + } + + scenario("two-way communication using actor uuid") { + val actor = new Tester with Respond + actor.start + assert(template.requestBody("actor:uuid:%s" format actor.uuid, "Martin") === "Hello Martin") + } + + scenario("two-way communication with timeout") { + val actor = new Tester { + timeout = 1 + } + actor.start + intercept[RuntimeCamelException] { + template.requestBody("actor:uuid:%s" format actor.uuid, "Martin") + } + } + } +} \ No newline at end of file diff --git a/akka-camel/src/test/scala/component/ActorComponentTest.scala b/akka-camel/src/test/scala/component/ActorComponentTest.scala new file mode 100644 index 0000000000..6bf472916b --- /dev/null +++ b/akka-camel/src/test/scala/component/ActorComponentTest.scala @@ -0,0 +1,34 @@ +package se.scalablesolutions.akka.camel.component + +import 
org.apache.camel.impl.DefaultCamelContext +import org.junit._ +import org.scalatest.junit.JUnitSuite + +class ActorComponentTest extends JUnitSuite { + val component: ActorComponent = ActorComponentTest.mockComponent + + @Test def shouldCreateEndpointWithIdDefined = { + val ep1: ActorEndpoint = component.createEndpoint("actor:abc").asInstanceOf[ActorEndpoint] + val ep2: ActorEndpoint = component.createEndpoint("actor:id:abc").asInstanceOf[ActorEndpoint] + assert(ep1.id === Some("abc")) + assert(ep2.id === Some("abc")) + assert(ep1.uuid === None) + assert(ep2.uuid === None) + } + + @Test def shouldCreateEndpointWithUuidDefined = { + val ep: ActorEndpoint = component.createEndpoint("actor:uuid:abc").asInstanceOf[ActorEndpoint] + assert(ep.uuid === Some("abc")) + assert(ep.id === None) + } +} + +object ActorComponentTest { + def mockComponent = { + val component = new ActorComponent + component.setCamelContext(new DefaultCamelContext) + component + } + + def mockEndpoint(uri:String) = mockComponent.createEndpoint(uri) +} diff --git a/akka-camel/src/test/scala/component/ActorProducerTest.scala b/akka-camel/src/test/scala/component/ActorProducerTest.scala new file mode 100644 index 0000000000..ad8d074f89 --- /dev/null +++ b/akka-camel/src/test/scala/component/ActorProducerTest.scala @@ -0,0 +1,73 @@ +package se.scalablesolutions.akka.camel.component + +import ActorComponentTest._ + +import java.util.concurrent.TimeoutException + +import org.apache.camel.ExchangePattern +import org.junit.{After, Test} +import org.scalatest.junit.JUnitSuite +import org.scalatest.BeforeAndAfterAll + +import se.scalablesolutions.akka.actor.ActorRegistry +import se.scalablesolutions.akka.camel.support.{Countdown, Retain, Tester, Respond} +import se.scalablesolutions.akka.camel.{Failure, Message} + +class ActorProducerTest extends JUnitSuite with BeforeAndAfterAll { + @After def tearDown = ActorRegistry.shutdownAll + + @Test def shouldSendMessageToActor = { + val actor = new Tester with Retain with Countdown[Message] + val endpoint = mockEndpoint("actor:uuid:%s" format actor.uuid) + val exchange = endpoint.createExchange(ExchangePattern.InOnly) + actor.start + exchange.getIn.setBody("Martin") + exchange.getIn.setHeader("k1", "v1") + endpoint.createProducer.process(exchange) + actor.waitFor + assert(actor.body === "Martin") + assert(actor.headers === Map(Message.MessageExchangeId -> exchange.getExchangeId, "k1" -> "v1")) + } + + @Test def shouldSendMessageToActorAndReceiveResponse = { + val actor = new Tester with Respond { + override def response(msg: Message) = Message(super.response(msg), Map("k2" -> "v2")) + } + val endpoint = mockEndpoint("actor:uuid:%s" format actor.uuid) + val exchange = endpoint.createExchange(ExchangePattern.InOut) + actor.start + exchange.getIn.setBody("Martin") + exchange.getIn.setHeader("k1", "v1") + endpoint.createProducer.process(exchange) + assert(exchange.getOut.getBody === "Hello Martin") + assert(exchange.getOut.getHeader("k2") === "v2") + } + + @Test def shouldSendMessageToActorAndReceiveFailure = { + val actor = new Tester with Respond { + override def response(msg: Message) = Failure(new Exception("testmsg"), Map("k3" -> "v3")) + } + val endpoint = mockEndpoint("actor:uuid:%s" format actor.uuid) + val exchange = endpoint.createExchange(ExchangePattern.InOut) + actor.start + exchange.getIn.setBody("Martin") + exchange.getIn.setHeader("k1", "v1") + endpoint.createProducer.process(exchange) + assert(exchange.getException.getMessage === "testmsg") + assert(exchange.getOut.getBody === 
null) + assert(exchange.getOut.getHeader("k3") === null) // headers from failure message are currently ignored + } + + @Test def shouldSendMessageToActorAndTimeout: Unit = { + val actor = new Tester { + timeout = 1 + } + val endpoint = mockEndpoint("actor:uuid:%s" format actor.uuid) + val exchange = endpoint.createExchange(ExchangePattern.InOut) + actor.start + exchange.getIn.setBody("Martin") + intercept[TimeoutException] { + endpoint.createProducer.process(exchange) + } + } +}
diff --git a/akka-camel/src/test/scala/service/CamelServiceFeatureTest.scala b/akka-camel/src/test/scala/service/CamelServiceFeatureTest.scala new file mode 100644 index 0000000000..587fe64a43 --- /dev/null +++ b/akka-camel/src/test/scala/service/CamelServiceFeatureTest.scala @@ -0,0 +1,85 @@ +package se.scalablesolutions.akka.camel.service + +import org.apache.camel.builder.RouteBuilder +import org.scalatest.{GivenWhenThen, BeforeAndAfterAll, FeatureSpec} + +import se.scalablesolutions.akka.actor.{Actor, ActorRegistry} +import se.scalablesolutions.akka.camel.{CamelContextManager, Message, Consumer} + +class CamelServiceFeatureTest extends FeatureSpec with BeforeAndAfterAll with GivenWhenThen { + var service: CamelService = CamelService.newInstance + + override protected def beforeAll = { + ActorRegistry.shutdownAll + // register test consumer before starting the CamelService + new TestConsumer("direct:publish-test-1").start + // Configure a custom Camel route + CamelContextManager.init + CamelContextManager.context.addRoutes(new TestRoute) + // set expectations for testing purposes + service.consumerPublisher.expectPublishCount(1) + // start the CamelService + service.load + // await publication of first test consumer + service.consumerPublisher.awaitPublish + } + + override protected def afterAll = { + service.unload + ActorRegistry.shutdownAll + } + + feature("Publish registered consumer actors in the global CamelContext") { + + scenario("access registered consumer actors via Camel direct-endpoints") { + + given("two consumer actors registered before and after CamelService startup") + service.consumerPublisher.expectPublishCount(1) + new TestConsumer("direct:publish-test-2").start + + when("requests are sent to these actors") + service.consumerPublisher.awaitPublish + val response1 = CamelContextManager.template.requestBody("direct:publish-test-1", "msg1") + val response2 = CamelContextManager.template.requestBody("direct:publish-test-2", "msg2") + + then("both actors should have replied with expected responses") + assert(response1 === "received msg1") + assert(response2 === "received msg2") + } + } + + feature("Configure a custom Camel route for the global CamelContext") { + + scenario("access an actor from the custom Camel route") { + + given("a registered actor and a custom route to that actor") + val actor = new TestActor().start + + when("sending a message to that route") + val response = CamelContextManager.template.requestBody("direct:custom-route-test-1", "msg3") + + then("an expected response generated by the actor should be returned") + assert(response === "received msg3") + } + } + + class TestConsumer(uri: String) extends Actor with Consumer { + def endpointUri = uri + protected def receive = { + case msg: Message => reply("received %s" format msg.body) + } + } + + class TestActor extends Actor { + id = "custom-actor-id" + protected def receive = { + case msg: Message => reply("received %s" format msg.body) + } + } + + class TestRoute extends RouteBuilder { + def configure {
from("direct:custom-route-test-1").to("actor:custom-actor-id") + } + } +} \ No newline at end of file diff --git a/akka-camel/src/test/scala/service/PublishRequestorTest.scala b/akka-camel/src/test/scala/service/PublishRequestorTest.scala new file mode 100644 index 0000000000..d9b9b7bc61 --- /dev/null +++ b/akka-camel/src/test/scala/service/PublishRequestorTest.scala @@ -0,0 +1,36 @@ +package se.scalablesolutions.akka.camel.service + +import org.junit.{After, Test} +import org.scalatest.junit.JUnitSuite + +import se.scalablesolutions.akka.camel.Consumer +import se.scalablesolutions.akka.camel.support.{Receive, Countdown} +import se.scalablesolutions.akka.actor.{ActorRegistry, ActorRegistered, Actor} + +class PublishRequestorTest extends JUnitSuite { + @After def tearDown = ActorRegistry.shutdownAll + + @Test def shouldReceivePublishRequestOnActorRegisteredEvent = { + val consumer = new Actor with Consumer { + def endpointUri = "mock:test" + protected def receive = null + } + val publisher = new PublisherMock with Countdown[Publish] + val requestor = new PublishRequestor(publisher) + publisher.start + requestor.start + requestor.!(ActorRegistered(consumer))(None) + publisher.waitFor + assert(publisher.received === Publish("mock:test", consumer.uuid, true)) + publisher.stop + requestor.stop + } + + class PublisherMock extends Actor with Receive[Publish] { + var received: Publish = _ + protected def receive = { + case msg: Publish => onMessage(msg) + } + def onMessage(msg: Publish) = received = msg + } +} \ No newline at end of file diff --git a/akka-camel/src/test/scala/service/PublishTest.scala b/akka-camel/src/test/scala/service/PublishTest.scala new file mode 100644 index 0000000000..a73e610003 --- /dev/null +++ b/akka-camel/src/test/scala/service/PublishTest.scala @@ -0,0 +1,47 @@ +package se.scalablesolutions.akka.camel.service + +import org.junit.Test +import org.scalatest.junit.JUnitSuite + +import se.scalablesolutions.akka.actor.Actor +import se.scalablesolutions.akka.actor.annotation.consume +import se.scalablesolutions.akka.camel.Consumer + +class PublishTest extends JUnitSuite { + @Test def shouldCreatePublishRequestList = { + val publish = Publish.forConsumers(List(new ConsumeAnnotatedActor)) + assert(publish === List(Publish("mock:test1", "test", false))) + } + + @Test def shouldCreateSomePublishRequestWithActorId = { + val publish = Publish.forConsumer(new ConsumeAnnotatedActor) + assert(publish === Some(Publish("mock:test1", "test", false))) + } + + @Test def shouldCreateSomePublishRequestWithActorUuid = { + val actor = new ConsumerActor + val publish = Publish.forConsumer(actor) + assert(publish === Some(Publish("mock:test2", actor.uuid, true))) + assert(publish === Some(Publish("mock:test2", actor.uuid, true))) + } + + @Test def shouldCreateNone = { + val publish = Publish.forConsumer(new PlainActor) + assert(publish === None) + } + + @consume("mock:test1") + class ConsumeAnnotatedActor extends Actor { + id = "test" + protected def receive = null + } + + class ConsumerActor extends Actor with Consumer { + def endpointUri = "mock:test2" + protected def receive = null + } + + class PlainActor extends Actor { + protected def receive = null + } +} \ No newline at end of file diff --git a/akka-camel/src/test/scala/support/TestSupport.scala b/akka-camel/src/test/scala/support/TestSupport.scala new file mode 100644 index 0000000000..8dc7d4dd04 --- /dev/null +++ b/akka-camel/src/test/scala/support/TestSupport.scala @@ -0,0 +1,49 @@ +package se.scalablesolutions.akka.camel.support + 
+import java.util.concurrent.{TimeUnit, CountDownLatch} + +import se.scalablesolutions.akka.camel.Message +import se.scalablesolutions.akka.actor.Actor + +trait Receive[T] { + def onMessage(msg: T): Unit +} + +trait Respond extends Receive[Message] {self: Actor => + abstract override def onMessage(msg: Message): Unit = { + super.onMessage(msg) + reply(response(msg)) + } + def response(msg: Message): Any = "Hello %s" format msg.body +} + +trait Retain extends Receive[Message] { + var body: Any = _ + var headers = Map.empty[String, Any] + abstract override def onMessage(msg: Message): Unit = { + super.onMessage(msg) + body = msg.body + headers = msg.headers + } +} + +trait Countdown[T] extends Receive[T] { + val count = 1 + val duration = 5000 + val latch = new CountDownLatch(count) + + def waitFor = latch.await(duration, TimeUnit.MILLISECONDS) + def countDown = latch.countDown + + abstract override def onMessage(msg: T) = { + super.onMessage(msg) + countDown + } +} + +class Tester extends Actor with Receive[Message] { + def receive = { + case msg: Message => onMessage(msg) + } + def onMessage(msg: Message): Unit = {} +} diff --git a/akka-cluster/akka-cluster-jgroups/pom.xml b/akka-cluster/akka-cluster-jgroups/pom.xml deleted file mode 100644 index 85d25e2330..0000000000 --- a/akka-cluster/akka-cluster-jgroups/pom.xml +++ /dev/null @@ -1,24 +0,0 @@ - - 4.0.0 - - akka-cluster-jgroups - Akka Cluster JGroups Module - - jar - - - akka-cluster-parent - se.scalablesolutions.akka - 0.7-SNAPSHOT - - - - - jgroups - jgroups - 2.8.0.CR7 - - - - diff --git a/akka-cluster/akka-cluster-jgroups/src/main/scala/JGroupsClusterActor.scala b/akka-cluster/akka-cluster-jgroups/src/main/scala/JGroupsClusterActor.scala index afc917260e..e800def56f 100644 --- a/akka-cluster/akka-cluster-jgroups/src/main/scala/JGroupsClusterActor.scala +++ b/akka-cluster/akka-cluster-jgroups/src/main/scala/JGroupsClusterActor.scala @@ -1,7 +1,12 @@ -package se.scalablesolutions.akka.remote +package se.scalablesolutions.akka.cluster.jgroups import org.jgroups.{JChannel, View => JG_VIEW, Address, Message => JG_MSG, ExtendedMembershipListener, Receiver} +import se.scalablesolutions.akka.remote.ClusterActor._ +import se.scalablesolutions.akka.remote.BasicClusterActor + +import org.scala_tools.javautils.Imports._ + /** * Clustering support via JGroups. 
* @author Viktor Klang @@ -44,7 +49,8 @@ class JGroupsClusterActor extends BasicClusterActor { log debug "UNSUPPORTED: JGroupsClusterActor::unblock" //TODO HotSwap back and flush the buffer }) }) - channel.map(_.connect(name)) + + channel.foreach(_.connect(name)) } protected def toOneNode(dest : Address, msg: Array[Byte]): Unit = diff --git a/akka-cluster/akka-cluster-shoal/pom.xml b/akka-cluster/akka-cluster-shoal/pom.xml deleted file mode 100644 index b58e77dcf5..0000000000 --- a/akka-cluster/akka-cluster-shoal/pom.xml +++ /dev/null @@ -1,34 +0,0 @@ - - 4.0.0 - - akka-cluster-shoal - Akka Cluster Shoal Module - - jar - - - akka-cluster-parent - se.scalablesolutions.akka - 0.7-SNAPSHOT - - - - - - shoal-jxta - shoal - 1.1-20090818 - - - shoal-jxta - jxta - 1.1-20090818 - - - - diff --git a/akka-cluster/akka-cluster-shoal/src/main/scala/ShoalClusterActor.scala b/akka-cluster/akka-cluster-shoal/src/main/scala/ShoalClusterActor.scala index 007cc96b9a..2768592a8d 100644 --- a/akka-cluster/akka-cluster-shoal/src/main/scala/ShoalClusterActor.scala +++ b/akka-cluster/akka-cluster-shoal/src/main/scala/ShoalClusterActor.scala @@ -1,29 +1,16 @@ /** * Copyright (C) 2009-2010 Scalable Solutions AB */ -package se.scalablesolutions.akka.remote +package se.scalablesolutions.akka.cluster.shoal -import se.scalablesolutions.akka.Config.config import java.util.Properties -import com.sun.enterprise.ee.cms.core.{CallBack, - GMSConstants, - GMSFactory, - GroupManagementService, - MessageSignal, - Signal, - GMSException, - SignalAcquireException, - SignalReleaseException, - JoinNotificationSignal, - FailureSuspectedSignal, - FailureNotificationSignal } -import com.sun.enterprise.ee.cms.impl.client.{FailureNotificationActionFactoryImpl, - FailureSuspectedActionFactoryImpl, - JoinNotificationActionFactoryImpl, - MessageActionFactoryImpl, - PlannedShutdownActionFactoryImpl -} +import se.scalablesolutions.akka.config.Config.config +import se.scalablesolutions.akka.remote.{ClusterActor, BasicClusterActor, RemoteServer} + +import com.sun.enterprise.ee.cms.core._ +import com.sun.enterprise.ee.cms.impl.client._ + /** * Clustering support via Shoal. 
*/ @@ -67,9 +54,9 @@ class ShoalClusterActor extends BasicClusterActor { * Adds callbacks and boots up the cluster */ protected def createGMS : GroupManagementService = { - - val g = GMSFactory.startGMSModule(serverName,name, GroupManagementService.MemberType.CORE, properties()).asInstanceOf[GroupManagementService] - + val g = GMSFactory + .startGMSModule(serverName,name, GroupManagementService.MemberType.CORE, properties()) + .asInstanceOf[GroupManagementService] val callback = createCallback g.addActionFactory(new JoinNotificationActionFactoryImpl(callback)) g.addActionFactory(new FailureSuspectedActionFactoryImpl(callback)) @@ -85,6 +72,8 @@ class ShoalClusterActor extends BasicClusterActor { */ protected def createCallback : CallBack = { import scala.collection.JavaConversions._ + import ClusterActor._ + val me = this new CallBack { def processNotification(signal : Signal) { @@ -92,17 +81,17 @@ class ShoalClusterActor extends BasicClusterActor { signal.acquire() if(isActive) { signal match { - case ms : MessageSignal => me send Message(ms.getMemberToken,ms.getMessage) - case jns : JoinNotificationSignal => me send View(Set[ADDR_T]() ++ jns.getCurrentCoreMembers - serverName) - case fss : FailureSuspectedSignal => me send Zombie(fss.getMemberToken) - case fns : FailureNotificationSignal => me send Zombie(fns.getMemberToken) + case ms : MessageSignal => me send Message[ADDR_T](ms.getMemberToken,ms.getMessage) + case jns : JoinNotificationSignal => me send View[ADDR_T](Set[ADDR_T]() ++ jns.getCurrentCoreMembers.asScala - serverName) + case fss : FailureSuspectedSignal => me send Zombie[ADDR_T](fss.getMemberToken) + case fns : FailureNotificationSignal => me send Zombie[ADDR_T](fns.getMemberToken) case _ => log.debug("Unhandled signal: [%s]",signal) } } signal.release() } catch { - case e : SignalAcquireException => log.warning(e,"SignalAcquireException") - case e : SignalReleaseException => log.warning(e,"SignalReleaseException") + case e : SignalAcquireException => log.warning(e,"SignalAcquireException") + case e : SignalReleaseException => log.warning(e,"SignalReleaseException") } } } diff --git a/akka-cluster/akka-cluster-tribes/pom.xml b/akka-cluster/akka-cluster-tribes/pom.xml deleted file mode 100644 index efcea51aa8..0000000000 --- a/akka-cluster/akka-cluster-tribes/pom.xml +++ /dev/null @@ -1,24 +0,0 @@ - - 4.0.0 - - akka-cluster-tribes - Akka Cluster Tribes Module - - jar - - - akka-cluster-parent - se.scalablesolutions.akka - 0.7-SNAPSHOT - - - - - org.apache.tomcat - tribes - 6.0.20 - - - - diff --git a/akka-cluster/pom.xml b/akka-cluster/pom.xml deleted file mode 100644 index a502edea3f..0000000000 --- a/akka-cluster/pom.xml +++ /dev/null @@ -1,42 +0,0 @@ - - 4.0.0 - - akka-cluster-parent - Akka Cluster Modules - - pom - - - akka - se.scalablesolutions.akka - 0.7-SNAPSHOT - - - - akka-cluster-jgroups - - akka-cluster-shoal - - - - - akka-core - ${project.groupId} - ${project.version} - - - - org.scalatest - scalatest - ${scalatest.version} - test - - - junit - junit - 4.5 - test - - - diff --git a/akka-comet/pom.xml b/akka-comet/pom.xml deleted file mode 100644 index 88cdc0cf57..0000000000 --- a/akka-comet/pom.xml +++ /dev/null @@ -1,54 +0,0 @@ - - 4.0.0 - - akka-comet - Akka Comet Module - - jar - - - akka - se.scalablesolutions.akka - 0.7-SNAPSHOT - - - - - - akka-rest - ${project.groupId} - ${project.version} - - - - - com.sun.grizzly - grizzly-comet-webserver - ${grizzly.version} - - - - - javax.servlet - servlet-api - 2.5 - - - org.atmosphere - atmosphere-annotations - 
${atmosphere.version} - - - org.atmosphere - atmosphere-jersey - ${atmosphere.version} - - - org.atmosphere - atmosphere-runtime - ${atmosphere.version} - - - diff --git a/akka-comet/src/main/scala/AkkaClusterBroadcastFilter.scala b/akka-comet/src/main/scala/AkkaClusterBroadcastFilter.scala index 724c82432e..8fdd47fddd 100644 --- a/akka-comet/src/main/scala/AkkaClusterBroadcastFilter.scala +++ b/akka-comet/src/main/scala/AkkaClusterBroadcastFilter.scala @@ -4,13 +4,13 @@ package se.scalablesolutions.akka.comet -import se.scalablesolutions.akka.actor.{Actor} -import se.scalablesolutions.akka.remote.{Cluster} -import scala.reflect.{BeanProperty} +import se.scalablesolutions.akka.actor.Actor +import se.scalablesolutions.akka.remote.Cluster +import scala.reflect.BeanProperty import org.atmosphere.cpr.{BroadcastFilter, ClusterBroadcastFilter, Broadcaster} sealed trait ClusterCometMessageType -case class ClusterCometBroadcast(val name : String, val msg : AnyRef) extends ClusterCometMessageType +case class ClusterCometBroadcast(name: String, msg: AnyRef) extends ClusterCometMessageType /** * Enables explicit clustering of Atmosphere (Comet) resources diff --git a/akka-kernel/src/main/scala/BootableCometActorService.scala b/akka-comet/src/main/scala/BootableCometActorService.scala similarity index 87% rename from akka-kernel/src/main/scala/BootableCometActorService.scala rename to akka-comet/src/main/scala/BootableCometActorService.scala index b014fcb9ad..496cc33aed 100644 --- a/akka-kernel/src/main/scala/BootableCometActorService.scala +++ b/akka-comet/src/main/scala/BootableCometActorService.scala @@ -2,16 +2,16 @@ * Copyright (C) 2009-2010 Scalable Solutions AB */ -package se.scalablesolutions.akka +package se.scalablesolutions.akka.comet import com.sun.grizzly.http.SelectorThread import com.sun.grizzly.http.servlet.ServletAdapter import com.sun.grizzly.standalone.StaticStreamAlgorithm import javax.ws.rs.core.UriBuilder -import se.scalablesolutions.akka.comet.AkkaServlet + import se.scalablesolutions.akka.actor.BootableActorLoaderService -import se.scalablesolutions.akka.util.{Bootable,Logging} +import se.scalablesolutions.akka.util.{Bootable, Logging} /** * Handles the Akka Comet Support (load/unload) @@ -19,16 +19,17 @@ import se.scalablesolutions.akka.util.{Bootable,Logging} trait BootableCometActorService extends Bootable with Logging { self : BootableActorLoaderService => - import Config._ + import config.Config._ val REST_HOSTNAME = config.getString("akka.rest.hostname", "localhost") val REST_URL = "http://" + REST_HOSTNAME val REST_PORT = config.getInt("akka.rest.port", 9998) + protected var jerseySelectorThread: Option[SelectorThread] = None abstract override def onLoad = { super.onLoad - if(config.getBool("akka.rest.service", true)){ + if (config.getBool("akka.rest.service", true)) { val uri = UriBuilder.fromUri(REST_URL).port(REST_PORT).build() @@ -42,8 +43,7 @@ trait BootableCometActorService extends Bootable with Logging { adapter.setHandleStaticResources(true) adapter.setServletInstance(new AkkaServlet) adapter.setContextPath(uri.getPath) - //Using autodetection for now - //adapter.addInitParameter("cometSupport", "org.atmosphere.container.GrizzlyCometSupport") + adapter.addInitParameter("cometSupport", "org.atmosphere.container.GrizzlyCometSupport") if (HOME.isDefined) adapter.setRootFolder(HOME.get + "/deploy/root") log.info("REST service root path [%s] and context path [%s]", adapter.getRootFolder, adapter.getContextPath) diff --git a/akka-core/pom.xml b/akka-core/pom.xml 
deleted file mode 100644 index ef3fa24829..0000000000 --- a/akka-core/pom.xml +++ /dev/null @@ -1,111 +0,0 @@ - - 4.0.0 - - akka-core - Akka Core - Actors, Remote Actors, Transactors and STM Module - - jar - - - akka - se.scalablesolutions.akka - 0.7-SNAPSHOT - - - - - - akka-util-java - ${project.groupId} - ${project.version} - - - akka-util - ${project.groupId} - ${project.version} - - - org.scala-lang - scala-library - ${scala.version} - - - org.codehaus.aspectwerkz - aspectwerkz-nodeps-jdk5 - 2.1 - - - org.codehaus.aspectwerkz - aspectwerkz-jdk5 - 2.1 - - - org.jboss.netty - netty - 3.2.0.ALPHA3 - - - - - voldemort.store.compress - h2-lzf - 1.0 - - - org.codehaus.jackson - jackson-core-asl - 1.2.1 - - - org.codehaus.jackson - jackson-mapper-asl - 1.2.1 - - - - net.databinder - dispatch-json_2.8.0.Beta1 - 0.7.0 - - - commons-io - commons-io - 1.4 - - - commons-codec - commons-codec - 1.4 - - - net.databinder - dispatch-http_2.8.0.Beta1 - 0.7.0 - - - sjson.json - sjson - 0.5-SNAPSHOT-2.8.Beta1 - - - - - org.scalatest - scalatest - ${scalatest.version} - test - - - junit - junit - 4.5 - test - - - diff --git a/akka-core/src/main/scala/actor/ActiveObject.scala b/akka-core/src/main/scala/actor/ActiveObject.scala index 56d78268f5..4cb2d2aba8 100644 --- a/akka-core/src/main/scala/actor/ActiveObject.scala +++ b/akka-core/src/main/scala/actor/ActiveObject.scala @@ -6,7 +6,7 @@ package se.scalablesolutions.akka.actor import se.scalablesolutions.akka.remote.protobuf.RemoteProtocol.RemoteRequest import se.scalablesolutions.akka.remote.{RemoteProtocolBuilder, RemoteClient, RemoteRequestIdFactory} -import se.scalablesolutions.akka.dispatch.{MessageDispatcher, FutureResult} +import se.scalablesolutions.akka.dispatch.{MessageDispatcher, Future} import se.scalablesolutions.akka.config.ScalaConfig._ import se.scalablesolutions.akka.serialization.Serializer import se.scalablesolutions.akka.util._ @@ -19,7 +19,7 @@ import java.net.InetSocketAddress import java.lang.reflect.{InvocationTargetException, Method} object Annotations { - import se.scalablesolutions.akka.annotation._ + import se.scalablesolutions.akka.actor.annotation._ val oneway = classOf[oneway] val transactionrequired = classOf[transactionrequired] val prerestart = classOf[prerestart] @@ -209,22 +209,24 @@ object ActiveObject { } private[akka] object AspectInitRegistry { - private val inits = new java.util.concurrent.ConcurrentHashMap[AnyRef, AspectInit] + private val initializations = new java.util.concurrent.ConcurrentHashMap[AnyRef, AspectInit] def initFor(target: AnyRef) = { - val init = inits.get(target) - inits.remove(target) + val init = initializations.get(target) + initializations.remove(target) init } - def register(target: AnyRef, init: AspectInit) = inits.put(target, init) + def register(target: AnyRef, init: AspectInit) = initializations.put(target, init) } private[akka] sealed case class AspectInit( val target: Class[_], val actor: Dispatcher, val remoteAddress: Option[InetSocketAddress], - val timeout: Long) + val timeout: Long) { + def this(target: Class[_],actor: Dispatcher, timeout: Long) = this(target, actor, None, timeout) +} /** * AspectWerkz Aspect that is turning POJOs into Active Object. 
@@ -300,7 +302,7 @@ private[akka] sealed class ActiveObjectAspect { } } - private def getResultOrThrowException[T](future: FutureResult): Option[T] = + private def getResultOrThrowException[T](future: Future): Option[T] = if (future.exception.isDefined) { val (_, cause) = future.exception.get throw cause @@ -351,20 +353,26 @@ private[akka] sealed class ActiveObjectAspect { } } +object Dispatcher { + val ZERO_ITEM_CLASS_ARRAY = Array[Class[_]]() + val ZERO_ITEM_OBJECT_ARRAY = Array[Object]() +} + /** * Generic Actor managing Invocation dispatch, transaction and error management. * * @author Jonas Bonér */ private[akka] class Dispatcher(transactionalRequired: Boolean, val callbacks: Option[RestartCallbacks]) extends Actor { - private val ZERO_ITEM_CLASS_ARRAY = Array[Class[_]]() - private val ZERO_ITEM_OBJECT_ARRAY = Array[Object]() + import Dispatcher._ private[actor] var target: Option[AnyRef] = None private var preRestart: Option[Method] = None private var postRestart: Option[Method] = None private var initTxState: Option[Method] = None + def this(transactionalRequired: Boolean) = this(transactionalRequired,None) + private[actor] def initialize(targetClass: Class[_], targetInstance: AnyRef) = { if (transactionalRequired || targetClass.isAnnotationPresent(Annotations.transactionrequired)) makeTransactionRequired id = targetClass.getName diff --git a/akka-core/src/main/scala/actor/Actor.scala b/akka-core/src/main/scala/actor/Actor.scala index 7a50100e8d..e8ef14d7fc 100644 --- a/akka-core/src/main/scala/actor/Actor.scala +++ b/akka-core/src/main/scala/actor/Actor.scala @@ -4,28 +4,32 @@ package se.scalablesolutions.akka.actor -import se.scalablesolutions.akka.Config._ import se.scalablesolutions.akka.dispatch._ +import se.scalablesolutions.akka.config.Config._ import se.scalablesolutions.akka.config.{AllForOneStrategy, OneForOneStrategy, FaultHandlingStrategy} import se.scalablesolutions.akka.config.ScalaConfig._ import se.scalablesolutions.akka.stm.Transaction._ import se.scalablesolutions.akka.stm.TransactionManagement._ -import se.scalablesolutions.akka.stm.{StmException, TransactionManagement} +import se.scalablesolutions.akka.stm.TransactionManagement import se.scalablesolutions.akka.remote.protobuf.RemoteProtocol.RemoteRequest import se.scalablesolutions.akka.remote.{RemoteProtocolBuilder, RemoteClient, RemoteRequestIdFactory} import se.scalablesolutions.akka.serialization.Serializer import se.scalablesolutions.akka.util.{HashCode, Logging, UUID} import org.multiverse.api.ThreadLocalTransaction._ +import org.multiverse.commitbarriers.CountDownCommitBarrier import java.util.{Queue, HashSet} import java.util.concurrent.ConcurrentLinkedQueue import java.net.InetSocketAddress +import java.util.concurrent.locks.{Lock, ReentrantLock} /** * Implements the Transactor abstraction. E.g. a transactional actor. *

* Equivalent to invoking the makeTransactionRequired method in the body of the Actor. * * @author Jonas Bonér */ trait Transactor extends Actor { makeTransactionRequired @@ -35,6 +39,8 @@ trait Transactor extends Actor { * Extend this abstract class to create a remote actor. *

* Equivalent to invoking the makeRemote(..) method in the body of the Actor. * * @author Jonas Bonér */ abstract class RemoteActor(hostname: String, port: Int) extends Actor { makeRemote(hostname, port) @@ -64,6 +70,8 @@ class ActorMessageInvoker(val actor: Actor) extends MessageInvoker { } /** + * Utility class with factory methods for creating Actors. + * + * @author Jonas Bonér */ object Actor extends Logging { @@ -94,13 +102,28 @@ object Actor extends Logging { } /** - * Use to create an anonymous event-driven actor with both an init block and a message loop block. + * Use to create an anonymous transactional event-driven actor. * The actor is started when created. * Example: *

    * import Actor._
    *
-   * val a = actor  {
+   * val a = transactor  {
+   *   case msg => ... // handle message
+   * }
+   * 
+ */ + def transactor(body: PartialFunction[Any, Unit]): Actor = new Transactor() { + start + def receive: PartialFunction[Any, Unit] = body + } + + /** + * Use to create an anonymous event-driven actor with both an init block and a message loop block. + * The actor is started when created. + * Example: + *
+   * val a = Actor.init  {
    *   ... // init stuff
    * } receive  {
    *   case msg => ... // handle message
@@ -108,8 +131,8 @@ object Actor extends Logging {
    * 
* */ - def actor(body: => Unit) = { - def handler(body: => Unit) = new { + def init[A](body: => Unit) = { + def handler[A](body: => Unit) = new { def receive(handler: PartialFunction[Any, Unit]) = new Actor() { start body @@ -196,9 +219,9 @@ object Actor extends Logging { * * @author Jonas Bonér */ -trait Actor extends TransactionManagement { +trait Actor extends TransactionManagement with Logging { implicit protected val self: Option[Actor] = Some(this) - implicit protected val transactionFamily: String = this.getClass.getName + implicit protected val transactionFamilyName: String = this.getClass.getName // Only mutable for RemoteServer in order to maintain identity across nodes private[akka] var _uuid = UUID.newUuid.toString @@ -219,6 +242,12 @@ trait Actor extends TransactionManagement { private[akka] var _replyToAddress: Option[InetSocketAddress] = None private[akka] val _mailbox: Queue[MessageInvocation] = new ConcurrentLinkedQueue[MessageInvocation] + /** + * This lock ensures thread safety in the dispatching: only one message can + * be dispatched at once on the actor. + */ + private[akka] val _dispatcherLock: Lock = new ReentrantLock + // ==================================== // protected fields // ==================================== @@ -240,7 +269,7 @@ trait Actor extends TransactionManagement { * But it can be used for advanced use-cases when one might want to store away the future and * resolve it later and/or somewhere else. */ - protected var senderFuture: Option[CompletableFutureResult] = None + protected var senderFuture: Option[CompletableFuture] = None // ==================================== // ==== USER CALLBACKS TO OVERRIDE ==== @@ -309,9 +338,9 @@ trait Actor extends TransactionManagement { * If 'trapExit' is set for the actor to act as supervisor, then a faultHandler must be defined. * Can be one of: *
-   *  AllForOneStrategy(maxNrOfRetries: Int, withinTimeRange: Int)
+   *  faultHandler = Some(AllForOneStrategy(maxNrOfRetries, withinTimeRange))
    *
-   *  OneForOneStrategy(maxNrOfRetries: Int, withinTimeRange: Int)
+   *  faultHandler = Some(OneForOneStrategy(maxNrOfRetries, withinTimeRange))
    * 
*/ protected var faultHandler: Option[FaultHandlingStrategy] = None @@ -329,13 +358,13 @@ trait Actor extends TransactionManagement { * Set to true if messages should have REQUIRES_NEW semantics, e.g. a new transaction should * start if there is no one running, else it joins the existing transaction. */ - @volatile protected var isTransactionRequiresNew = false + @volatile protected var isTransactor = false /** * User overridable callback/setting. * - * Partial function implementing the server logic. - * To be implemented by subclassing server. + * Partial function implementing the actor logic. + * To be implemented by subclassing actor. *

* Example code: *

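The example block the scaladoc above refers to is cut off at this hunk boundary. As a hedged sketch (not part of the changeset; class and message names are hypothetical), a subclassed actor's receive against this snapshot's API might look like:

  import se.scalablesolutions.akka.actor.Actor

  class EchoActor extends Actor {
    protected def receive = {
      case msg: String => reply("echo: " + msg) // answers the sender or its future
      case unknown => log.warning("unknown message [%s]", unknown) // Actor now mixes in Logging
    }
  }

  val echo = new EchoActor
  echo.start
  echo ! "hello" // handled asynchronously by 'receive'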
@@ -479,7 +508,7 @@ trait Actor extends TransactionManagement {
   /**
    * Same as the '!' method but does not take an implicit sender as second parameter.
    */
-  def send(message: Any) = !(message)(None)
+  def send(message: Any) = this.!(message)(None)
 
   /**
    * Sends a message asynchronously and waits on a future for a reply message.
@@ -496,8 +525,6 @@ trait Actor extends TransactionManagement {
   def !![T](message: Any, timeout: Long): Option[T] = {
     if (_isKilled) throw new ActorKilledException("Actor [" + toString + "] has been killed, can't respond to messages")
     if (_isRunning) {
-      val from = if (sender != null && sender.isInstanceOf[Actor]) Some(sender.asInstanceOf[Actor])
-      else None
       val future = postMessageToMailboxAndCreateFutureResultWithTimeout(message, timeout, None)
       val isActiveObject = message.isInstanceOf[Invocation]
       if (isActiveObject && message.asInstanceOf[Invocation].isVoid) future.completeWithResult(None)
@@ -528,7 +555,10 @@ trait Actor extends TransactionManagement {
    */
   def !![T](message: Any): Option[T] = !![T](message, timeout)
 
-  def !!!(message: Any): FutureResult = {
+  /**
+   * FIXME document !!!
+   */
+  def !!!(message: Any): Future = {
     if (_isKilled) throw new ActorKilledException("Actor [" + toString + "] has been killed, can't respond to messages")
     if (_isRunning) {
       postMessageToMailboxAndCreateFutureResultWithTimeout(message, timeout, None)
@@ -536,12 +566,6 @@ trait Actor extends TransactionManagement {
       "Actor has not been started, you need to invoke 'actor.start' before using it")
   }
   
-  /**
-   * This method is evil and has been removed. Use '!!' with a timeout instead.
-   */
-  def !?[T](message: Any): T = throw new UnsupportedOperationException(
-    "'!?' is evil and has been removed. Use '!!' with a timeout instead")
-
   /**
    * Forwards the message and passes the original sender actor as the sender.
    * 

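Taken together, the hunks above rework the message-send API. A hedged summary by example (not part of the changeset; assumes a started actor a and this snapshot's se.scalablesolutions.akka.dispatch.Future):

  a ! "fire-and-forget" // asynchronous, no reply expected
  a.send("fire-and-forget") // same as '!' but without the implicit sender

  val answer: Option[String] = a !! ("ping", 1000) // blocks up to 1000 ms on an internal future for the reply
  val future: Future = a !!! "ping" // returns the reply future immediately;
                                    // inspect it via future.result / future.exception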
@@ -637,7 +661,7 @@ trait Actor extends TransactionManagement { def makeTransactionRequired = synchronized { if (_isRunning) throw new IllegalArgumentException( "Can not make actor transaction required after it has been started") - else isTransactionRequiresNew = true + else isTransactor = true } /** @@ -651,9 +675,9 @@ trait Actor extends TransactionManagement { * To be invoked from within the actor itself. */ protected[this] def link(actor: Actor) = { - getLinkedActors.add(actor) if (actor._supervisor.isDefined) throw new IllegalStateException( "Actor can only have one supervisor [" + actor + "], e.g. link(actor) fails") + getLinkedActors.add(actor) actor._supervisor = Some(this) Actor.log.debug("Linking actor [%s] to actor [%s]", actor, this) } @@ -783,6 +807,8 @@ trait Actor extends TransactionManagement { } protected[akka] def postMessageToMailbox(message: Any, sender: Option[Actor]): Unit = { + joinTransaction(message) + if (_remoteAddress.isDefined) { val requestBuilder = RemoteRequest.newBuilder .setId(RemoteRequestIdFactory.nextId) @@ -794,19 +820,18 @@ trait Actor extends TransactionManagement { .setIsEscaped(false) val id = registerSupervisorAsRemoteActor - if(id.isDefined) - requestBuilder.setSupervisorUuid(id.get) + if (id.isDefined) requestBuilder.setSupervisorUuid(id.get) // set the source fields used to reply back to the original sender // (i.e. not the remote proxy actor) - if(sender.isDefined) { + if (sender.isDefined) { val s = sender.get requestBuilder.setSourceTarget(s.getClass.getName) requestBuilder.setSourceUuid(s.uuid) - val (host,port) = s._replyToAddress.map(a => (a.getHostName,a.getPort)).getOrElse((Actor.HOSTNAME,Actor.PORT)) + val (host, port) = s._replyToAddress.map(a => (a.getHostName,a.getPort)).getOrElse((Actor.HOSTNAME,Actor.PORT)) - log.debug("Setting sending actor as %s @ %s:%s", s.getClass.getName, host, port) + Actor.log.debug("Setting sending actor as %s @ %s:%s", s.getClass.getName, host, port) requestBuilder.setSourceHostname(host) requestBuilder.setSourcePort(port) @@ -814,20 +839,21 @@ trait Actor extends TransactionManagement { RemoteProtocolBuilder.setMessage(message, requestBuilder) RemoteClient.clientFor(_remoteAddress.get).send(requestBuilder.build, None) } else { - val invocation = new MessageInvocation(this, message, None, sender, currentTransaction.get) + val invocation = new MessageInvocation(this, message, None, sender, transactionSet.get) if (_isEventBased) { _mailbox.add(invocation) if (_isSuspended) invocation.send } - else - invocation.send + else invocation.send } } - + protected[akka] def postMessageToMailboxAndCreateFutureResultWithTimeout( message: Any, timeout: Long, - senderFuture: Option[CompletableFutureResult]): CompletableFutureResult = { + senderFuture: Option[CompletableFuture]): CompletableFuture = { + joinTransaction(message) + if (_remoteAddress.isDefined) { val requestBuilder = RemoteRequest.newBuilder .setId(RemoteRequestIdFactory.nextId) @@ -845,8 +871,8 @@ trait Actor extends TransactionManagement { else throw new IllegalStateException("Expected a future from remote call to actor " + toString) } else { val future = if (senderFuture.isDefined) senderFuture.get - else new DefaultCompletableFutureResult(timeout) - val invocation = new MessageInvocation(this, message, Some(future), None, currentTransaction.get) + else new DefaultCompletableFuture(timeout) + val invocation = new MessageInvocation(this, message, Some(future), None, transactionSet.get) if (_isEventBased) { _mailbox.add(invocation) invocation.send @@ 
-855,6 +881,13 @@ trait Actor extends TransactionManagement { } } + private def joinTransaction(message: Any) = if (isTransactionSetInScope) { + // FIXME test to run bench without this trace call + Actor.log.trace("Joining transaction set [%s];\n\tactor %s\n\twith message [%s]", + getTransactionSetInScope, toString, message) + getTransactionSetInScope.incParties + } + /** * Callback for the dispatcher. E.g. single entry point to the user code and all protected[this] methods. */ @@ -870,7 +903,7 @@ trait Actor extends TransactionManagement { } private def dispatch[T](messageHandle: MessageInvocation) = { - setTransaction(messageHandle.tx) + setTransactionSet(messageHandle.transactionSet) val message = messageHandle.message //serializeMessage(messageHandle.message) senderFuture = messageHandle.future @@ -892,47 +925,60 @@ trait Actor extends TransactionManagement { } private def transactionalDispatch[T](messageHandle: MessageInvocation) = { - setTransaction(messageHandle.tx) + var topLevelTransaction = false + val txSet: Option[CountDownCommitBarrier] = + if (messageHandle.transactionSet.isDefined) messageHandle.transactionSet + else { + topLevelTransaction = true // FIXME create a new internal atomic block that can wait for X seconds if top level tx + if (isTransactor) { + Actor.log.trace("Creating a new transaction set (top-level transaction)\n\tfor actor %s\n\twith message %s", + toString, messageHandle) + Some(createNewTransactionSet) + } else None + } + setTransactionSet(txSet) val message = messageHandle.message //serializeMessage(messageHandle.message) senderFuture = messageHandle.future sender = messageHandle.sender def proceed = { - try { - incrementTransaction - if (base.isDefinedAt(message)) base(message) // invoke user actor's receive partial function - else throw new IllegalArgumentException( - "Actor " + toString + " could not process message [" + message + "]" + - "\n\tsince no matching 'case' clause in its 'receive' method could be found") - } finally { - decrementTransaction - } + if (base.isDefinedAt(message)) base(message) // invoke user actor's receive partial function + else throw new IllegalArgumentException( + toString + " could not process message [" + message + "]" + + "\n\tsince no matching 'case' clause in its 'receive' method could be found") + setTransactionSet(txSet) // restore transaction set to allow atomic block to do commit } try { - if (isTransactionRequiresNew && !isTransactionInScope) { - if (senderFuture.isEmpty) throw new StmException( - "Can't continue transaction in a one-way fire-forget message send" + - "\n\tE.g. using Actor '!' method or Active Object 'void' method" + - "\n\tPlease use the Actor '!!' method or Active Object method with non-void return type") + if (isTransactor) { atomic { proceed } } else proceed } catch { + case e: IllegalStateException => {} case e => + // abort transaction set + if (isTransactionSetInScope) try { + getTransactionSetInScope.abort + } catch { case e: IllegalStateException => {} } Actor.log.error(e, "Exception when invoking \n\tactor [%s] \n\twith message [%s]", this, message) + if (senderFuture.isDefined) senderFuture.get.completeWithException(this, e) - clearTransaction // need to clear currentTransaction before call to supervisor + + clearTransaction + if (topLevelTransaction) clearTransactionSet + // FIXME to fix supervisor restart of remote actor for oneway calls, inject a supervisor proxy that can send notification back to client if (_supervisor.isDefined) _supervisor.get ! 
Exit(this, e) } finally { clearTransaction + if (topLevelTransaction) clearTransactionSet } } - private def getResultOrThrowException[T](future: FutureResult): Option[T] = + private def getResultOrThrowException[T](future: Future): Option[T] = if (future.exception.isDefined) throw future.exception.get._2 else future.result.asInstanceOf[Option[T]] @@ -1037,6 +1083,5 @@ trait Actor extends TransactionManagement { that.asInstanceOf[Actor]._uuid == _uuid } - override def toString(): String = "Actor[" + id + ":" + uuid + "]" - + override def toString = "Actor[" + id + ":" + uuid + "]" } diff --git a/akka-core/src/main/scala/actor/ActorRegistry.scala b/akka-core/src/main/scala/actor/ActorRegistry.scala index 509750340e..3a81ae5266 100644 --- a/akka-core/src/main/scala/actor/ActorRegistry.scala +++ b/akka-core/src/main/scala/actor/ActorRegistry.scala @@ -6,81 +6,152 @@ package se.scalablesolutions.akka.actor import se.scalablesolutions.akka.util.Logging -import scala.collection.mutable.{ListBuffer, HashMap} +import scala.collection.mutable.ListBuffer import scala.reflect.Manifest +import java.util.concurrent.{CopyOnWriteArrayList, ConcurrentHashMap} + +sealed trait ActorRegistryEvent +case class ActorRegistered(actor: Actor) extends ActorRegistryEvent +case class ActorUnregistered(actor: Actor) extends ActorRegistryEvent /** - * Registry holding all actor instances, mapped by class and the actor's id field (which can be set by user-code). + * Registry holding all Actor instances in the whole system. + * Mapped by: + *

+ *  - the Actor's UUID
+ *  - the Actor's id field (which can be set by user-code)
+ *  - the Actor's class
+ *  - all Actors that are subtypes of a specific type
      * * @author Jonas Bonér */ object ActorRegistry extends Logging { - private val actorsByClassName = new HashMap[String, List[Actor]] - private val actorsById = new HashMap[String, List[Actor]] + private val actorsByUUID = new ConcurrentHashMap[String, Actor] + private val actorsById = new ConcurrentHashMap[String, List[Actor]] + private val actorsByClassName = new ConcurrentHashMap[String, List[Actor]] + private val registrationListeners = new CopyOnWriteArrayList[Actor] /** * Returns all actors in the system. */ - def actors: List[Actor] = synchronized { + def actors: List[Actor] = { val all = new ListBuffer[Actor] - actorsById.values.foreach(all ++= _) + val elements = actorsByUUID.elements + while (elements.hasMoreElements) all += elements.nextElement all.toList } /** * Invokes a function for all actors. */ - def foreach(f: (Actor) => Unit) = actors.foreach(f) + def foreach(f: (Actor) => Unit) = { + val elements = actorsByUUID.elements + while (elements.hasMoreElements) f(elements.nextElement) + } /** * Finds all actors that are subtypes of the class passed in as the Manifest argument. */ - def actorsFor[T <: Actor](implicit manifest: Manifest[T]): List[T] = synchronized { - for (actor <- actors; if manifest.erasure.isAssignableFrom(actor.getClass)) yield actor.asInstanceOf[T] + def actorsFor[T <: Actor](implicit manifest: Manifest[T]): List[T] = { + val all = new ListBuffer[T] + val elements = actorsByUUID.elements + while (elements.hasMoreElements) { + val actor = elements.nextElement + if (manifest.erasure.isAssignableFrom(actor.getClass)) { + all += actor.asInstanceOf[T] + } + } + all.toList } /** * Finds all actors of the exact type specified by the class passed in as the Class argument. */ - def actorsFor[T <: Actor](clazz: Class[T]): List[T] = synchronized { - actorsByClassName.get(clazz.getName) match { - case None => Nil - case Some(instances) => instances.asInstanceOf[List[T]] - } + def actorsFor[T <: Actor](clazz: Class[T]): List[T] = { + if (actorsByClassName.containsKey(clazz.getName)) { + actorsByClassName.get(clazz.getName).asInstanceOf[List[T]] + } else Nil } /** * Finds all actors that has a specific id. */ - def actorsFor(id : String): List[Actor] = synchronized { - actorsById.get(id) match { - case None => Nil - case Some(instances) => instances - } + def actorsFor(id: String): List[Actor] = { + if (actorsById.containsKey(id)) actorsById.get(id).asInstanceOf[List[Actor]] + else Nil } - def register(actor: Actor) = synchronized { - val className = actor.getClass.getName - actorsByClassName.get(className) match { - case Some(instances) => actorsByClassName + (className -> (actor :: instances)) - case None => actorsByClassName + (className -> (actor :: Nil)) - } + /** + * Finds the actor that has a specific UUID. + */ + def actorFor(uuid: String): Option[Actor] = { + if (actorsByUUID.containsKey(uuid)) Some(actorsByUUID.get(uuid)) + else None + } + + /** + * Registers an actor in the ActorRegistry. 
+ */ + def register(actor: Actor) = { + // UUID + actorsByUUID.put(actor.uuid, actor) + + // ID val id = actor.getId if (id eq null) throw new IllegalStateException("Actor.id is null " + actor) - actorsById.get(id) match { - case Some(instances) => actorsById + (id -> (actor :: instances)) - case None => actorsById + (id -> (actor :: Nil)) - } + if (actorsById.containsKey(id)) actorsById.put(id, actor :: actorsById.get(id)) + else actorsById.put(id, actor :: Nil) + + // Class name + val className = actor.getClass.getName + if (actorsByClassName.containsKey(className)) { + actorsByClassName.put(className, actor :: actorsByClassName.get(className)) + } else actorsByClassName.put(className, actor :: Nil) + + // notify listeners + foreachListener(_ send ActorRegistered(actor)) } - def unregister(actor: Actor) = synchronized { - actorsByClassName - actor.getClass.getName - actorsById - actor.getClass.getName + /** + * Unregisters an actor in the ActorRegistry. + */ + def unregister(actor: Actor) = { + actorsByUUID remove actor.uuid + actorsById remove actor.getId + actorsByClassName remove actor.getClass.getName + // notify listeners + foreachListener(_ send ActorUnregistered(actor)) } + /** + * Shuts down and unregisters all actors in the system. + */ def shutdownAll = { log.info("Shutting down all actors in the system...") - actorsById.foreach(entry => entry._2.map(_.stop)) - log.info("All actors have been shut down") + foreach(_.stop) + actorsByUUID.clear + actorsById.clear + actorsByClassName.clear + log.info("All actors have been shut down and unregistered from ActorRegistry") } -} + + /** + * Adds the registration listener to this registry's listener list. + */ + def addRegistrationListener(listener: Actor) = { + registrationListeners.add(listener) + } + + /** + * Removes the registration listener to this registry's listener list. + */ + def removeRegistrationListener(listener: Actor) = { + registrationListeners.remove(listener) + } + + private def foreachListener(f: (Actor) => Unit) { + val iterator = registrationListeners.iterator + while (iterator.hasNext) f(iterator.next) + } +} \ No newline at end of file diff --git a/akka-core/src/main/scala/actor/Agent.scala b/akka-core/src/main/scala/actor/Agent.scala new file mode 100644 index 0000000000..697b53d797 --- /dev/null +++ b/akka-core/src/main/scala/actor/Agent.scala @@ -0,0 +1,180 @@ +// Copyright © 2008-10 The original author or authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package se.scalablesolutions.akka.actor + +import se.scalablesolutions.akka.stm.Ref + +import java.util.concurrent.atomic.AtomicReference +import java.util.concurrent.CountDownLatch + +class AgentException private[akka](message: String) extends RuntimeException(message) + +/** +* The Agent class was strongly inspired by the agent principle in Clojure. +* Essentially, an agent wraps a shared mutable state and hides it behind +* a message-passing interface. Agents accept messages and process them on +* behalf of the wrapped state. 
+* +* Typically agents accept functions / commands as messages and ensure the +* submitted commands are executed against the internal agent's state in a +* thread-safe manner (sequentially). +* +* The submitted functions / commands take the internal state as a parameter +* and their output becomes the new internal state value. +* +* The code that is submitted to an agent doesn't need to pay attention to +* threading or synchronization; the agent will provide such guarantees by itself. +* +* If an Agent is used within an enclosing transaction, then it will participate +* in that transaction. +* +* Example of usage: +*
      +* val agent = Agent(5)
      +*
      +* agent update (_ + 1)
      +* agent update (_ * 2)
      +*
      +* val result = agent()
      +* ... // use result
      +*
      +* agent.close
      +* 
+* +* NOTE: You can't call 'agent.get' or 'agent()' within an enclosing transaction since +* that will block the transaction indefinitely. But 'agent.update' or 'Agent(value)' +* is fine. +* +* Original author: +* @author Vaclav Pech +* +* Initial AKKA port by: +* @author Viktor Klang +* +* Modifications by: +* @author Jonas Bonér +*/ +sealed class Agent[T] private (initialValue: T) extends Transactor { + import Agent._ + private lazy val value = Ref[T]() + + start + this ! ValueHolder(initialValue) + + /** + * Periodically handles incoming messages. + */ + def receive = { + case ValueHolder(x: T) => updateData(x) + case FunctionHolder(fun: (T => T)) => updateData(fun(value.getOrWait)) + case ProcedureHolder(fun: (T => Unit)) => fun(copyStrategy(value.getOrWait)) + } + + /** + * Specifies how a copy of the value is made, defaults to using identity. + */ + protected def copyStrategy(t: T): T = t + + + /** + * Updates the internal state with the value provided as a by-name parameter. + */ + private final def updateData(newData: => T): Unit = value.swap(newData) + + /** + * Submits a request to read the internal state. + * + * A copy of the internal state will be returned, depending on the underlying + * effective copyStrategy. Internally leverages the asynchronous getValue() + * method and then waits for its result on a CountDownLatch. + */ + final def get: T = { + if (isTransactionInScope) throw new AgentException( + "Can't call Agent.get within an enclosing transaction.\n\tWould block indefinitely.\n\tPlease refactor your code.") + val ref = new AtomicReference[T] + val latch = new CountDownLatch(1) + get((x: T) => {ref.set(x); latch.countDown}) + latch.await + ref.get + } + + /** + * Asynchronously submits a request to read the internal state. The supplied function + * will be executed on the returned internal state value. A copy of the internal state + * will be used, depending on the underlying effective copyStrategy. + */ + final def get(message: (T => Unit)): Unit = this ! ProcedureHolder(message) + + /** + * Submits a request to read the internal state. A copy of the internal state will be + * returned, depending on the underlying effective copyStrategy. Internally leverages + * the asynchronous getValue() method and then waits for its result on a CountDownLatch. + */ + final def apply(): T = get + + /** + * Submits the provided function for execution against the internal agent's state. + */ + final def apply(message: (T => T)): Unit = this ! FunctionHolder(message) + + /** + * Submits a new value to be set as the new agent's internal state. + */ + final def apply(message: T): Unit = this ! ValueHolder(message) + + /** + * Submits the provided function for execution against the internal agent's state. + */ + final def update(message: (T => T)): Unit = this ! FunctionHolder(message) + + /** + * Submits a new value to be set as the new agent's internal state. + */ + // FIXME Change to 'send' when we have Scala 2.8 and we can remove the Actor.send method + final def update(message: T): Unit = this ! ValueHolder(message) + + /** + * Closes the agent and makes it eligible for garbage collection. + * + * A closed agent can never be used again. + */ + def close = stop +} + +/** +* Provides factory methods to create Agents. +*/ +object Agent { + + /* + * The internal messages for passing around requests. 
+ */ + private case class ProcedureHolder[T](fun: ((T) => Unit)) + private case class FunctionHolder[T](fun: ((T) => T)) + private case class ValueHolder[T](value: T) + + /** + * Creates a new Agent of type T with the initial value of value. + */ + def apply[T](value: T): Agent[T] = new Agent(value) + + /** + * Creates a new Agent of type T with the initial value of value and with the + * specified copy function. + */ + def apply[T](value: T, newCopyStrategy: (T) => T) = new Agent(value) { + override def copyStrategy(t: T) = newCopyStrategy(t) + } +} \ No newline at end of file diff --git a/akka-core/src/main/scala/actor/BootableActorLoaderService.scala b/akka-core/src/main/scala/actor/BootableActorLoaderService.scala index 1bacbf6f59..5c80620d80 100644 --- a/akka-core/src/main/scala/actor/BootableActorLoaderService.scala +++ b/akka-core/src/main/scala/actor/BootableActorLoaderService.scala @@ -7,8 +7,8 @@ package se.scalablesolutions.akka.actor import java.io.File import java.net.URLClassLoader -import se.scalablesolutions.akka.util.{Bootable,Logging} -import se.scalablesolutions.akka.Config._ +import se.scalablesolutions.akka.util.{Bootable, Logging} +import se.scalablesolutions.akka.config.Config._ /** * Handles all modules in the deploy directory (load and unload) @@ -30,12 +30,8 @@ trait BootableActorLoaderService extends Bootable with Logging { } val toDeploy = for (f <- DEPLOY_DIR.listFiles().toArray.toList.asInstanceOf[List[File]]) yield f.toURL log.info("Deploying applications from [%s]: [%s]", DEPLOY, toDeploy.toArray.toList) - new URLClassLoader(toDeploy.toArray, ClassLoader.getSystemClassLoader) - } else if (getClass.getClassLoader.getResourceAsStream("akka.conf") ne null) { - getClass.getClassLoader - } else throw new IllegalStateException( - "AKKA_HOME is not defined and no 'akka.conf' can be found on the classpath, aborting") - ) + new URLClassLoader(toDeploy.toArray, getClass.getClassLoader) + } else getClass.getClassLoader) } abstract override def onLoad = { @@ -47,4 +43,4 @@ trait BootableActorLoaderService extends Bootable with Logging { } abstract override def onUnload = ActorRegistry.shutdownAll -} \ No newline at end of file +} diff --git a/akka-core/src/main/scala/actor/Scheduler.scala b/akka-core/src/main/scala/actor/Scheduler.scala index 7217af7574..f85511bd28 100644 --- a/akka-core/src/main/scala/actor/Scheduler.scala +++ b/akka-core/src/main/scala/actor/Scheduler.scala @@ -17,7 +17,7 @@ import java.util.concurrent._ import se.scalablesolutions.akka.config.ScalaConfig._ import se.scalablesolutions.akka.config.{AllForOneStrategy, OneForOneStrategy, FaultHandlingStrategy} -import se.scalablesolutions.akka.util.{Logging} +import se.scalablesolutions.akka.util.Logging case object UnSchedule case class SchedulerException(msg: String, e: Throwable) extends RuntimeException(msg, e) diff --git a/akka-core/src/main/scala/actor/Supervisor.scala b/akka-core/src/main/scala/actor/Supervisor.scala index ac5dc32303..bcb7223ddd 100644 --- a/akka-core/src/main/scala/actor/Supervisor.scala +++ b/akka-core/src/main/scala/actor/Supervisor.scala @@ -84,11 +84,14 @@ sealed class Supervisor private[akka] (handler: FaultHandlingStrategy, trapExcep faultHandler = Some(handler) dispatcher = Dispatchers.newThreadBasedDispatcher(this) - private val actors = new ConcurrentHashMap[String, Actor] + private val actors = new ConcurrentHashMap[String, List[Actor]] // Cheating, should really go through the dispatcher rather than direct access to a CHM - def getInstance[T](clazz: Class[T]) = 
actors.get(clazz.getName).asInstanceOf[T] - def getComponentInterfaces: List[Class[_]] = actors.values.toArray.toList.map(_.getClass) + def getInstance[T](clazz: Class[T]): List[T] = actors.get(clazz.getName).asInstanceOf[List[T]] + + def getComponentInterfaces: List[Class[_]] = List.flatten( + actors.values.toArray.toList.asInstanceOf[List[List[AnyRef]]]).map(_.getClass) + def isDefined(clazz: Class[_]): Boolean = actors.containsKey(clazz.getName) override def start: Actor = synchronized { @@ -106,7 +109,8 @@ } def receive = { - case unknown => throw new IllegalArgumentException("Supervisor " + toString + " does not respond to any messages. Unknown message [" + unknown + "]") + case unknown => throw new IllegalArgumentException( + "Supervisor " + toString + " does not respond to any messages. Unknown message [" + unknown + "]") } def configure(config: SupervisorConfig, factory: SupervisorFactory) = config match { @@ -114,15 +118,29 @@ servers.map(server => server match { case Supervise(actor, lifeCycle, remoteAddress) => - actors.put(actor.getClass.getName, actor) + val className = actor.getClass.getName + val currentActors = { + val list = actors.get(className) + if (list eq null) List[Actor]() + else list + } + actors.put(className, actor :: currentActors) actor.lifeCycle = Some(lifeCycle) startLink(actor) - remoteAddress.foreach(address => RemoteServer.actorsFor(RemoteServer.Address(address.hostname, address.port)).actors.put(actor.getId, actor)) + remoteAddress.foreach(address => RemoteServer.actorsFor( + RemoteServer.Address(address.hostname, address.port)) + .actors.put(actor.getId, actor)) case supervisorConfig @ SupervisorConfig(_, _) => // recursive supervisor configuration val supervisor = factory.newInstanceFor(supervisorConfig).start supervisor.lifeCycle = Some(LifeCycle(Permanent)) - actors.put(supervisor.getClass.getName, supervisor) + val className = supervisor.getClass.getName + val currentSupervisors = { + val list = actors.get(className) + if (list eq null) List[Actor]() + else list + } + actors.put(className, supervisor :: currentSupervisors) link(supervisor) }) } diff --git a/akka-core/src/main/scala/config/ActiveObjectConfigurator.scala b/akka-core/src/main/scala/config/ActiveObjectConfigurator.scala index 264b526002..8419c7fcd9 100644 --- a/akka-core/src/main/scala/config/ActiveObjectConfigurator.scala +++ b/akka-core/src/main/scala/config/ActiveObjectConfigurator.scala @@ -6,11 +6,10 @@ package se.scalablesolutions.akka.config import JavaConfig._ -import com.google.inject._ +import java.util.{List => JList} +import java.util.{ArrayList} -import java.util._ -//import org.apache.camel.impl.{JndiRegistry, DefaultCamelContext} -//import org.apache.camel.{Endpoint, Routes} +import com.google.inject._ /** * Configurator for the Active Objects. Used to do declarative configuration of supervision. @@ -23,16 +22,25 @@ import java.util._ * @author Jonas Bonér */ class ActiveObjectConfigurator { + import scala.collection.JavaConversions._ // TODO: make pluggable once we have f.e a SpringConfigurator private val INSTANCE = new ActiveObjectGuiceConfigurator /** - * Returns the active abject that has been put under supervision for the class specified. + * Returns a list of all active objects that have been put under supervision for the class specified. 
+ * + * @param clazz the class for the active object + * @return a list with all the active objects for the class + */ + def getInstances[T](clazz: Class[T]): JList[T] = INSTANCE.getInstance(clazz).foldLeft(new ArrayList[T]){ (l, i) => l add i ; l } + + /** + * Returns the first item in a list of all active objects that have been put under supervision for the class specified. * * @param clazz the class for the active object * @return the active object for the class */ - def getInstance[T](clazz: Class[T]): T = INSTANCE.getInstance(clazz) + def getInstance[T](clazz: Class[T]): T = INSTANCE.getInstance(clazz).head def configure(restartStrategy: RestartStrategy, components: Array[Component]): ActiveObjectConfigurator = { INSTANCE.configure( @@ -56,13 +64,7 @@ class ActiveObjectConfigurator { this } - //def addRoutes(routes: Routes): ActiveObjectConfigurator = { - // INSTANCE.addRoutes(routes) - // this - // } - - - def getComponentInterfaces: List[Class[_]] = { + def getComponentInterfaces: JList[Class[_]] = { val al = new ArrayList[Class[_]] for (c <- INSTANCE.getComponentInterfaces) al.add(c) al @@ -70,14 +72,8 @@ class ActiveObjectConfigurator { def getExternalDependency[T](clazz: Class[T]): T = INSTANCE.getExternalDependency(clazz) - //def getRoutingEndpoint(uri: String): Endpoint = INSTANCE.getRoutingEndpoint(uri) - - //def getRoutingEndpoints: java.util.Collection[Endpoint] = INSTANCE.getRoutingEndpoints - - //def getRoutingEndpoints(uri: String): java.util.Collection[Endpoint] = INSTANCE.getRoutingEndpoints(uri) - // TODO: should this be exposed? - def getGuiceModules: List[Module] = INSTANCE.getGuiceModules + def getGuiceModules: JList[Module] = INSTANCE.getGuiceModules def reset = INSTANCE.reset diff --git a/akka-core/src/main/scala/config/ActiveObjectGuiceConfigurator.scala b/akka-core/src/main/scala/config/ActiveObjectGuiceConfigurator.scala index 9868edb98b..e01f91f92a 100644 --- a/akka-core/src/main/scala/config/ActiveObjectGuiceConfigurator.scala +++ b/akka-core/src/main/scala/config/ActiveObjectGuiceConfigurator.scala @@ -11,22 +11,18 @@ import se.scalablesolutions.akka.actor.{Supervisor, ActiveObject, Dispatcher} import se.scalablesolutions.akka.remote.RemoteServer import se.scalablesolutions.akka.util.Logging -//import org.apache.camel.impl.{DefaultCamelContext} -//import org.apache.camel.{CamelContext, Endpoint, Routes} - import scala.collection.mutable.HashMap import java.net.InetSocketAddress import java.lang.reflect.Method /** - * This is an class for internal usage. Instead use the se.scalablesolutions.akka.config.ActiveObjectConfigurator class for creating ActiveObjects. + * This is a class for internal usage. Instead use the se.scalablesolutions.akka.config.ActiveObjectConfigurator + * class for creating ActiveObjects. * * @author Jonas Bonér */ -private[akka] class ActiveObjectGuiceConfigurator extends ActiveObjectConfiguratorBase with Logging { // with CamelConfigurator { - //val AKKA_CAMEL_ROUTING_SCHEME = "akka" - +private[akka] class ActiveObjectGuiceConfigurator extends ActiveObjectConfiguratorBase with Logging { private var injector: Injector = _ private var supervisor: Option[Supervisor] = None private var restartStrategy: RestartStrategy = _ @@ -35,7 +31,6 @@ private[akka] class ActiveObjectGuiceConfigurat private var bindings: List[DependencyBinding] = Nil private var configRegistry = new HashMap[Class[_], Component] // TODO is configRegistry needed? 
private var activeObjectRegistry = new HashMap[Class[_], Tuple3[AnyRef, AnyRef, Component]] - //private var camelContext = new DefaultCamelContext private var modules = new java.util.ArrayList[Module] private var methodToUriRegistry = new HashMap[Method, String] @@ -43,9 +38,9 @@ private[akka] class ActiveObjectGuiceConfigurator extends ActiveObjectConfigurat * Returns the active abject that has been put under supervision for the class specified. * * @param clazz the class for the active object - * @return the active object for the class + * @return the active objects for the class */ - override def getInstance[T](clazz: Class[T]): T = synchronized { + override def getInstance[T](clazz: Class[T]): List[T] = synchronized { log.debug("Retrieving active object [%s]", clazz.getName) if (injector eq null) throw new IllegalStateException( "inject() and/or supervise() must be called before invoking getInstance(clazz)") @@ -54,7 +49,7 @@ private[akka] class ActiveObjectGuiceConfigurator extends ActiveObjectConfigurat "Class [" + clazz.getName + "] has not been put under supervision" + "\n(by passing in the config to the 'configure' and then invoking 'supervise') method")) injector.injectMembers(targetInstance) - proxy.asInstanceOf[T] + List(proxy.asInstanceOf[T]) } override def isDefined(clazz: Class[_]): Boolean = synchronized { @@ -70,30 +65,15 @@ private[akka] class ActiveObjectGuiceConfigurator extends ActiveObjectConfigurat if (c.intf.isDefined) c.intf.get else c.target } - /* - override def getRoutingEndpoint(uri: String): Endpoint = synchronized { - camelContext.getEndpoint(uri) - } - override def getRoutingEndpoints: java.util.Collection[Endpoint] = synchronized { - camelContext.getEndpoints - } - - override def getRoutingEndpoints(uri: String): java.util.Collection[Endpoint] = synchronized { - camelContext.getEndpoints(uri) - } - */ override def configure(restartStrategy: RestartStrategy, components: List[Component]): ActiveObjectConfiguratorBase = synchronized { this.restartStrategy = restartStrategy this.components = components.toArray.toList.asInstanceOf[List[Component]] bindings = for (component <- this.components) yield { if (component.intf.isDefined) newDelegatingProxy(component) - else newSubclassingProxy(component) + else newSubclassingProxy(component) } - //camelContext.getRegistry.asInstanceOf[JndiRegistry].bind(component.name, activeObjectProxy) - //for (method <- component.intf.getDeclaredMethods.toList) registerMethodForUri(method, component.name) - //log.debug("Registering active object in Camel context under the name [%s]", component.target.getName) val deps = new java.util.ArrayList[DependencyBinding](bindings.size) for (b <- bindings) deps.add(b) modules.add(new ActiveObjectGuiceModule(deps)) @@ -105,7 +85,8 @@ private[akka] class ActiveObjectGuiceConfigurator extends ActiveObjectConfigurat val actor = new Dispatcher(component.transactionRequired, component.lifeCycle.callbacks) if (component.dispatcher.isDefined) actor.dispatcher = component.dispatcher.get val remoteAddress = - if (component.remoteAddress.isDefined) Some(new InetSocketAddress(component.remoteAddress.get.hostname, component.remoteAddress.get.port)) + if (component.remoteAddress.isDefined) + Some(new InetSocketAddress(component.remoteAddress.get.hostname, component.remoteAddress.get.port)) else None val proxy = ActiveObject.newInstance(targetClass, actor, remoteAddress, component.timeout).asInstanceOf[AnyRef] if (remoteAddress.isDefined) { @@ -125,9 +106,11 @@ private[akka] class 
ActiveObjectGuiceConfigurator extends ActiveObjectConfigurat val actor = new Dispatcher(component.transactionRequired, component.lifeCycle.callbacks) if (component.dispatcher.isDefined) actor.dispatcher = component.dispatcher.get val remoteAddress = - if (component.remoteAddress.isDefined) Some(new InetSocketAddress(component.remoteAddress.get.hostname, component.remoteAddress.get.port)) + if (component.remoteAddress.isDefined) + Some(new InetSocketAddress(component.remoteAddress.get.hostname, component.remoteAddress.get.port)) else None - val proxy = ActiveObject.newInstance(targetClass, targetInstance, actor, remoteAddress, component.timeout).asInstanceOf[AnyRef] + val proxy = ActiveObject.newInstance( + targetClass, targetInstance, actor, remoteAddress, component.timeout).asInstanceOf[AnyRef] if (remoteAddress.isDefined) { RemoteServer .actorsFor(RemoteServer.Address(component.remoteAddress.get.hostname, component.remoteAddress.get.port)) @@ -147,8 +130,6 @@ private[akka] class ActiveObjectGuiceConfigurator extends ActiveObjectConfigurat override def supervise: ActiveObjectConfiguratorBase = synchronized { if (injector eq null) inject supervisor = Some(ActiveObject.supervise(restartStrategy, supervised)) - //camelContext.addComponent(AKKA_CAMEL_ROUTING_SCHEME, new ActiveObjectComponent(this)) - //camelContext.start supervisor.get.start ConfiguratorRepository.registerConfigurator(this) this @@ -170,14 +151,7 @@ private[akka] class ActiveObjectGuiceConfigurator extends ActiveObjectConfigurat modules.add(module) this } - /* - override def addRoutes(routes: Routes): ActiveObjectConfiguratorBase = synchronized { - camelContext.addRoutes(routes) - this - } - override def getCamelContext: CamelContext = camelContext - */ def getGuiceModules: java.util.List[Module] = modules def reset = synchronized { @@ -187,21 +161,10 @@ private[akka] class ActiveObjectGuiceConfigurator extends ActiveObjectConfigurat methodToUriRegistry = new HashMap[Method, String] injector = null restartStrategy = null - //camelContext = new DefaultCamelContext } def stop = synchronized { - //camelContext.stop if (supervisor.isDefined) supervisor.get.stop } - -// def registerMethodForUri(method: Method, componentName: String) = -// methodToUriRegistry += method -> buildUri(method, componentName) - -// def lookupUriFor(method: Method): String = -// methodToUriRegistry.getOrElse(method, throw new IllegalStateException("Could not find URI for method [" + method.getName + "]")) - -// def buildUri(method: Method, componentName: String): String = -// AKKA_CAMEL_ROUTING_SCHEME + ":" + componentName + "." + method.getName } \ No newline at end of file diff --git a/akka-core/src/main/scala/config/Config.scala b/akka-core/src/main/scala/config/Config.scala index e993573972..82a1f54191 100644 --- a/akka-core/src/main/scala/config/Config.scala +++ b/akka-core/src/main/scala/config/Config.scala @@ -4,231 +4,71 @@ package se.scalablesolutions.akka.config -import se.scalablesolutions.akka.actor.Actor -import se.scalablesolutions.akka.dispatch.MessageDispatcher +import se.scalablesolutions.akka.util.Logging -sealed abstract class FaultHandlingStrategy -case class AllForOneStrategy(maxNrOfRetries: Int, withinTimeRange: Int) extends FaultHandlingStrategy -case class OneForOneStrategy(maxNrOfRetries: Int, withinTimeRange: Int) extends FaultHandlingStrategy - -/** - * Configuration classes - not to be used as messages. 
- * - * @author Jonas Bonér - */ -object ScalaConfig { - sealed abstract class ConfigElement - - abstract class Server extends ConfigElement - abstract class FailOverScheme extends ConfigElement - abstract class Scope extends ConfigElement - - case class SupervisorConfig(restartStrategy: RestartStrategy, worker: List[Server]) extends Server - - class Supervise(val actor: Actor, val lifeCycle: LifeCycle, _remoteAddress: RemoteAddress) extends Server { - val remoteAddress: Option[RemoteAddress] = if (_remoteAddress eq null) None else Some(_remoteAddress) - } - object Supervise { - def apply(actor: Actor, lifeCycle: LifeCycle, remoteAddress: RemoteAddress) = new Supervise(actor, lifeCycle, remoteAddress) - def apply(actor: Actor, lifeCycle: LifeCycle) = new Supervise(actor, lifeCycle, null) - def unapply(supervise: Supervise) = Some((supervise.actor, supervise.lifeCycle, supervise.remoteAddress)) - } - - case class RestartStrategy( - scheme: FailOverScheme, - maxNrOfRetries: Int, - withinTimeRange: Int, - trapExceptions: List[Class[_ <: Throwable]]) extends ConfigElement - - case object AllForOne extends FailOverScheme - case object OneForOne extends FailOverScheme - - case class LifeCycle(scope: Scope, callbacks: Option[RestartCallbacks]) extends ConfigElement - object LifeCycle { - def apply(scope: Scope) = new LifeCycle(scope, None) - } - case class RestartCallbacks(preRestart: String, postRestart: String) { - if ((preRestart eq null) || (postRestart eq null)) throw new IllegalArgumentException("Restart callback methods can't be null") - } - - case object Permanent extends Scope - case object Temporary extends Scope - - case class RemoteAddress(val hostname: String, val port: Int) extends ConfigElement - - class Component(_intf: Class[_], - val target: Class[_], - val lifeCycle: LifeCycle, - val timeout: Int, - val transactionRequired: Boolean, - _dispatcher: MessageDispatcher, // optional - _remoteAddress: RemoteAddress // optional - ) extends Server { - val intf: Option[Class[_]] = if (_intf eq null) None else Some(_intf) - val dispatcher: Option[MessageDispatcher] = if (_dispatcher eq null) None else Some(_dispatcher) - val remoteAddress: Option[RemoteAddress] = if (_remoteAddress eq null) None else Some(_remoteAddress) - } - object Component { - def apply(intf: Class[_], target: Class[_], lifeCycle: LifeCycle, timeout: Int) = - new Component(intf, target, lifeCycle, timeout, false, null, null) - - def apply(target: Class[_], lifeCycle: LifeCycle, timeout: Int) = - new Component(null, target, lifeCycle, timeout, false, null, null) - - def apply(intf: Class[_], target: Class[_], lifeCycle: LifeCycle, timeout: Int, dispatcher: MessageDispatcher) = - new Component(intf, target, lifeCycle, timeout, false, dispatcher, null) - - def apply(target: Class[_], lifeCycle: LifeCycle, timeout: Int, dispatcher: MessageDispatcher) = - new Component(null, target, lifeCycle, timeout, false, dispatcher, null) - - def apply(intf: Class[_], target: Class[_], lifeCycle: LifeCycle, timeout: Int, remoteAddress: RemoteAddress) = - new Component(intf, target, lifeCycle, timeout, false, null, remoteAddress) - - def apply(target: Class[_], lifeCycle: LifeCycle, timeout: Int, remoteAddress: RemoteAddress) = - new Component(null, target, lifeCycle, timeout, false, null, remoteAddress) - - def apply(intf: Class[_], target: Class[_], lifeCycle: LifeCycle, timeout: Int, dispatcher: MessageDispatcher, remoteAddress: RemoteAddress) = - new Component(intf, target, lifeCycle, timeout, false, dispatcher, remoteAddress) - - 
def apply(target: Class[_], lifeCycle: LifeCycle, timeout: Int, dispatcher: MessageDispatcher, remoteAddress: RemoteAddress) = - new Component(null, target, lifeCycle, timeout, false, dispatcher, remoteAddress) - - def apply(intf: Class[_], target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean) = - new Component(intf, target, lifeCycle, timeout, transactionRequired, null, null) - - def apply(target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean) = - new Component(null, target, lifeCycle, timeout, transactionRequired, null, null) - - def apply(intf: Class[_], target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean, dispatcher: MessageDispatcher) = - new Component(intf, target, lifeCycle, timeout, transactionRequired, dispatcher, null) - - def apply(target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean, dispatcher: MessageDispatcher) = - new Component(null, target, lifeCycle, timeout, transactionRequired, dispatcher, null) - - def apply(intf: Class[_], target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean, remoteAddress: RemoteAddress) = - new Component(intf, target, lifeCycle, timeout, transactionRequired, null, remoteAddress) - - def apply(target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean, remoteAddress: RemoteAddress) = - new Component(null, target, lifeCycle, timeout, transactionRequired, null, remoteAddress) - - def apply(intf: Class[_], target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean, dispatcher: MessageDispatcher, remoteAddress: RemoteAddress) = - new Component(intf, target, lifeCycle, timeout, transactionRequired, dispatcher, remoteAddress) - - def apply(target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean, dispatcher: MessageDispatcher, remoteAddress: RemoteAddress) = - new Component(null, target, lifeCycle, timeout, transactionRequired, dispatcher, remoteAddress) - } -} +import net.lag.configgy.{Configgy, ParseException} /** * @author Jonas Bonér */ -object JavaConfig { - import scala.reflect.BeanProperty +object Config extends Logging { + val VERSION = "0.7" - sealed abstract class ConfigElement + // Set Multiverse options for max speed + System.setProperty("org.multiverse.MuliverseConstants.sanityChecks", "false") + System.setProperty("org.multiverse.api.GlobalStmInstance.factorymethod", "org.multiverse.stms.alpha.AlphaStm.createFast") - class RestartStrategy( - @BeanProperty val scheme: FailOverScheme, - @BeanProperty val maxNrOfRetries: Int, - @BeanProperty val withinTimeRange: Int, - @BeanProperty val trapExceptions: Array[Class[_ <: Throwable]]) extends ConfigElement { - def transform = se.scalablesolutions.akka.config.ScalaConfig.RestartStrategy( - scheme.transform, maxNrOfRetries, withinTimeRange, trapExceptions.toList) + val HOME = { + val systemHome = System.getenv("AKKA_HOME") + if (systemHome == null || systemHome.length == 0 || systemHome == ".") { + val optionHome = System.getProperty("akka.home", "") + if (optionHome.length != 0) Some(optionHome) + else None + } else Some(systemHome) } - - class LifeCycle(@BeanProperty val scope: Scope, @BeanProperty val callbacks: RestartCallbacks) extends ConfigElement { - def this(scope: Scope) = this(scope, null) - def transform = { - val callbackOption = if (callbacks eq null) None else Some(callbacks.transform) - se.scalablesolutions.akka.config.ScalaConfig.LifeCycle(scope.transform, 
callbackOption) + + val config = { + if (HOME.isDefined) { + try { + val configFile = HOME.get + "/config/akka.conf" + Configgy.configure(configFile) + log.info("AKKA_HOME is defined to [%s], config loaded from [%s].", HOME.get, configFile) + } catch { + case e: ParseException => throw new IllegalStateException( + "'akka.conf' config file could not be loaded from [" + HOME.get + "/config/akka.conf]. Aborting." + + "\n\tEither add it to the 'config' directory or add it to the classpath.") + } + } else if (System.getProperty("akka.config", "") != "") { + val configFile = System.getProperty("akka.config", "") + try { + Configgy.configure(configFile) + log.info("Config loaded from -Dakka.config=%s", configFile) + } catch { + case e: ParseException => throw new IllegalStateException( + "Config could not be loaded from -Dakka.config=" + configFile) + } + } else { + try { + Configgy.configureFromResource("akka.conf", getClass.getClassLoader) + log.info("Config loaded from the application classpath.") + } catch { + case e: ParseException => throw new IllegalStateException( + "\nCan't find 'akka.conf' configuration file." + + "\nOne of the three ways of locating the 'akka.conf' file needs to be defined:" + + "\n\t1. Define 'AKKA_HOME' environment variable to the root of the Akka distribution." + + "\n\t2. Define the '-Dakka.config=...' system property option." + + "\n\t3. Put the 'akka.conf' file on the classpath." + + "\nAborting.") + } } + Configgy.config } + val CONFIG_VERSION = config.getString("akka.version", "0") + if (VERSION != CONFIG_VERSION) throw new IllegalStateException( + "Akka JAR version [" + VERSION + "] is different from the provided config ('akka.conf') version [" + CONFIG_VERSION + "]") + val startTime = System.currentTimeMillis - abstract class Scope extends ConfigElement { - def transform: se.scalablesolutions.akka.config.ScalaConfig.Scope - } - class Permanent extends Scope { - override def transform = se.scalablesolutions.akka.config.ScalaConfig.Permanent - } - class Temporary extends Scope { - override def transform = se.scalablesolutions.akka.config.ScalaConfig.Temporary - } - - abstract class FailOverScheme extends ConfigElement { - def transform: se.scalablesolutions.akka.config.ScalaConfig.FailOverScheme - } - class AllForOne extends FailOverScheme { - override def transform = se.scalablesolutions.akka.config.ScalaConfig.AllForOne - } - class OneForOne extends FailOverScheme { - override def transform = se.scalablesolutions.akka.config.ScalaConfig.OneForOne - } - - class RemoteAddress(@BeanProperty val hostname: String, @BeanProperty val port: Int) - - abstract class Server extends ConfigElement - class Component(@BeanProperty val intf: Class[_], - @BeanProperty val target: Class[_], - @BeanProperty val lifeCycle: LifeCycle, - @BeanProperty val timeout: Int, - @BeanProperty val transactionRequired: Boolean, // optional - @BeanProperty val dispatcher: MessageDispatcher, // optional - @BeanProperty val remoteAddress: RemoteAddress // optional - ) extends Server { - - def this(intf: Class[_], target: Class[_], lifeCycle: LifeCycle, timeout: Int) = - this(intf, target, lifeCycle, timeout, false, null, null) - - def this(target: Class[_], lifeCycle: LifeCycle,
timeout, false, null, null) - - def this(intf: Class[_], target: Class[_], lifeCycle: LifeCycle, timeout: Int, remoteAddress: RemoteAddress) = - this(intf, target, lifeCycle, timeout, false, null, remoteAddress) - - def this(target: Class[_], lifeCycle: LifeCycle, timeout: Int, remoteAddress: RemoteAddress) = - this(null, target, lifeCycle, timeout, false, null, remoteAddress) - - def this(intf: Class[_], target: Class[_], lifeCycle: LifeCycle, timeout: Int, dispatcher: MessageDispatcher) = - this(intf, target, lifeCycle, timeout, false, dispatcher, null) - - def this(target: Class[_], lifeCycle: LifeCycle, timeout: Int, dispatcher: MessageDispatcher) = - this(null, target, lifeCycle, timeout, false, dispatcher, null) - - def this(target: Class[_], lifeCycle: LifeCycle, timeout: Int, dispatcher: MessageDispatcher, remoteAddress: RemoteAddress) = - this(null, target, lifeCycle, timeout, false, dispatcher, remoteAddress) - - def this(intf: Class[_], target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean) = - this(intf, target, lifeCycle, timeout, transactionRequired, null, null) - - def this(target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean) = - this(null, target, lifeCycle, timeout, transactionRequired, null, null) - - def this(intf: Class[_], target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean, remoteAddress: RemoteAddress) = - this(intf, target, lifeCycle, timeout, transactionRequired, null, remoteAddress) - - def this(target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean, remoteAddress: RemoteAddress) = - this(null, target, lifeCycle, timeout, transactionRequired, null, remoteAddress) - - def this(intf: Class[_], target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean, dispatcher: MessageDispatcher) = - this(intf, target, lifeCycle, timeout, transactionRequired, dispatcher, null) - - def this(target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean, dispatcher: MessageDispatcher) = - this(null, target, lifeCycle, timeout, transactionRequired, dispatcher, null) - - def this(target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean, dispatcher: MessageDispatcher, remoteAddress: RemoteAddress) = - this(null, target, lifeCycle, timeout, transactionRequired, dispatcher, remoteAddress) - - def transform = - se.scalablesolutions.akka.config.ScalaConfig.Component( - intf, target, lifeCycle.transform, timeout, transactionRequired, dispatcher, - if (remoteAddress ne null) se.scalablesolutions.akka.config.ScalaConfig.RemoteAddress(remoteAddress.hostname, remoteAddress.port) else null) - - def newSupervised(actor: Actor) = - se.scalablesolutions.akka.config.ScalaConfig.Supervise(actor, lifeCycle.transform) - } - -} \ No newline at end of file + def uptime = (System.currentTimeMillis - startTime) / 1000 +} diff --git a/akka-core/src/main/scala/config/Configurator.scala b/akka-core/src/main/scala/config/Configurator.scala index 22ffd41214..fcb354a1f7 100644 --- a/akka-core/src/main/scala/config/Configurator.scala +++ b/akka-core/src/main/scala/config/Configurator.scala @@ -16,7 +16,7 @@ private[akka] trait Configurator { * @param clazz the class for the active object * @return the active object for the class */ - def getInstance[T](clazz: Class[T]): T + def getInstance[T](clazz: Class[T]): List[T] def getComponentInterfaces: List[Class[_]] diff --git 
a/akka-core/src/main/scala/config/SupervisionConfig.scala b/akka-core/src/main/scala/config/SupervisionConfig.scala new file mode 100644 index 0000000000..e993573972 --- /dev/null +++ b/akka-core/src/main/scala/config/SupervisionConfig.scala @@ -0,0 +1,234 @@ +/** + * Copyright (C) 2009-2010 Scalable Solutions AB + */ + +package se.scalablesolutions.akka.config + +import se.scalablesolutions.akka.actor.Actor +import se.scalablesolutions.akka.dispatch.MessageDispatcher + +sealed abstract class FaultHandlingStrategy +case class AllForOneStrategy(maxNrOfRetries: Int, withinTimeRange: Int) extends FaultHandlingStrategy +case class OneForOneStrategy(maxNrOfRetries: Int, withinTimeRange: Int) extends FaultHandlingStrategy + +/** + * Configuration classes - not to be used as messages. + * + * @author Jonas Bonér + */ +object ScalaConfig { + sealed abstract class ConfigElement + + abstract class Server extends ConfigElement + abstract class FailOverScheme extends ConfigElement + abstract class Scope extends ConfigElement + + case class SupervisorConfig(restartStrategy: RestartStrategy, worker: List[Server]) extends Server + + class Supervise(val actor: Actor, val lifeCycle: LifeCycle, _remoteAddress: RemoteAddress) extends Server { + val remoteAddress: Option[RemoteAddress] = if (_remoteAddress eq null) None else Some(_remoteAddress) + } + object Supervise { + def apply(actor: Actor, lifeCycle: LifeCycle, remoteAddress: RemoteAddress) = new Supervise(actor, lifeCycle, remoteAddress) + def apply(actor: Actor, lifeCycle: LifeCycle) = new Supervise(actor, lifeCycle, null) + def unapply(supervise: Supervise) = Some((supervise.actor, supervise.lifeCycle, supervise.remoteAddress)) + } + + case class RestartStrategy( + scheme: FailOverScheme, + maxNrOfRetries: Int, + withinTimeRange: Int, + trapExceptions: List[Class[_ <: Throwable]]) extends ConfigElement + + case object AllForOne extends FailOverScheme + case object OneForOne extends FailOverScheme + + case class LifeCycle(scope: Scope, callbacks: Option[RestartCallbacks]) extends ConfigElement + object LifeCycle { + def apply(scope: Scope) = new LifeCycle(scope, None) + } + case class RestartCallbacks(preRestart: String, postRestart: String) { + if ((preRestart eq null) || (postRestart eq null)) throw new IllegalArgumentException("Restart callback methods can't be null") + } + + case object Permanent extends Scope + case object Temporary extends Scope + + case class RemoteAddress(val hostname: String, val port: Int) extends ConfigElement + + class Component(_intf: Class[_], + val target: Class[_], + val lifeCycle: LifeCycle, + val timeout: Int, + val transactionRequired: Boolean, + _dispatcher: MessageDispatcher, // optional + _remoteAddress: RemoteAddress // optional + ) extends Server { + val intf: Option[Class[_]] = if (_intf eq null) None else Some(_intf) + val dispatcher: Option[MessageDispatcher] = if (_dispatcher eq null) None else Some(_dispatcher) + val remoteAddress: Option[RemoteAddress] = if (_remoteAddress eq null) None else Some(_remoteAddress) + } + object Component { + def apply(intf: Class[_], target: Class[_], lifeCycle: LifeCycle, timeout: Int) = + new Component(intf, target, lifeCycle, timeout, false, null, null) + + def apply(target: Class[_], lifeCycle: LifeCycle, timeout: Int) = + new Component(null, target, lifeCycle, timeout, false, null, null) + + def apply(intf: Class[_], target: Class[_], lifeCycle: LifeCycle, timeout: Int, dispatcher: MessageDispatcher) = + new Component(intf, target, lifeCycle, timeout, false, dispatcher, 
null) + + def apply(target: Class[_], lifeCycle: LifeCycle, timeout: Int, dispatcher: MessageDispatcher) = + new Component(null, target, lifeCycle, timeout, false, dispatcher, null) + + def apply(intf: Class[_], target: Class[_], lifeCycle: LifeCycle, timeout: Int, remoteAddress: RemoteAddress) = + new Component(intf, target, lifeCycle, timeout, false, null, remoteAddress) + + def apply(target: Class[_], lifeCycle: LifeCycle, timeout: Int, remoteAddress: RemoteAddress) = + new Component(null, target, lifeCycle, timeout, false, null, remoteAddress) + + def apply(intf: Class[_], target: Class[_], lifeCycle: LifeCycle, timeout: Int, dispatcher: MessageDispatcher, remoteAddress: RemoteAddress) = + new Component(intf, target, lifeCycle, timeout, false, dispatcher, remoteAddress) + + def apply(target: Class[_], lifeCycle: LifeCycle, timeout: Int, dispatcher: MessageDispatcher, remoteAddress: RemoteAddress) = + new Component(null, target, lifeCycle, timeout, false, dispatcher, remoteAddress) + + def apply(intf: Class[_], target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean) = + new Component(intf, target, lifeCycle, timeout, transactionRequired, null, null) + + def apply(target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean) = + new Component(null, target, lifeCycle, timeout, transactionRequired, null, null) + + def apply(intf: Class[_], target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean, dispatcher: MessageDispatcher) = + new Component(intf, target, lifeCycle, timeout, transactionRequired, dispatcher, null) + + def apply(target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean, dispatcher: MessageDispatcher) = + new Component(null, target, lifeCycle, timeout, transactionRequired, dispatcher, null) + + def apply(intf: Class[_], target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean, remoteAddress: RemoteAddress) = + new Component(intf, target, lifeCycle, timeout, transactionRequired, null, remoteAddress) + + def apply(target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean, remoteAddress: RemoteAddress) = + new Component(null, target, lifeCycle, timeout, transactionRequired, null, remoteAddress) + + def apply(intf: Class[_], target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean, dispatcher: MessageDispatcher, remoteAddress: RemoteAddress) = + new Component(intf, target, lifeCycle, timeout, transactionRequired, dispatcher, remoteAddress) + + def apply(target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean, dispatcher: MessageDispatcher, remoteAddress: RemoteAddress) = + new Component(null, target, lifeCycle, timeout, transactionRequired, dispatcher, remoteAddress) + } +} + +/** + * @author Jonas Bonér + */ +object JavaConfig { + import scala.reflect.BeanProperty + + sealed abstract class ConfigElement + + class RestartStrategy( + @BeanProperty val scheme: FailOverScheme, + @BeanProperty val maxNrOfRetries: Int, + @BeanProperty val withinTimeRange: Int, + @BeanProperty val trapExceptions: Array[Class[_ <: Throwable]]) extends ConfigElement { + def transform = se.scalablesolutions.akka.config.ScalaConfig.RestartStrategy( + scheme.transform, maxNrOfRetries, withinTimeRange, trapExceptions.toList) + } + + class LifeCycle(@BeanProperty val scope: Scope, @BeanProperty val callbacks: RestartCallbacks) extends ConfigElement { + def this(scope: Scope) = this(scope, null) + def 
transform = { + val callbackOption = if (callbacks eq null) None else Some(callbacks.transform) + se.scalablesolutions.akka.config.ScalaConfig.LifeCycle(scope.transform, callbackOption) + } + } + + class RestartCallbacks(@BeanProperty val preRestart: String, @BeanProperty val postRestart: String) { + def transform = se.scalablesolutions.akka.config.ScalaConfig.RestartCallbacks(preRestart, postRestart) + } + + abstract class Scope extends ConfigElement { + def transform: se.scalablesolutions.akka.config.ScalaConfig.Scope + } + class Permanent extends Scope { + override def transform = se.scalablesolutions.akka.config.ScalaConfig.Permanent + } + class Temporary extends Scope { + override def transform = se.scalablesolutions.akka.config.ScalaConfig.Temporary + } + + abstract class FailOverScheme extends ConfigElement { + def transform: se.scalablesolutions.akka.config.ScalaConfig.FailOverScheme + } + class AllForOne extends FailOverScheme { + override def transform = se.scalablesolutions.akka.config.ScalaConfig.AllForOne + } + class OneForOne extends FailOverScheme { + override def transform = se.scalablesolutions.akka.config.ScalaConfig.OneForOne + } + + class RemoteAddress(@BeanProperty val hostname: String, @BeanProperty val port: Int) + + abstract class Server extends ConfigElement + class Component(@BeanProperty val intf: Class[_], + @BeanProperty val target: Class[_], + @BeanProperty val lifeCycle: LifeCycle, + @BeanProperty val timeout: Int, + @BeanProperty val transactionRequired: Boolean, // optional + @BeanProperty val dispatcher: MessageDispatcher, // optional + @BeanProperty val remoteAddress: RemoteAddress // optional + ) extends Server { + + def this(intf: Class[_], target: Class[_], lifeCycle: LifeCycle, timeout: Int) = + this(intf, target, lifeCycle, timeout, false, null, null) + + def this(target: Class[_], lifeCycle: LifeCycle, timeout: Int) = + this(null, target, lifeCycle, timeout, false, null, null) + + def this(intf: Class[_], target: Class[_], lifeCycle: LifeCycle, timeout: Int, remoteAddress: RemoteAddress) = + this(intf, target, lifeCycle, timeout, false, null, remoteAddress) + + def this(target: Class[_], lifeCycle: LifeCycle, timeout: Int, remoteAddress: RemoteAddress) = + this(null, target, lifeCycle, timeout, false, null, remoteAddress) + + def this(intf: Class[_], target: Class[_], lifeCycle: LifeCycle, timeout: Int, dispatcher: MessageDispatcher) = + this(intf, target, lifeCycle, timeout, false, dispatcher, null) + + def this(target: Class[_], lifeCycle: LifeCycle, timeout: Int, dispatcher: MessageDispatcher) = + this(null, target, lifeCycle, timeout, false, dispatcher, null) + + def this(target: Class[_], lifeCycle: LifeCycle, timeout: Int, dispatcher: MessageDispatcher, remoteAddress: RemoteAddress) = + this(null, target, lifeCycle, timeout, false, dispatcher, remoteAddress) + + def this(intf: Class[_], target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean) = + this(intf, target, lifeCycle, timeout, transactionRequired, null, null) + + def this(target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean) = + this(null, target, lifeCycle, timeout, transactionRequired, null, null) + + def this(intf: Class[_], target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean, remoteAddress: RemoteAddress) = + this(intf, target, lifeCycle, timeout, transactionRequired, null, remoteAddress) + + def this(target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean, 
remoteAddress: RemoteAddress) = + this(null, target, lifeCycle, timeout, transactionRequired, null, remoteAddress) + + def this(intf: Class[_], target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean, dispatcher: MessageDispatcher) = + this(intf, target, lifeCycle, timeout, transactionRequired, dispatcher, null) + + def this(target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean, dispatcher: MessageDispatcher) = + this(null, target, lifeCycle, timeout, transactionRequired, dispatcher, null) + + def this(target: Class[_], lifeCycle: LifeCycle, timeout: Int, transactionRequired: Boolean, dispatcher: MessageDispatcher, remoteAddress: RemoteAddress) = + this(null, target, lifeCycle, timeout, transactionRequired, dispatcher, remoteAddress) + + def transform = + se.scalablesolutions.akka.config.ScalaConfig.Component( + intf, target, lifeCycle.transform, timeout, transactionRequired, dispatcher, + if (remoteAddress ne null) se.scalablesolutions.akka.config.ScalaConfig.RemoteAddress(remoteAddress.hostname, remoteAddress.port) else null) + + def newSupervised(actor: Actor) = + se.scalablesolutions.akka.config.ScalaConfig.Supervise(actor, lifeCycle.transform) + } + +} \ No newline at end of file diff --git a/akka-core/src/main/scala/dispatch/ExecutorBasedEventDrivenDispatcher.scala b/akka-core/src/main/scala/dispatch/ExecutorBasedEventDrivenDispatcher.scala index 7da13a10b3..b48e7717cf 100644 --- a/akka-core/src/main/scala/dispatch/ExecutorBasedEventDrivenDispatcher.scala +++ b/akka-core/src/main/scala/dispatch/ExecutorBasedEventDrivenDispatcher.scala @@ -57,18 +57,29 @@ class ExecutorBasedEventDrivenDispatcher(_name: String) extends MessageDispatche @volatile private var active: Boolean = false val name: String = "event-driven:executor:dispatcher:" + _name - init - + init + def dispatch(invocation: MessageInvocation) = if (active) { executor.execute(new Runnable() { def run = { - invocation.receiver.synchronized { - val messages = invocation.receiver._mailbox.iterator - while (messages.hasNext) { - messages.next.asInstanceOf[MessageInvocation].invoke - messages.remove + var lockAcquiredOnce = false + // this do-while loop is required to prevent missing new messages that arrive between the end of the inner while + // loop and the release of the lock + do { + if (invocation.receiver._dispatcherLock.tryLock) { + lockAcquiredOnce = true + try { + // Only dispatch if we got the lock. Otherwise another thread is already dispatching.
+ var messageInvocation = invocation.receiver._mailbox.poll + while (messageInvocation != null) { + messageInvocation.invoke + messageInvocation = invocation.receiver._mailbox.poll + } + } finally { + invocation.receiver._dispatcherLock.unlock + } } - } + } while (lockAcquiredOnce && !invocation.receiver._mailbox.isEmpty) } }) } else throw new IllegalStateException("Can't submit invocations to dispatcher since it's not started") @@ -88,4 +99,4 @@ class ExecutorBasedEventDrivenDispatche "Can't build a new thread pool for a dispatcher that is already up and running") private[akka] def init = withNewThreadPoolWithLinkedBlockingQueueWithUnboundedCapacity.buildThreadPool -} \ No newline at end of file +} diff --git a/akka-core/src/main/scala/dispatch/Future.scala b/akka-core/src/main/scala/dispatch/Future.scala index c1e61695b8..0bf9723e31 100644 --- a/akka-core/src/main/scala/dispatch/Future.scala +++ b/akka-core/src/main/scala/dispatch/Future.scala @@ -2,22 +2,38 @@ * Copyright (C) 2009-2010 Scalable Solutions AB */ -/** - * Based on code from the actorom actor framework by Sergio Bossa [http://code.google.com/p/actorom/]. - */ package se.scalablesolutions.akka.dispatch import java.util.concurrent.locks.ReentrantLock -import java.util.concurrent.{SynchronousQueue, TimeUnit} +import java.util.concurrent.TimeUnit class FutureTimeoutException(message: String) extends RuntimeException(message) object Futures { - def awaitAll(futures: List[FutureResult]): Unit = futures.foreach(_.await) - def awaitOne(futures: List[FutureResult]): FutureResult = { - var future: Option[FutureResult] = None + /** + * Evaluates the given body and returns a completed Future holding either the result or, + * if evaluation threw an exception, that exception. + * <pre>
+   * val future = Futures.future(1000) {
+   *  ... // do stuff
+   * }
+   * </pre>
+ */ + def future(timeout: Long)(body: => Any): Future = { + val promise = new DefaultCompletableFuture(timeout) + try { + promise completeWithResult body + } catch { + case e => promise completeWithException (None, e) + } + promise + } + + def awaitAll(futures: List[Future]): Unit = futures.foreach(_.await) + + def awaitOne(futures: List[Future]): Future = { + var future: Option[Future] = None do { future = futures.find(_.isCompleted) } while (future.isEmpty) @@ -25,7 +41,7 @@ object Futures { } /* - def awaitEither(f1: FutureResult, f2: FutureResult): Option[Any] = { + def awaitEither(f1: Future, f2: Future): Option[Any] = { import Actor.Sender.Self import Actor.{spawn, actor} @@ -54,7 +70,7 @@ object Futures { */ } -sealed trait FutureResult { +sealed trait Future { def await def awaitBlocking def isCompleted: Boolean @@ -64,12 +80,13 @@ sealed trait FutureResult { def exception: Option[Tuple2[AnyRef, Throwable]] } -trait CompletableFutureResult extends FutureResult { +trait CompletableFuture extends Future { def completeWithResult(result: Any) def completeWithException(toBlame: AnyRef, exception: Throwable) } -class DefaultCompletableFutureResult(timeout: Long) extends CompletableFutureResult { +// Based on code from the actorom actor framework by Sergio Bossa [http://code.google.com/p/actorom/]. +class DefaultCompletableFuture(timeout: Long) extends CompletableFuture { private val TIME_UNIT = TimeUnit.MILLISECONDS def this() = this(0) diff --git a/akka-core/src/main/scala/dispatch/Reactor.scala b/akka-core/src/main/scala/dispatch/Reactor.scala index f7bfa52215..627d27aeac 100644 --- a/akka-core/src/main/scala/dispatch/Reactor.scala +++ b/akka-core/src/main/scala/dispatch/Reactor.scala @@ -7,16 +7,17 @@ package se.scalablesolutions.akka.dispatch import java.util.List import se.scalablesolutions.akka.util.{HashCode, Logging} -import se.scalablesolutions.akka.stm.Transaction import se.scalablesolutions.akka.actor.Actor import java.util.concurrent.ConcurrentHashMap +import org.multiverse.commitbarriers.CountDownCommitBarrier + final class MessageInvocation(val receiver: Actor, val message: Any, - val future: Option[CompletableFutureResult], + val future: Option[CompletableFuture], val sender: Option[Actor], - val tx: Option[Transaction]) { + val transactionSet: Option[CountDownCommitBarrier]) { if (receiver eq null) throw new IllegalArgumentException("receiver is null") def invoke = receiver.invoke(this) @@ -37,13 +38,13 @@ final class MessageInvocation(val receiver: Actor, that.asInstanceOf[MessageInvocation].message == message } - override def toString(): String = synchronized { + override def toString = synchronized { "MessageInvocation[" + "\n\tmessage = " + message + "\n\treceiver = " + receiver + "\n\tsender = " + sender + "\n\tfuture = " + future + - "\n\ttx = " + tx + + "\n\ttransactionSet = " + transactionSet + "\n]" } } diff --git a/akka-core/src/main/scala/dispatch/ThreadPoolBuilder.scala b/akka-core/src/main/scala/dispatch/ThreadPoolBuilder.scala index cb465907cb..1fedc1a5d7 100644 --- a/akka-core/src/main/scala/dispatch/ThreadPoolBuilder.scala +++ b/akka-core/src/main/scala/dispatch/ThreadPoolBuilder.scala @@ -4,11 +4,11 @@ package se.scalablesolutions.akka.dispatch +import java.util.Collection import java.util.concurrent._ import atomic.{AtomicLong, AtomicInteger} import ThreadPoolExecutor.CallerRunsPolicy -import java.util.Collection import se.scalablesolutions.akka.util.Logging trait ThreadPoolBuilder {
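// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the patch): exercising the renamed
// Future API above. Note that Futures.future as defined here evaluates its
// body on the calling thread, so the returned Future is already completed,
// either with the result or with the thrown exception; awaitAll and awaitOne
// therefore return immediately in this sketch.
import se.scalablesolutions.akka.dispatch.{Future, Futures}

object FuturesUsageSketch {
  val doubled: Future = Futures.future(1000) { 21 * 2 }                        // completes with 42
  val failed: Future = Futures.future(1000) { throw new RuntimeException("x") } // completes with the exception
  Futures.awaitAll(List(doubled))                   // blocks until every future is completed
  val first: Future = Futures.awaitOne(List(doubled, failed)) // first completed of the list
  println("completed: " + first.isCompleted + ", exception: " + first.exception)
}
// ---------------------------------------------------------------------------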
diff --git a/akka-core/src/main/scala/remote/BootableRemoteActorService.scala b/akka-core/src/main/scala/remote/BootableRemoteActorService.scala index 429fdb61ec..8aaec0661b 100644 --- a/akka-core/src/main/scala/remote/BootableRemoteActorService.scala +++ b/akka-core/src/main/scala/remote/BootableRemoteActorService.scala @@ -5,8 +5,8 @@ package se.scalablesolutions.akka.remote import se.scalablesolutions.akka.actor.BootableActorLoaderService -import se.scalablesolutions.akka.util.{Bootable,Logging} -import se.scalablesolutions.akka.Config.config +import se.scalablesolutions.akka.util.{Bootable, Logging} +import se.scalablesolutions.akka.config.Config.config /** * This bundle/service is responsible for booting up and shutting down the remote actors facility @@ -23,26 +23,32 @@ trait BootableRemoteActorService extends Bootable with Logging { def startRemoteService = remoteServerThread.start abstract override def onLoad = { + super.onLoad //Initialize BootableActorLoaderService before remote service if(config.getBool("akka.remote.server.service", true)){ - log.info("Starting up Cluster Service") - Cluster.start - super.onLoad //Initialize BootableActorLoaderService before remote service + + if(config.getBool("akka.remote.cluster.service", true)) + Cluster.start(self.applicationLoader) + log.info("Initializing Remote Actors Service...") startRemoteService log.info("Remote Actors Service initialized!") } - else - super.onLoad + } - } - abstract override def onUnload = { - super.onUnload - if (remoteServerThread.isAlive) { - log.info("Shutting down Remote Actors Service") - RemoteNode.shutdown - remoteServerThread.join(1000) - } + log.info("Shutting down Remote Actors Service") + + RemoteNode.shutdown + + if (remoteServerThread.isAlive) + remoteServerThread.join(1000) + + log.info("Shutting down Cluster") Cluster.shutdown + + log.info("Remote Actors Service has been shut down") + + super.onUnload } -} \ No newline at end of file + +} diff --git a/akka-core/src/main/scala/remote/Cluster.scala b/akka-core/src/main/scala/remote/Cluster.scala index cb68b271ed..4a1d6012a7 100644 --- a/akka-core/src/main/scala/remote/Cluster.scala +++ b/akka-core/src/main/scala/remote/Cluster.scala @@ -4,7 +4,7 @@ package se.scalablesolutions.akka.remote -import se.scalablesolutions.akka.Config.config +import se.scalablesolutions.akka.config.Config.config import se.scalablesolutions.akka.config.ScalaConfig._ import se.scalablesolutions.akka.serialization.Serializer import se.scalablesolutions.akka.actor.{Supervisor, SupervisorFactory, Actor, ActorRegistry} @@ -17,17 +17,43 @@ import scala.collection.immutable.{Map, HashMap} * @author Viktor Klang */ trait Cluster { + + /** + * Specifies the cluster name + */ def name: String + /** + * Adds the specified hostname + port as a local node + * This information will be propagated to other nodes in the cluster + * and will be available at the other nodes through lookup and foreach + */ def registerLocalNode(hostname: String, port: Int): Unit + /** + * Removes the specified hostname + port from the local node + * This information will be propagated to other nodes in the cluster + * and will no longer be available at the other nodes through lookup and foreach + */ def deregisterLocalNode(hostname: String, port: Int): Unit + /** + * Sends the message to all Actors of the specified type on all other nodes in the cluster + */ def relayMessage(to: Class[_ <: Actor], msg: AnyRef): Unit + /** + * Traverses all known remote addresses available at all other nodes in the cluster + * and applies
the given PartialFunction to the first address at which it is defined + * The order of application is undefined and may vary + */ def lookup[T](pf: PartialFunction[RemoteAddress, T]): Option[T] - - def foreach(f : (RemoteAddress) => Unit) : Unit + + /** + * Applies the specified function to all known remote addresses on all other nodes in the cluster + * The order of application is undefined and may vary + */ + def foreach(f: (RemoteAddress) => Unit): Unit } /** @@ -37,6 +63,10 @@ trait Cluster { */ trait ClusterActor extends Actor with Cluster { val name = config.getString("akka.remote.cluster.name") getOrElse "default" + + @volatile protected var serializer : Serializer = _ + + private[remote] def setSerializer(s : Serializer) : Unit = serializer = s } /** @@ -44,12 +74,20 @@ trait ClusterActor extends Actor with Cluster { * * @author Viktor Klang */ -private[remote] object ClusterActor { +private[akka] object ClusterActor { sealed trait ClusterMessage - private[remote] case class RelayedMessage(actorClassFQN: String, msg: AnyRef) extends ClusterMessage - - private[remote] case class Node(endpoints: List[RemoteAddress]) + private[akka] case class RelayedMessage(actorClassFQN: String, msg: AnyRef) extends ClusterMessage + private[akka] case class Message[ADDR_T](sender: ADDR_T, msg: Array[Byte]) + private[akka] case object PapersPlease extends ClusterMessage + private[akka] case class Papers(addresses: List[RemoteAddress]) extends ClusterMessage + private[akka] case object Block extends ClusterMessage + private[akka] case object Unblock extends ClusterMessage + private[akka] case class View[ADDR_T](othersPresent: Set[ADDR_T]) extends ClusterMessage + private[akka] case class Zombie[ADDR_T](address: ADDR_T) extends ClusterMessage + private[akka] case class RegisterLocalNode(server: RemoteAddress) extends ClusterMessage + private[akka] case class DeregisterLocalNode(server: RemoteAddress) extends ClusterMessage + private[akka] case class Node(endpoints: List[RemoteAddress]) } /** @@ -59,76 +97,65 @@ private[remote] object ClusterActor { */ abstract class BasicClusterActor extends ClusterActor { import ClusterActor._ - - case class Message(sender : ADDR_T,msg : Array[Byte]) - case object PapersPlease extends ClusterMessage - case class Papers(addresses: List[RemoteAddress]) extends ClusterMessage - case object Block extends ClusterMessage - case object Unblock extends ClusterMessage - case class View(othersPresent : Set[ADDR_T]) extends ClusterMessage - case class Zombie(address: ADDR_T) extends ClusterMessage - case class RegisterLocalNode(server: RemoteAddress) extends ClusterMessage - case class DeregisterLocalNode(server: RemoteAddress) extends ClusterMessage - type ADDR_T - @volatile private var local: Node = Node(Nil) @volatile private var remotes: Map[ADDR_T, Node] = Map() override def init = { - remotes = new HashMap[ADDR_T, Node] + remotes = new HashMap[ADDR_T, Node] } override def shutdown = { - remotes = Map() + remotes = Map() } def receive = { - case v @ View(members) => { + case v: View[ADDR_T] => { // Not present in the cluster anymore = presumably zombies // Nodes we have no prior knowledge existed = unknowns - val zombies = Set[ADDR_T]() ++ remotes.keySet -- members - val unknown = members -- remotes.keySet + val zombies = Set[ADDR_T]() ++ remotes.keySet -- v.othersPresent + val unknown = v.othersPresent -- remotes.keySet log debug ("Updating view") - log debug ("Other memebers: [%s]",members) - log debug ("Zombies: [%s]",zombies) - log debug ("Unknowns: [%s]",unknown) + log
debug ("Other members: [%s]", v.othersPresent) + log debug ("Zombies: [%s]", zombies) + log debug ("Unknowns: [%s]", unknown) // Tell the zombies and unknowns to provide papers and prematurely treat the zombies as dead broadcast(zombies ++ unknown, PapersPlease) remotes = remotes -- zombies } - case Zombie(x) => { //Ask the presumed zombie for papers and prematurely treat it as dead - log debug ("Killing Zombie Node: %s", x) - broadcast(x :: Nil, PapersPlease) - remotes = remotes - x + case z: Zombie[ADDR_T] => { //Ask the presumed zombie for papers and prematurely treat it as dead + log debug ("Killing Zombie Node: %s", z.address) + broadcast(z.address :: Nil, PapersPlease) + remotes = remotes - z.address } - case rm @ RelayedMessage(_, _) => { + case rm@RelayedMessage(_, _) => { log debug ("Relaying message: %s", rm) broadcast(rm) } - case m @ Message(src,msg) => { - (Cluster.serializer in (msg, None)) match { + case m: Message[ADDR_T] => { + val (src, msg) = (m.sender, m.msg) + (serializer in (msg, None)) match { - case PapersPlease => { - log debug ("Asked for papers by %s", src) - broadcast(src :: Nil, Papers(local.endpoints)) + case PapersPlease => { + log debug ("Asked for papers by %s", src) + broadcast(src :: Nil, Papers(local.endpoints)) - if (remotes.get(src).isEmpty) // If we were asked for papers from someone we don't know, ask them! - broadcast(src :: Nil, PapersPlease) - } - - case Papers(x) => remotes = remotes + (src -> Node(x)) - - case RelayedMessage(c, m) => ActorRegistry.actorsFor(c).foreach(_ send m) - - case unknown => log debug ("Unknown message: %s", unknown.toString) + if (remotes.get(src).isEmpty) // If we were asked for papers from someone we don't know, ask them! + broadcast(src :: Nil, PapersPlease) } + + case Papers(x) => remotes = remotes + (src -> Node(x)) + + case RelayedMessage(c, m) => ActorRegistry.actorsFor(c).foreach(_ send m) + + case unknown => log debug ("Unknown message: %s", unknown.toString) + } } case RegisterLocalNode(s) => { @@ -147,20 +174,20 @@ abstract class BasicClusterActor extends ClusterActor { /** * Implement this in a subclass to add node-to-node messaging */ - protected def toOneNode(dest : ADDR_T, msg : Array[Byte]) : Unit + protected def toOneNode(dest: ADDR_T, msg: Array[Byte]): Unit /** * Implement this in a subclass to add node-to-many-nodes messaging */ - protected def toAllNodes(msg : Array[Byte]) : Unit + protected def toAllNodes(msg: Array[Byte]): Unit /** * Sends the specified message to the given recipients using the serializer * that's been set in the akka-conf */ protected def broadcast[T <: AnyRef](recipients: Iterable[ADDR_T], msg: T): Unit = { - lazy val m = Cluster.serializer out msg - for (r <- recipients) toOneNode(r,m) + lazy val m = serializer out msg + for (r <- recipients) toOneNode(r, m) } /** @@ -168,18 +195,18 @@ abstract class BasicClusterActor extends ClusterActor { * that's been set in the akka-conf */ protected def broadcast[T <: AnyRef](msg: T): Unit = - if (!remotes.isEmpty) toAllNodes(Cluster.serializer out msg) + if (!remotes.isEmpty) toAllNodes(serializer out msg) /** * Applies the given PartialFunction to all known RemoteAddresses */ def lookup[T](handleRemoteAddress: PartialFunction[RemoteAddress, T]): Option[T] = remotes.values.toList.flatMap(_.endpoints).find(handleRemoteAddress isDefinedAt _).map(handleRemoteAddress) - + /** * Applies the given function to all remote addresses known */ - def foreach(f : (RemoteAddress) => Unit) : Unit = remotes.values.toList.flatMap(_.endpoints).foreach(f) +
def foreach(f: (RemoteAddress) => Unit): Unit = remotes.values.toList.flatMap(_.endpoints).foreach(f) /** * Registers a local endpoint @@ -206,36 +233,36 @@ abstract class BasicClusterActor extends ClusterActor { * Loads a specified ClusterActor and delegates to that instance. */ object Cluster extends Cluster with Logging { - @volatile private[remote] var clusterActor: Option[ClusterActor] = None - @volatile private[remote] var supervisor: Option[Supervisor] = None - - private[remote] lazy val serializer: Serializer = { - val className = config.getString("akka.remote.cluster.serializer", Serializer.Java.getClass.getName) - Class.forName(className).newInstance.asInstanceOf[Serializer] - } + lazy val DEFAULT_SERIALIZER_CLASS_NAME = Serializer.Java.getClass.getName - private[remote] def createClusterActor : Option[ClusterActor] = { + @volatile private[remote] var clusterActor: Option[ClusterActor] = None + + private[remote] def createClusterActor(loader : ClassLoader): Option[ClusterActor] = { val name = config.getString("akka.remote.cluster.actor") - + if (name.isEmpty) throw new IllegalArgumentException( + "Can't start cluster since the 'akka.remote.cluster.actor' configuration option is not defined") + + val serializer = Class.forName(config.getString("akka.remote.cluster.serializer", DEFAULT_SERIALIZER_CLASS_NAME)).newInstance.asInstanceOf[Serializer] + serializer.classLoader = Some(loader) try { - name map { fqn => - val a = Class.forName(fqn).newInstance.asInstanceOf[ClusterActor] - a.start - a + name map { + fqn => + val a = Class.forName(fqn).newInstance.asInstanceOf[ClusterActor] + a setSerializer serializer + a } } catch { - case e => log.error(e,"Couldn't load Cluster provider: [%s]",name.getOrElse("Not specified")); None + case e => log.error(e, "Couldn't load Cluster provider: [%s]", name.getOrElse("Not specified")); None } } - private[remote] def createSupervisor(actor : ClusterActor) : Option[Supervisor] = { + private[akka] def createSupervisor(actor: ClusterActor): Option[Supervisor] = { val sup = SupervisorFactory( SupervisorConfig( RestartStrategy(OneForOne, 5, 1000, List(classOf[Exception])), Supervise(actor, LifeCycle(Permanent)) :: Nil) ).newInstance - sup.start Some(sup) } @@ -249,22 +276,28 @@ object Cluster extends Cluster with Logging { def deregisterLocalNode(hostname: String, port: Int): Unit = clusterActor.foreach(_.deregisterLocalNode(hostname, port)) def relayMessage(to: Class[_ <: Actor], msg: AnyRef): Unit = clusterActor.foreach(_.relayMessage(to, msg)) - - def foreach(f : (RemoteAddress) => Unit) : Unit = clusterActor.foreach(_.foreach(f)) - def start : Unit = synchronized { - if(supervisor.isEmpty) { - for(actor <- createClusterActor; - sup <- createSupervisor(actor)) { - clusterActor = Some(actor) - supervisor = Some(sup) + def foreach(f: (RemoteAddress) => Unit): Unit = clusterActor.foreach(_.foreach(f)) + + def start: Unit = start(None) + + def start(serializerClassLoader : Option[ClassLoader]): Unit = synchronized { + log.info("Starting up Cluster Service...") + if (clusterActor.isEmpty) { + for{ actor <- createClusterActor(serializerClassLoader getOrElse getClass.getClassLoader) + sup <- createSupervisor(actor) } { + clusterActor = Some(actor) + sup.start } } } - def shutdown : Unit = synchronized { - supervisor.foreach(_.stop) - supervisor = None + def shutdown: Unit = synchronized { + log.info("Shutting down Cluster Service...") + for{ + c <- clusterActor + s <- c._supervisor + } s.stop clusterActor = None } } diff --git 
a/akka-core/src/main/scala/remote/RemoteClient.scala b/akka-core/src/main/scala/remote/RemoteClient.scala index 7d22f4289b..2c54bac80d 100644 --- a/akka-core/src/main/scala/remote/RemoteClient.scala +++ b/akka-core/src/main/scala/remote/RemoteClient.scala @@ -6,9 +6,9 @@ package se.scalablesolutions.akka.remote import se.scalablesolutions.akka.remote.protobuf.RemoteProtocol.{RemoteRequest, RemoteReply} import se.scalablesolutions.akka.actor.{Exit, Actor} -import se.scalablesolutions.akka.dispatch.{DefaultCompletableFutureResult, CompletableFutureResult} +import se.scalablesolutions.akka.dispatch.{DefaultCompletableFuture, CompletableFuture} import se.scalablesolutions.akka.util.{UUID, Logging} -import se.scalablesolutions.akka.Config.config +import se.scalablesolutions.akka.config.Config.config import org.jboss.netty.channel._ import group.DefaultChannelGroup @@ -86,7 +86,7 @@ object RemoteClient extends Logging { override def postMessageToMailboxAndCreateFutureResultWithTimeout( message: Any, timeout: Long, - senderFuture: Option[CompletableFutureResult]): CompletableFutureResult = { + senderFuture: Option[CompletableFuture]): CompletableFuture = { val requestBuilder = RemoteRequest.newBuilder .setId(RemoteRequestIdFactory.nextId) .setTarget(className) @@ -168,7 +168,7 @@ class RemoteClient(hostname: String, port: Int) extends Logging { val name = "RemoteClient@" + hostname + "::" + port @volatile private[remote] var isRunning = false - private val futures = new ConcurrentHashMap[Long, CompletableFutureResult] + private val futures = new ConcurrentHashMap[Long, CompletableFuture] private val supervisors = new ConcurrentHashMap[String, Actor] private val channelFactory = new NioClientSocketChannelFactory( @@ -208,14 +208,14 @@ class RemoteClient(hostname: String, port: Int) extends Logging { } } - def send(request: RemoteRequest, senderFuture: Option[CompletableFutureResult]): Option[CompletableFutureResult] = if (isRunning) { + def send(request: RemoteRequest, senderFuture: Option[CompletableFuture]): Option[CompletableFuture] = if (isRunning) { if (request.getIsOneWay) { connection.getChannel.write(request) None } else { futures.synchronized { val futureResult = if (senderFuture.isDefined) senderFuture.get - else new DefaultCompletableFutureResult(request.getTimeout) + else new DefaultCompletableFuture(request.getTimeout) futures.put(request.getId, futureResult) connection.getChannel.write(request) Some(futureResult) @@ -238,7 +238,7 @@ class RemoteClient(hostname: String, port: Int) extends Logging { * @author Jonas Bonér */ class RemoteClientPipelineFactory(name: String, - futures: ConcurrentMap[Long, CompletableFutureResult], + futures: ConcurrentMap[Long, CompletableFuture], supervisors: ConcurrentMap[String, Actor], bootstrap: ClientBootstrap, remoteAddress: SocketAddress, @@ -269,7 +269,7 @@ class RemoteClientPipelineFactory(name: String, */ @ChannelPipelineCoverage(value = "all") class RemoteClientHandler(val name: String, - val futures: ConcurrentMap[Long, CompletableFutureResult], + val futures: ConcurrentMap[Long, CompletableFuture], val supervisors: ConcurrentMap[String, Actor], val bootstrap: ClientBootstrap, val remoteAddress: SocketAddress, diff --git a/akka-core/src/main/scala/remote/RemoteProtocolBuilder.scala b/akka-core/src/main/scala/remote/RemoteProtocolBuilder.scala index ac0a3986c6..1156a34b27 100644 --- a/akka-core/src/main/scala/remote/RemoteProtocolBuilder.scala +++ b/akka-core/src/main/scala/remote/RemoteProtocolBuilder.scala @@ -18,13 +18,10 @@ object 
RemoteProtocolBuilder { private var SERIALIZER_PROTOBUF: Serializer.Protobuf = Serializer.Protobuf - def setClassLoader(classLoader: ClassLoader) = { - SERIALIZER_JAVA = new Serializer.Java - SERIALIZER_JAVA_JSON = new Serializer.JavaJSON - SERIALIZER_SCALA_JSON = new Serializer.ScalaJSON - SERIALIZER_JAVA.setClassLoader(classLoader) - SERIALIZER_JAVA_JSON.setClassLoader(classLoader) - SERIALIZER_SCALA_JSON.setClassLoader(classLoader) + def setClassLoader(cl: ClassLoader) = { + SERIALIZER_JAVA.classLoader = Some(cl) + SERIALIZER_JAVA_JSON.classLoader = Some(cl) + SERIALIZER_SCALA_JSON.classLoader = Some(cl) } def getMessage(request: RemoteRequest): Any = { diff --git a/akka-core/src/main/scala/remote/RemoteServer.scala b/akka-core/src/main/scala/remote/RemoteServer.scala index a6cc143d8f..05c069c9e2 100644 --- a/akka-core/src/main/scala/remote/RemoteServer.scala +++ b/akka-core/src/main/scala/remote/RemoteServer.scala @@ -12,7 +12,7 @@ import java.util.{Map => JMap} import se.scalablesolutions.akka.actor._ import se.scalablesolutions.akka.util._ import se.scalablesolutions.akka.remote.protobuf.RemoteProtocol.{RemoteReply, RemoteRequest} -import se.scalablesolutions.akka.Config.config +import se.scalablesolutions.akka.config.Config.config import org.jboss.netty.bootstrap.ServerBootstrap import org.jboss.netty.channel._ diff --git a/akka-core/src/main/scala/serialization/Serializable.scala b/akka-core/src/main/scala/serialization/Serializable.scala index 0111cdfc8c..813f4fd12f 100644 --- a/akka-core/src/main/scala/serialization/Serializable.scala +++ b/akka-core/src/main/scala/serialization/Serializable.scala @@ -5,10 +5,14 @@ package se.scalablesolutions.akka.serialization import org.codehaus.jackson.map.ObjectMapper + import com.google.protobuf.Message + import reflect.Manifest //import sbinary.DefaultProtocol + import java.io.{StringWriter, ByteArrayOutputStream, ObjectOutputStream} + import sjson.json.{Serializer=>SJSONSerializer} object SerializationProtocol { diff --git a/akka-core/src/main/scala/serialization/Serializer.scala b/akka-core/src/main/scala/serialization/Serializer.scala index 55f695a5c4..5363664ad4 100644 --- a/akka-core/src/main/scala/serialization/Serializer.scala +++ b/akka-core/src/main/scala/serialization/Serializer.scala @@ -18,8 +18,12 @@ import sjson.json.{Serializer => SJSONSerializer} * @author Jonas Bonér */ trait Serializer { + var classLoader: Option[ClassLoader] = None + def deepClone(obj: AnyRef): AnyRef + def out(obj: AnyRef): Array[Byte] + def in(bytes: Array[Byte], clazz: Option[Class[_]]): AnyRef } @@ -51,11 +55,7 @@ object Serializer { * @author Jonas Bonér */ object Java extends Java - class Java extends Serializer { - private var classLoader: Option[ClassLoader] = None - - def setClassLoader(cl: ClassLoader) = classLoader = Some(cl) - + trait Java extends Serializer { def deepClone(obj: AnyRef): AnyRef = in(out(obj), None) def out(obj: AnyRef): Array[Byte] = { @@ -67,8 +67,9 @@ object Serializer { } def in(bytes: Array[Byte], clazz: Option[Class[_]]): AnyRef = { - val in = if (classLoader.isDefined) new ClassLoaderObjectInputStream(classLoader.get, new ByteArrayInputStream(bytes)) - else new ObjectInputStream(new ByteArrayInputStream(bytes)) + val in = + if (classLoader.isDefined) new ClassLoaderObjectInputStream(classLoader.get, new ByteArrayInputStream(bytes)) + else new ObjectInputStream(new ByteArrayInputStream(bytes)) val obj = in.readObject in.close obj @@ -79,18 +80,21 @@ object Serializer { * @author Jonas Bonér */ object Protobuf extends 
Protobuf - class Protobuf extends Serializer { + trait Protobuf extends Serializer { def deepClone(obj: AnyRef): AnyRef = in(out(obj), Some(obj.getClass)) def out(obj: AnyRef): Array[Byte] = { - if (!obj.isInstanceOf[Message]) throw new IllegalArgumentException("Can't serialize a non-protobuf message using protobuf [" + obj + "]") + if (!obj.isInstanceOf[Message]) throw new IllegalArgumentException( + "Can't serialize a non-protobuf message using protobuf [" + obj + "]") obj.asInstanceOf[Message].toByteArray } def in(bytes: Array[Byte], clazz: Option[Class[_]]): AnyRef = { - if (!clazz.isDefined) throw new IllegalArgumentException("Need a protobuf message class to be able to serialize bytes using protobuf") + if (!clazz.isDefined) throw new IllegalArgumentException( + "Need a protobuf message class to be able to serialize bytes using protobuf") // TODO: should we cache this method lookup? - val message = clazz.get.getDeclaredMethod("getDefaultInstance", EMPTY_CLASS_ARRAY: _*).invoke(null, EMPTY_ANY_REF_ARRAY: _*).asInstanceOf[Message] + val message = clazz.get.getDeclaredMethod( + "getDefaultInstance", EMPTY_CLASS_ARRAY: _*).invoke(null, EMPTY_ANY_REF_ARRAY: _*).asInstanceOf[Message] message.toBuilder().mergeFrom(bytes).build } @@ -104,13 +108,9 @@ object Serializer { * @author Jonas Bonér */ object JavaJSON extends JavaJSON - class JavaJSON extends Serializer { + trait JavaJSON extends Serializer { private val mapper = new ObjectMapper - private var classLoader: Option[ClassLoader] = None - - def setClassLoader(cl: ClassLoader) = classLoader = Some(cl) - def deepClone(obj: AnyRef): AnyRef = in(out(obj), Some(obj.getClass)) def out(obj: AnyRef): Array[Byte] = { @@ -122,9 +122,11 @@ object Serializer { } def in(bytes: Array[Byte], clazz: Option[Class[_]]): AnyRef = { - if (!clazz.isDefined) throw new IllegalArgumentException("Can't deserialize JSON to instance if no class is provided") - val in = if (classLoader.isDefined) new ClassLoaderObjectInputStream(classLoader.get, new ByteArrayInputStream(bytes)) - else new ObjectInputStream(new ByteArrayInputStream(bytes)) + if (!clazz.isDefined) throw new IllegalArgumentException( + "Can't deserialize JSON to instance if no class is provided") + val in = + if (classLoader.isDefined) new ClassLoaderObjectInputStream(classLoader.get, new ByteArrayInputStream(bytes)) + else new ObjectInputStream(new ByteArrayInputStream(bytes)) val obj = mapper.readValue(in, clazz.get).asInstanceOf[AnyRef] in.close obj @@ -140,13 +142,9 @@ object Serializer { * @author Jonas Bonér */ object ScalaJSON extends ScalaJSON - class ScalaJSON extends Serializer { + trait ScalaJSON extends Serializer { def deepClone(obj: AnyRef): AnyRef = in(out(obj), None) - private var classLoader: Option[ClassLoader] = None - - def setClassLoader(cl: ClassLoader) = classLoader = Some(cl) - def out(obj: AnyRef): Array[Byte] = SJSONSerializer.SJSON.out(obj) // FIXME set ClassLoader on SJSONSerializer.SJSON diff --git a/akka-core/src/main/scala/stm/DataFlowVariable.scala b/akka-core/src/main/scala/stm/DataFlowVariable.scala index a99b4a1e58..a9e42f2c48 100644 --- a/akka-core/src/main/scala/stm/DataFlowVariable.scala +++ b/akka-core/src/main/scala/stm/DataFlowVariable.scala @@ -2,13 +2,13 @@ * Copyright (C) 2009-2010 Scalable Solutions AB */ -package se.scalablesolutions.akka.state +package se.scalablesolutions.akka.stm import java.util.concurrent.atomic.AtomicReference import java.util.concurrent.{ConcurrentLinkedQueue, LinkedBlockingQueue} import se.scalablesolutions.akka.actor.Actor 
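Note on the serializer changes above: Serializer now exposes a public classLoader: Option[ClassLoader] field in place of the old setClassLoader method, and the concrete serializers are traits with singleton companions that share it. A minimal sketch of the new call style (assumed usage, the object name is hypothetical):

```scala
import se.scalablesolutions.akka.serialization.Serializer

object SerializationExample {
  // Round-trips an object through Serializer.Java, supplying the class
  // loader the way RemoteProtocolBuilder.setClassLoader now wires it in.
  def roundTrip(obj: AnyRef, loader: ClassLoader): AnyRef = {
    Serializer.Java.classLoader = Some(loader) // consulted by in(...) on deserialization
    Serializer.Java.in(Serializer.Java.out(obj), None)
  }
}
```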
-import se.scalablesolutions.akka.dispatch.CompletableFutureResult +import se.scalablesolutions.akka.dispatch.CompletableFuture /** * Implements Oz-style dataflow (single assignment) variables. @@ -19,6 +19,12 @@ object DataFlow { case object Start case object Exit +import java.util.concurrent.atomic.AtomicReference +import java.util.concurrent.{ConcurrentLinkedQueue, LinkedBlockingQueue} + +import se.scalablesolutions.akka.actor.Actor +import se.scalablesolutions.akka.dispatch.CompletableFuture + def thread(body: => Unit) = { val thread = new IsolatedEventBasedThread(body).start thread send Start @@ -74,7 +80,7 @@ object DataFlow { private class Out[T <: Any](dataFlow: DataFlowVariable[T]) extends Actor { timeout = TIME_OUT start - private var readerFuture: Option[CompletableFutureResult] = None + private var readerFuture: Option[CompletableFuture] = None def receive = { case Get => val ref = dataFlow.value.get diff --git a/akka-core/src/main/scala/stm/HashTrie.scala b/akka-core/src/main/scala/stm/HashTrie.scala index 2147507153..4b4ea4be78 100644 --- a/akka-core/src/main/scala/stm/HashTrie.scala +++ b/akka-core/src/main/scala/stm/HashTrie.scala @@ -32,7 +32,7 @@ POSSIBILITY OF SUCH DAMAGE. **/ -package se.scalablesolutions.akka.collection +package se.scalablesolutions.akka.stm trait PersistentDataStructure @@ -77,7 +77,7 @@ object HashTrie { // nodes @serializable -private[collection] sealed trait Node[K, +V] { +private[stm] sealed trait Node[K, +V] { val size: Int def apply(key: K, hash: Int): Option[V] @@ -90,7 +90,7 @@ private[collection] sealed trait Node[K, +V] { } @serializable -private[collection] class EmptyNode[K] extends Node[K, Nothing] { +private[stm] class EmptyNode[K] extends Node[K, Nothing] { val size = 0 def apply(key: K, hash: Int) = None @@ -106,12 +106,12 @@ private[collection] class EmptyNode[K] extends Node[K, Nothing] { } } -private[collection] abstract class SingleNode[K, +V] extends Node[K, V] { +private[stm] abstract class SingleNode[K, +V] extends Node[K, V] { val hash: Int } -private[collection] class LeafNode[K, +V](key: K, val hash: Int, value: V) extends SingleNode[K, V] { +private[stm] class LeafNode[K, +V](key: K, val hash: Int, value: V) extends SingleNode[K, V] { val size = 1 def apply(key: K, hash: Int) = if (this.key == key) Some(value) else None @@ -141,7 +141,7 @@ private[collection] class LeafNode[K, +V](key: K, val hash: Int, value: V) exten } -private[collection] class CollisionNode[K, +V](val hash: Int, bucket: List[(K, V)]) extends SingleNode[K, V] { +private[stm] class CollisionNode[K, +V](val hash: Int, bucket: List[(K, V)]) extends SingleNode[K, V] { lazy val size = bucket.length def this(hash: Int, pairs: (K, V)*) = this(hash, pairs.toList) @@ -187,7 +187,7 @@ private[collection] class CollisionNode[K, +V](val hash: Int, bucket: List[(K, V override def toString = "CollisionNode(" + bucket.toString + ")" } -private[collection] class BitmappedNode[K, +V](shift: Int)(table: Array[Node[K, V]], bits: Int) extends Node[K, V] { +private[stm] class BitmappedNode[K, +V](shift: Int)(table: Array[Node[K, V]], bits: Int) extends Node[K, V] { lazy val size = { val sizes = for { n <- table @@ -286,7 +286,7 @@ private[collection] class BitmappedNode[K, +V](shift: Int)(table: Array[Node[K, } -private[collection] object BitmappedNode { +private[stm] object BitmappedNode { def apply[K, V](shift: Int)(node: SingleNode[K, V], key: K, hash: Int, value: V) = { val table = new Array[Node[K, V]](Math.max((hash >>> shift) & 0x01f, (node.hash >>> shift) & 0x01f) + 
1) @@ -314,7 +314,7 @@ private[collection] object BitmappedNode { } -private[collection] class FullNode[K, +V](shift: Int)(table: Array[Node[K, V]]) extends Node[K, V] { +private[stm] class FullNode[K, +V](shift: Int)(table: Array[Node[K, V]]) extends Node[K, V] { lazy val size = table.foldLeft(0) { _ + _.size } def apply(key: K, hash: Int) = table((hash >>> shift) & 0x01f)(key, hash) diff --git a/akka-core/src/main/scala/stm/ResultOrFailure.scala b/akka-core/src/main/scala/stm/ResultOrFailure.scala index a8b5090a68..ced5572104 100644 --- a/akka-core/src/main/scala/stm/ResultOrFailure.scala +++ b/akka-core/src/main/scala/stm/ResultOrFailure.scala @@ -2,9 +2,7 @@ * Copyright (C) 2009-2010 Scalable Solutions AB */ -package se.scalablesolutions.akka.util - -import se.scalablesolutions.akka.stm.Transaction +package se.scalablesolutions.akka.stm /** * Reference that can hold either a typed value or an exception. diff --git a/akka-core/src/main/scala/stm/Transaction.scala b/akka-core/src/main/scala/stm/Transaction.scala index 1637b4c906..72f97a2d0b 100644 --- a/akka-core/src/main/scala/stm/Transaction.scala +++ b/akka-core/src/main/scala/stm/Transaction.scala @@ -6,16 +6,18 @@ package se.scalablesolutions.akka.stm import java.util.concurrent.atomic.AtomicLong import java.util.concurrent.atomic.AtomicInteger +import java.util.concurrent.TimeUnit + +import scala.collection.mutable.HashMap -import se.scalablesolutions.akka.state.Committable import se.scalablesolutions.akka.util.Logging import org.multiverse.api.{Transaction => MultiverseTransaction} import org.multiverse.api.GlobalStmInstance.getGlobalStmInstance import org.multiverse.api.ThreadLocalTransaction._ -import org.multiverse.templates.OrElseTemplate - -import scala.collection.mutable.HashMap +import org.multiverse.templates.{TransactionTemplate, OrElseTemplate} +import org.multiverse.utils.backoff.ExponentialBackoffPolicy +import org.multiverse.stms.alpha.AlphaStm class NoTransactionInScopeException extends RuntimeException class TransactionRetryException(message: String) extends RuntimeException(message) @@ -30,8 +32,8 @@ class TransactionRetryException(message: String) extends RuntimeException(messag * Here are some examples (assuming implicit transaction family name in scope): *
        * import se.scalablesolutions.akka.stm.Transaction._
      - * 
      - * atomic {
      + *
      + * atomic {
        *   .. // do something within a transaction
        * }
        * 
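        *
        * For instance, incrementing a counter held in a transactional reference
        * (a sketch; assumes TransactionalRef.get returns an Option):
        * <pre>
        * implicit val txFamilyName = "counter"
        * val counter = TransactionalRef[Int]()
        * atomic {
        *   counter.swap(counter.get.getOrElse(0) + 1)
        * }
        * </pre>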
      @@ -39,8 +41,8 @@ class TransactionRetryException(message: String) extends RuntimeException(messag * Example of atomic transaction management using atomic block with retry count: *
        * import se.scalablesolutions.akka.stm.Transaction._
      - * 
      - * atomic(maxNrOfRetries) {
      + *
      + * atomic(maxNrOfRetries) {
        *   .. // do something within a transaction
        * }
        * 
      @@ -49,10 +51,10 @@ class TransactionRetryException(message: String) extends RuntimeException(messag * Which is a good way to reduce contention and transaction collisions. *
        * import se.scalablesolutions.akka.stm.Transaction._
      - * 
      - * atomically {
      + *
      + * atomically {
        *   .. // try to do something
      - * } orElse {
      + * } orElse {
        *   .. // if transaction clashes try to do something else to minimize contention
        * }
        * 
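        *
        * Concretely, with two TransactionalRef[Int] values primary and fallback
        * (a sketch; assumes an Option-returning get as above):
        * <pre>
        * atomically {
        *   primary.swap(primary.get.getOrElse(0) + 1)
        * } orElse {
        *   fallback.swap(fallback.get.getOrElse(0) + 1)
        * }
        * </pre>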
      @@ -61,11 +63,11 @@ class TransactionRetryException(message: String) extends RuntimeException(messag * *
        * import se.scalablesolutions.akka.stm.Transaction._
      - * for (tx <- Transaction) {
      + * for (tx <- Transaction) {
        *   ... // do transactional stuff
        * }
        *
      - * val result = for (tx <- Transaction) yield {
      + * val result = for (tx <- Transaction) yield {
        *   ... // do transactional stuff yielding a result
        * }
        * 
      @@ -78,120 +80,86 @@ class TransactionRetryException(message: String) extends RuntimeException(messag * * // You can use them together with Transaction in a for comprehension since * // TransactionalRef is also monadic - * for { + * for { * tx <- Transaction * ref <- refs * } { * ... // use the ref inside a transaction * } * - * val result = for { + * val result = for { * tx <- Transaction * ref <- refs - * } yield { + * } yield { * ... // use the ref inside a transaction, yield a result * } *
* * @author Jonas Bonér */ -object Transaction extends TransactionManagement { +object Transaction extends TransactionManagement with Logging { val idFactory = new AtomicLong(-1L) /** - * See ScalaDoc on class. + * See ScalaDoc on Transaction class. */ - def map[T](f: Transaction => T)(implicit transactionFamilyName: String): T = atomic { f(getTransactionInScope) } + def map[T](f: => T)(implicit transactionFamilyName: String): T = + atomic {f} /** - * See ScalaDoc on class. + * See ScalaDoc on Transaction class. */ - def flatMap[T](f: Transaction => T)(implicit transactionFamilyName: String): T = atomic { f(getTransactionInScope) } + def flatMap[T](f: => T)(implicit transactionFamilyName: String): T = + atomic {f} /** - * See ScalaDoc on class. + * See ScalaDoc on Transaction class. */ - def foreach(f: Transaction => Unit)(implicit transactionFamilyName: String): Unit = atomic { f(getTransactionInScope) } + def foreach(f: => Unit)(implicit transactionFamilyName: String): Unit = + atomic {f} /** - * Creates a "pure" STM atomic transaction and by-passes all transactions hooks - * such as persistence etc. - * Only for internal usage. + * See ScalaDoc on Transaction class. */ - private[akka] def pureAtomic[T](body: => T): T = new AtomicTemplate[T]( - getGlobalStmInstance, "internal", false, false, TransactionManagement.MAX_NR_OF_RETRIES) { - def execute(mtx: MultiverseTransaction): T = body - }.execute() + def atomic[T](body: => T)(implicit transactionFamilyName: String): T = { + // FIXME use Transaction Builder and set the transactionFamilyName + // defaultTxBuilder.setFamilyName(transactionFamilyName) + // new TransactionTemplate[T](defaultTxBuilder.build) { + var isTopLevelTransaction = true + new TransactionTemplate[T]() { + def execute(mtx: MultiverseTransaction): T = { + val result = body - /** - * See ScalaDoc on class. - */ - def atomic[T](body: => T)(implicit transactionFamilyName: String): T = new AtomicTemplate[T]( - getGlobalStmInstance, transactionFamilyName, false, false, TransactionManagement.MAX_NR_OF_RETRIES) { - def execute(mtx: MultiverseTransaction): T = body - override def postStart(mtx: MultiverseTransaction) = { - val tx = new Transaction - tx.transaction = Some(mtx) - setTransaction(Some(tx)) - } - override def postCommit = { - if (isTransactionInScope) getTransactionInScope.commit - else throw new IllegalStateException("No transaction in scope") - } - }.execute() + val txSet = getTransactionSetInScope + log.trace("Committing transaction [%s]\n\twith family name [%s]\n\tby joining transaction set [%s]", + mtx, transactionFamilyName, txSet) + txSet.joinCommit(mtx) - /** - * See ScalaDoc on class. 
- */ - def atomic[T](retryCount: Int)(body: => T)(implicit transactionFamilyName: String): T = { - new AtomicTemplate[T](getGlobalStmInstance, transactionFamilyName, false, false, retryCount) { - def execute(mtx: MultiverseTransaction): T = body - override def postStart(mtx: MultiverseTransaction) = { + // FIXME tryJoinCommit(mtx, TransactionManagement.TRANSACTION_TIMEOUT, TimeUnit.MILLISECONDS) + //getTransactionSetInScope.tryJoinCommit(mtx, TransactionManagement.TRANSACTION_TIMEOUT, TimeUnit.MILLISECONDS) + + clearTransaction + result + } + + override def onStart(mtx: MultiverseTransaction) = { + val txSet = + if (!isTransactionSetInScope) { + isTopLevelTransaction = true + createNewTransactionSet + } else getTransactionSetInScope val tx = new Transaction tx.transaction = Some(mtx) setTransaction(Some(tx)) - } - override def postCommit = { - if (isTransactionInScope) getTransactionInScope.commit - else throw new IllegalStateException("No transaction in scope") - } - }.execute - } - /** - * See ScalaDoc on class. - */ - def atomicReadOnly[T](retryCount: Int)(body: => T)(implicit transactionFamilyName: String): T = { - new AtomicTemplate[T](getGlobalStmInstance, transactionFamilyName, false, true, retryCount) { - def execute(mtx: MultiverseTransaction): T = body - override def postStart(mtx: MultiverseTransaction) = { - val tx = new Transaction - tx.transaction = Some(mtx) - setTransaction(Some(tx)) + txSet.registerOnCommitTask(new Runnable() { + def run = tx.commit + }) + txSet.registerOnAbortTask(new Runnable() { + def run = tx.abort + }) } - override def postCommit = { - if (isTransactionInScope) getTransactionInScope.commit - else throw new IllegalStateException("No transaction in scope") - } - }.execute - } - - /** - * See ScalaDoc on class. - */ - def atomicReadOnly[T](body: => T): T = { - new AtomicTemplate[T](true) { - def execute(mtx: MultiverseTransaction): T = body - override def postStart(mtx: MultiverseTransaction) = { - val tx = new Transaction - tx.transaction = Some(mtx) - setTransaction(Some(tx)) - } - override def postCommit = { - if (isTransactionInScope) getTransactionInScope.commit - else throw new IllegalStateException("No transaction in scope") - } - }.execute + }.execute() } /** @@ -209,6 +177,16 @@ object Transaction extends TransactionManagement { def orelserun(t: MultiverseTransaction) = secondBody }.execute() } + + /** + * Creates an STM atomic transaction and by-passes all transaction hooks + * such as persistence etc. + * + * Only for internal usage. 
+ */ + private[akka] def atomic0[T](body: => T): T = new TransactionTemplate[T]() { + def execute(mtx: MultiverseTransaction): T = body + }.execute() } /** @@ -216,23 +194,29 @@ object Transaction extends TransactionManagement { */ @serializable class Transaction extends Logging { import Transaction._ - + val id = Transaction.idFactory.incrementAndGet @volatile private[this] var status: TransactionStatus = TransactionStatus.New private[akka] var transaction: Option[MultiverseTransaction] = None private[this] val persistentStateMap = new HashMap[String, Committable] private[akka] val depth = new AtomicInteger(0) - + + log.trace("Creating %s", toString) + // --- public methods --------- def commit = synchronized { - pureAtomic { + log.trace("Committing transaction %s", toString) + atomic0 { persistentStateMap.values.foreach(_.commit) - TransactionManagement.clearTransaction } status = TransactionStatus.Completed } + def abort = synchronized { + log.trace("Aborting transaction %s", toString) + } + def isNew = synchronized { status == TransactionStatus.New } def isActive = synchronized { status == TransactionStatus.Active } @@ -259,32 +243,32 @@ object Transaction extends TransactionManagement { private def ensureIsActiveOrAborted = if (!(status == TransactionStatus.Active || status == TransactionStatus.Aborted)) - throw new IllegalStateException( - "Expected ACTIVE or ABORTED transaction - current status [" + status + "]: " + toString) + throw new IllegalStateException( + "Expected ACTIVE or ABORTED transaction - current status [" + status + "]: " + toString) private def ensureIsActiveOrNew = if (!(status == TransactionStatus.Active || status == TransactionStatus.New)) - throw new IllegalStateException( - "Expected ACTIVE or NEW transaction - current status [" + status + "]: " + toString) + throw new IllegalStateException( + "Expected ACTIVE or NEW transaction - current status [" + status + "]: " + toString) // For reinitialize transaction after sending it over the wire - private[akka] def reinit = synchronized { +/* private[akka] def reinit = synchronized { import net.lag.logging.{Logger, Level} if (log eq null) { log = Logger.get(this.getClass.getName) log.setLevel(Level.ALL) // TODO: preserve logging level } } - +*/ override def equals(that: Any): Boolean = synchronized { - that != null && - that.isInstanceOf[Transaction] && + that != null && + that.isInstanceOf[Transaction] && that.asInstanceOf[Transaction].id == this.id } - - override def hashCode(): Int = synchronized { id.toInt } - - override def toString(): String = synchronized { "Transaction[" + id + ", " + status + "]" } + + override def hashCode: Int = synchronized { id.toInt } + + override def toString = synchronized { "Transaction[" + id + ", " + status + "]" } } /** diff --git a/akka-core/src/main/scala/stm/TransactionManagement.scala b/akka-core/src/main/scala/stm/TransactionManagement.scala index 2dd7ed9c79..48c8c7dd95 100644 --- a/akka-core/src/main/scala/stm/TransactionManagement.scala +++ b/akka-core/src/main/scala/stm/TransactionManagement.scala @@ -6,54 +6,81 @@ package se.scalablesolutions.akka.stm import java.util.concurrent.atomic.AtomicBoolean -import se.scalablesolutions.akka.util.Logging - import org.multiverse.api.ThreadLocalTransaction._ +import org.multiverse.commitbarriers.CountDownCommitBarrier class StmException(msg: String) extends RuntimeException(msg) -class TransactionAwareWrapperException( - val cause: Throwable, val tx: Option[Transaction]) extends RuntimeException(cause) { - override def toString(): 
String = "TransactionAwareWrapperException[" + cause + ", " + tx + "]" +class TransactionAwareWrapperException(val cause: Throwable, val tx: Option[Transaction]) extends RuntimeException(cause) { + override def toString = "TransactionAwareWrapperException[" + cause + ", " + tx + "]" } object TransactionManagement extends TransactionManagement { - import se.scalablesolutions.akka.Config._ - - val MAX_NR_OF_RETRIES = config.getInt("akka.stm.max-nr-of-retries", 100) - val TRANSACTION_ENABLED = new AtomicBoolean(config.getBool("akka.stm.service", false)) + import se.scalablesolutions.akka.config.Config._ + val TRANSACTION_ENABLED = new AtomicBoolean(config.getBool("akka.stm.service", false)) + val FAIR_TRANSACTIONS = config.getBool("akka.stm.fair", true) + val INTERRUPTIBLE = config.getBool("akka.stm.interruptible", true) + val MAX_NR_OF_RETRIES = config.getInt("akka.stm.max-nr-of-retries", 1000) + val TRANSACTION_TIMEOUT = config.getInt("akka.stm.timeout", 10000) + val SMART_TX_LENGTH_SELECTOR = config.getBool("akka.stm.smart-tx-length-selector", true) def isTransactionalityEnabled = TRANSACTION_ENABLED.get + def disableTransactions = TRANSACTION_ENABLED.set(false) - private[akka] val currentTransaction: ThreadLocal[Option[Transaction]] = new ThreadLocal[Option[Transaction]]() { + private[akka] val transactionSet = new ThreadLocal[Option[CountDownCommitBarrier]]() { + override protected def initialValue: Option[CountDownCommitBarrier] = None + } + + private[akka] val transaction = new ThreadLocal[Option[Transaction]]() { override protected def initialValue: Option[Transaction] = None } + + private[akka] def getTransactionSet: CountDownCommitBarrier = { + val option = transactionSet.get + if ((option eq null) || option.isEmpty) throw new IllegalStateException("No Transaction set in scope") + else option.get + } + + private[akka] def getTransaction: Transaction = { + val option = transaction.get + if ((option eq null) || option.isEmpty) throw new IllegalStateException("No Transaction in scope") + option.get + } } -trait TransactionManagement extends Logging { - import TransactionManagement.currentTransaction +trait TransactionManagement { - private[akka] def createNewTransaction = currentTransaction.set(Some(new Transaction)) - - private[akka] def setTransaction(transaction: Option[Transaction]) = if (transaction.isDefined) { - val tx = transaction.get - currentTransaction.set(transaction) - if (tx.transaction.isDefined) setThreadLocalTransaction(tx.transaction.get) - else throw new IllegalStateException("No transaction defined") + private[akka] def createNewTransactionSet: CountDownCommitBarrier = { + val txSet = new CountDownCommitBarrier(1, TransactionManagement.FAIR_TRANSACTIONS) + TransactionManagement.transactionSet.set(Some(txSet)) + txSet } + private[akka] def setTransactionSet(txSet: Option[CountDownCommitBarrier]) = + if (txSet.isDefined) TransactionManagement.transactionSet.set(txSet) + + private[akka] def setTransaction(tx: Option[Transaction]) = + if (tx.isDefined) TransactionManagement.transaction.set(tx) + + private[akka] def clearTransactionSet = TransactionManagement.transactionSet.set(None) + private[akka] def clearTransaction = { - currentTransaction.set(None) + TransactionManagement.transaction.set(None) setThreadLocalTransaction(null) } - private[akka] def getTransactionInScope = currentTransaction.get.get - - private[akka] def isTransactionInScope = currentTransaction.get.isDefined + private[akka] def getTransactionSetInScope = TransactionManagement.getTransactionSet - 
private[akka] def incrementTransaction = if (isTransactionInScope) getTransactionInScope.increment + private[akka] def getTransactionInScope = TransactionManagement.getTransaction - private[akka] def decrementTransaction = if (isTransactionInScope) getTransactionInScope.decrement -} + private[akka] def isTransactionSetInScope = { + val option = TransactionManagement.transactionSet.get + (option ne null) && option.isDefined + } + private[akka] def isTransactionInScope = { + val option = TransactionManagement.transaction.get + (option ne null) && option.isDefined + } +} \ No newline at end of file diff --git a/akka-core/src/main/scala/stm/TransactionalState.scala b/akka-core/src/main/scala/stm/TransactionalState.scala index 0407dbe433..426474f791 100644 --- a/akka-core/src/main/scala/stm/TransactionalState.scala +++ b/akka-core/src/main/scala/stm/TransactionalState.scala @@ -2,14 +2,12 @@ * Copyright (C) 2009-2010 Scalable Solutions AB */ -package se.scalablesolutions.akka.state +package se.scalablesolutions.akka.stm import se.scalablesolutions.akka.stm.Transaction.atomic -import se.scalablesolutions.akka.stm.NoTransactionInScopeException -import se.scalablesolutions.akka.collection._ import se.scalablesolutions.akka.util.UUID -import org.multiverse.datastructures.refs.manual.Ref; +import org.multiverse.stms.alpha.AlphaRef /** * Example Scala usage: @@ -55,6 +53,17 @@ trait Committable { } /** + * Alias to TransactionalRef. + * + * @author Jonas Bonér + */ +object Ref { + def apply[T]() = new Ref[T] +} + +/** + * Alias to Ref. + * * @author Jonas Bonér */ object TransactionalRef { @@ -67,8 +76,17 @@ object TransactionalRef { def apply[T]() = new TransactionalRef[T] } +/** + * Implements a transactional managed reference. + * Alias to TransactionalRef. + * + * @author Jonas Bonér + */ +class Ref[T] extends TransactionalRef[T] + /** * Implements a transactional managed reference. + * Alias to Ref. * * @author Jonas Bonér */ @@ -78,7 +96,7 @@ class TransactionalRef[T] extends Transactional { implicit val txInitName = "TransactionalRef:Init" val uuid = UUID.newUuid.toString - private[this] val ref: Ref[T] = atomic { new Ref } + private[this] lazy val ref: AlphaRef[T] = new AlphaRef def swap(elem: T) = { ensureIsInTransaction diff --git a/akka-core/src/main/scala/stm/Vector.scala b/akka-core/src/main/scala/stm/Vector.scala index e21d01d9e6..4eec750591 100644 --- a/akka-core/src/main/scala/stm/Vector.scala +++ b/akka-core/src/main/scala/stm/Vector.scala @@ -32,7 +32,7 @@ POSSIBILITY OF SUCH DAMAGE. **/ -package se.scalablesolutions.akka.collection +package se.scalablesolutions.akka.stm import Vector._ @@ -54,7 +54,7 @@ class Vector[+T] private (val length: Int, shift: Int, root: Array[AnyRef], tail * (somewhat dynamically-typed) implementation in place. 
*/ - private[collection] def this() = this(0, 5, EmptyArray, EmptyArray) + private[stm] def this() = this(0, 5, EmptyArray, EmptyArray) def apply(i: Int): T = { if (i >= 0 && i < length) { @@ -317,14 +317,14 @@ class Vector[+T] private (val length: Int, shift: Int, root: Array[AnyRef], tail } object Vector { - private[collection] val EmptyArray = new Array[AnyRef](0) + private[stm] val EmptyArray = new Array[AnyRef](0) def apply[T](elems: T*) = elems.foldLeft(EmptyVector:Vector[T]) { _ + _ } def unapplySeq[T](vec: Vector[T]): Option[Seq[T]] = Some(vec) @inline - private[collection] def array(elems: AnyRef*) = { + private[stm] def array(elems: AnyRef*) = { val back = new Array[AnyRef](elems.length) Array.copy(elems, 0, back, 0, back.length) @@ -334,7 +334,7 @@ object Vector { object EmptyVector extends Vector[Nothing] -private[collection] abstract class VectorProjection[+T] extends Vector[T] { +private[stm] abstract class VectorProjection[+T] extends Vector[T] { override val length: Int override def apply(i: Int): T diff --git a/akka-core/src/test/scala/ActorRegistryTest.scala b/akka-core/src/test/scala/ActorRegistryTest.scala new file mode 100644 index 0000000000..ada0c027d5 --- /dev/null +++ b/akka-core/src/test/scala/ActorRegistryTest.scala @@ -0,0 +1,160 @@ +package se.scalablesolutions.akka.actor + +import org.scalatest.junit.JUnitSuite +import org.junit.Test + +class ActorRegistryTest extends JUnitSuite { + var record = "" + class TestActor extends Actor { + id = "MyID" + def receive = { + case "ping" => + record = "pong" + record + reply("got ping") + } + } + + @Test def shouldGetActorByIdFromActorRegistry = { + ActorRegistry.shutdownAll + val actor = new TestActor + actor.start + val actors = ActorRegistry.actorsFor("MyID") + assert(actors.size === 1) + assert(actors.head.isInstanceOf[TestActor]) + assert(actors.head.getId == "MyID") + actor.stop + } + + @Test def shouldGetActorByUUIDFromActorRegistry = { + ActorRegistry.shutdownAll + val actor = new TestActor + val uuid = actor.uuid + actor.start + val actorOrNone = ActorRegistry.actorFor(uuid) + assert(actorOrNone.isDefined) + assert(actorOrNone.get.uuid === uuid) + actor.stop + } + + @Test def shouldGetActorByClassFromActorRegistry = { + ActorRegistry.shutdownAll + val actor = new TestActor + actor.start + val actors = ActorRegistry.actorsFor(classOf[TestActor]) + assert(actors.size === 1) + assert(actors.head.isInstanceOf[TestActor]) + assert(actors.head.getId === "MyID") + actor.stop + } + + @Test def shouldGetActorByManifestFromActorRegistry = { + ActorRegistry.shutdownAll + val actor = new TestActor + actor.start + val actors: List[TestActor] = ActorRegistry.actorsFor[TestActor] + assert(actors.size === 1) + assert(actors.head.isInstanceOf[TestActor]) + assert(actors.head.getId === "MyID") + actor.stop + } + + @Test def shouldGetActorsByIdFromActorRegistry = { + ActorRegistry.shutdownAll + val actor1 = new TestActor + actor1.start + val actor2 = new TestActor + actor2.start + val actors = ActorRegistry.actorsFor("MyID") + assert(actors.size === 2) + assert(actors.head.isInstanceOf[TestActor]) + assert(actors.head.getId === "MyID") + assert(actors.last.isInstanceOf[TestActor]) + assert(actors.last.getId === "MyID") + actor1.stop + actor2.stop + } + + @Test def shouldGetActorsByClassFromActorRegistry = { + ActorRegistry.shutdownAll + val actor1 = new TestActor + actor1.start + val actor2 = new TestActor + actor2.start + val actors = ActorRegistry.actorsFor(classOf[TestActor]) + assert(actors.size === 2) + 
assert(actors.head.isInstanceOf[TestActor]) + assert(actors.head.getId === "MyID") + assert(actors.last.isInstanceOf[TestActor]) + assert(actors.last.getId === "MyID") + actor1.stop + actor2.stop + } + + @Test def shouldGetActorsByManifestFromActorRegistry = { + ActorRegistry.shutdownAll + val actor1 = new TestActor + actor1.start + val actor2 = new TestActor + actor2.start + val actors: List[TestActor] = ActorRegistry.actorsFor[TestActor] + assert(actors.size === 2) + assert(actors.head.isInstanceOf[TestActor]) + assert(actors.head.getId === "MyID") + assert(actors.last.isInstanceOf[TestActor]) + assert(actors.last.getId === "MyID") + actor1.stop + actor2.stop + } + + @Test def shouldGetAllActorsFromActorRegistry = { + ActorRegistry.shutdownAll + val actor1 = new TestActor + actor1.start + val actor2 = new TestActor + actor2.start + val actors = ActorRegistry.actors + assert(actors.size === 2) + assert(actors.head.isInstanceOf[TestActor]) + assert(actors.head.getId === "MyID") + assert(actors.last.isInstanceOf[TestActor]) + assert(actors.last.getId === "MyID") + actor1.stop + actor2.stop + } + + @Test def shouldGetResponseByAllActorsInActorRegistryWhenInvokingForeach = { + ActorRegistry.shutdownAll + val actor1 = new TestActor + actor1.start + val actor2 = new TestActor + actor2.start + record = "" + ActorRegistry.foreach(actor => actor !! "ping") + assert(record === "pongpong") + actor1.stop + actor2.stop + } + + @Test def shouldShutdownAllActorsInActorRegistry = { + ActorRegistry.shutdownAll + val actor1 = new TestActor + actor1.start + val actor2 = new TestActor + actor2.start + ActorRegistry.shutdownAll + assert(ActorRegistry.actors.size === 0) + } + + @Test def shouldRemoveUnregisterActorInActorRegistry = { + ActorRegistry.shutdownAll + val actor1 = new TestActor + actor1.start + val actor2 = new TestActor + actor2.start + assert(ActorRegistry.actors.size === 2) + ActorRegistry.unregister(actor1) + assert(ActorRegistry.actors.size === 1) + ActorRegistry.unregister(actor2) + assert(ActorRegistry.actors.size === 0) + } +} diff --git a/akka-core/src/test/scala/AgentTest.scala b/akka-core/src/test/scala/AgentTest.scala new file mode 100644 index 0000000000..a81a945439 --- /dev/null +++ b/akka-core/src/test/scala/AgentTest.scala @@ -0,0 +1,78 @@ +package se.scalablesolutions.akka.actor + +import se.scalablesolutions.akka.actor.Actor.transactor +import se.scalablesolutions.akka.stm.Transaction.atomic +import se.scalablesolutions.akka.util.Logging + +import org.scalatest.Suite +import org.scalatest.junit.JUnitRunner +import org.scalatest.matchers.MustMatchers + +import org.junit.runner.RunWith +import org.junit.{Test} + +@RunWith(classOf[JUnitRunner]) +class AgentTest extends junit.framework.TestCase +with Suite with MustMatchers +with ActorTestUtil with Logging { + + implicit val txFamilyName = "test" + + @Test def testSendFun = verify(new TestActor { + def test = { + val agent = Agent(5) + handle(agent) { + agent update (_ + 1) + agent update (_ * 2) + + val result = agent() + result must be(12) + } + } + }) + + @Test def testSendValue = verify(new TestActor { + def test = { + val agent = Agent(5) + handle(agent) { + agent update 6 + val result = agent() + result must be(6) + } + } + }) + + @Test def testOneAgentUpdateWithinEnlosingTransactionSuccess = { + case object Go + val agent = Agent(5) + val tx = transactor { + case Go => agent update (_ + 1) + } + tx send Go + Thread.sleep(5000) + val result = agent() + result must be(6) + agent.close + tx.stop + } + + @Test def 
testDoingAgentGetInEnlosingTransactionShouldYieldException = { + import java.util.concurrent.CountDownLatch + case object Go + val latch = new CountDownLatch(1) + val agent = Agent(5) + val tx = transactor { + case Go => + agent update (_ * 2) + try { agent() } + catch { + case _ => latch.countDown + } + } + tx send Go + latch.await // FIXME should await with timeout and fail if timeout + agent.close + tx.stop + assert(true) + } +} diff --git a/akka-core/src/test/scala/ClientInitiatedRemoteActorTest.scala b/akka-core/src/test/scala/ClientInitiatedRemoteActorTest.scala index 6fbb8b8481..ff2843efe8 100644 --- a/akka-core/src/test/scala/ClientInitiatedRemoteActorTest.scala +++ b/akka-core/src/test/scala/ClientInitiatedRemoteActorTest.scala @@ -49,7 +49,7 @@ class RemoteActorSpecActorAsyncSender extends Actor { class ClientInitiatedRemoteActorTest extends JUnitSuite { import Actor.Sender.Self - se.scalablesolutions.akka.Config.config + akka.config.Config.config val HOSTNAME = "localhost" val PORT1 = 9990 diff --git a/akka-core/src/test/scala/ExecutorBasedEventDrivenDispatcherActorsTest.scala b/akka-core/src/test/scala/ExecutorBasedEventDrivenDispatcherActorsTest.scala new file mode 100644 index 0000000000..b3e04f3244 --- /dev/null +++ b/akka-core/src/test/scala/ExecutorBasedEventDrivenDispatcherActorsTest.scala @@ -0,0 +1,85 @@ +package se.scalablesolutions.akka.actor + +import org.scalatest.junit.JUnitSuite +import org.junit.Test +import se.scalablesolutions.akka.dispatch.Dispatchers +import org.scalatest.matchers.MustMatchers +import java.util.concurrent.CountDownLatch + +/** + * Tests the behaviour of the executor based event driven dispatcher when multiple actors are being dispatched on it. + * + * @author Jan Van Besien + */ +class ExecutorBasedEventDrivenDispatcherActorsTest extends JUnitSuite with MustMatchers with ActorTestUtil { + class SlowActor(finishedCounter: CountDownLatch) extends Actor { + messageDispatcher = Dispatchers.globalExecutorBasedEventDrivenDispatcher + id = "SlowActor" + + def receive = { + case x: Int => { + Thread.sleep(50) // slow actor + finishedCounter.countDown + } + } + } + + class FastActor(finishedCounter: CountDownLatch) extends Actor { + messageDispatcher = Dispatchers.globalExecutorBasedEventDrivenDispatcher + id = "FastActor" + + def receive = { + case x: Int => { + finishedCounter.countDown + } + } + } + + @Test def slowActorShouldntBlockFastActor = verify(new TestActor { + def test = { + val sFinished = new CountDownLatch(50) + val fFinished = new CountDownLatch(10) + val s = new SlowActor(sFinished) + val f = new FastActor(fFinished) + + handle(s, f) { + // send a lot of stuff to s + for (i <- 1 to 50) { + s ! i + } + + // send some messages to f + for (i <- 1 to 10) { + f ! 
i + } + + // now assert that f is finished while s is still busy + fFinished.await + assert(sFinished.getCount > 0) + } + } + }) + +} + +trait ActorTestUtil { + def handle[T](actors: Actor*)(test: => T): T = { + for (a <- actors) a.start + try { + test + } + finally { + for (a <- actors) a.stop + } + } + + def verify(actor: TestActor): Unit = handle(actor) { + actor.test + } +} + +abstract class TestActor extends Actor with ActorTestUtil { + def test: Unit + + def receive = {case _ =>} +} diff --git a/akka-core/src/test/scala/InMemoryActorTest.scala b/akka-core/src/test/scala/InMemoryActorTest.scala index cd06b80d0a..5692d7b01f 100644 --- a/akka-core/src/test/scala/InMemoryActorTest.scala +++ b/akka-core/src/test/scala/InMemoryActorTest.scala @@ -3,7 +3,7 @@ package se.scalablesolutions.akka.actor import org.scalatest.junit.JUnitSuite import org.junit.Test -import se.scalablesolutions.akka.state.{TransactionalState, TransactionalMap, TransactionalRef, TransactionalVector} +import se.scalablesolutions.akka.stm.{TransactionalState, TransactionalMap, TransactionalRef, TransactionalVector} case class GetMapState(key: String) case object GetVectorState @@ -23,7 +23,7 @@ case class SuccessOneWay(key: String, value: String) case class FailureOneWay(key: String, value: String, failer: Actor) class InMemStatefulActor extends Actor { - timeout = 100000 + timeout = 5000 makeTransactionRequired private lazy val mapState = TransactionalState.newMap[String, String] @@ -86,8 +86,8 @@ class InMemFailerActor extends Actor { } class InMemoryActorTest extends JUnitSuite { + import Actor.Sender.Self - /* @Test def shouldOneWayMapShouldNotRollbackStateForStatefulServerInCaseOfSuccess = { val stateful = new InMemStatefulActor @@ -98,7 +98,7 @@ class InMemoryActorTest extends JUnitSuite { Thread.sleep(1000) assert("new state" === (stateful !! GetMapState("testShouldNotRollbackStateForStatefulServerInCaseOfSuccess")).get) } - */ + @Test def shouldMapShouldNotRollbackStateForStatefulServerInCaseOfSuccess = { val stateful = new InMemStatefulActor @@ -107,7 +107,7 @@ class InMemoryActorTest extends JUnitSuite { stateful !! Success("testShouldNotRollbackStateForStatefulServerInCaseOfSuccess", "new state") // transactionrequired assert("new state" === (stateful !! GetMapState("testShouldNotRollbackStateForStatefulServerInCaseOfSuccess")).get) } - /* + @Test def shouldOneWayMapShouldRollbackStateForStatefulServerInCaseOfFailure = { val stateful = new InMemStatefulActor @@ -120,7 +120,7 @@ class InMemoryActorTest extends JUnitSuite { Thread.sleep(1000) assert("init" === (stateful !! GetMapState("testShouldRollbackStateForStatefulServerInCaseOfFailure")).get) // check that state is == init state } - */ + @Test def shouldMapShouldRollbackStateForStatefulServerInCaseOfFailure = { val stateful = new InMemStatefulActor @@ -134,7 +134,7 @@ class InMemoryActorTest extends JUnitSuite { } catch {case e: RuntimeException => {}} assert("init" === (stateful !! GetMapState("testShouldRollbackStateForStatefulServerInCaseOfFailure")).get) // check that state is == init state } - /* + @Test def shouldOneWayVectorShouldNotRollbackStateForStatefulServerInCaseOfSuccess = { val stateful = new InMemStatefulActor @@ -145,7 +145,7 @@ class InMemoryActorTest extends JUnitSuite { Thread.sleep(1000) assert(2 === (stateful !! GetVectorSize).get) } - */ + @Test def shouldVectorShouldNotRollbackStateForStatefulServerInCaseOfSuccess = { val stateful = new InMemStatefulActor @@ -154,7 +154,7 @@ class InMemoryActorTest extends JUnitSuite { stateful !! 
Success("testShouldNotRollbackStateForStatefulServerInCaseOfSuccess", "new state") // transactionrequired assert(2 === (stateful !! GetVectorSize).get) } - /* + @Test def shouldOneWayVectorShouldRollbackStateForStatefulServerInCaseOfFailure = { val stateful = new InMemStatefulActor @@ -167,7 +167,7 @@ class InMemoryActorTest extends JUnitSuite { Thread.sleep(1000) assert(1 === (stateful !! GetVectorSize).get) } - */ + @Test def shouldVectorShouldRollbackStateForStatefulServerInCaseOfFailure = { val stateful = new InMemStatefulActor @@ -181,7 +181,7 @@ class InMemoryActorTest extends JUnitSuite { } catch {case e: RuntimeException => {}} assert(1 === (stateful !! GetVectorSize).get) } - /* + @Test def shouldOneWayRefShouldNotRollbackStateForStatefulServerInCaseOfSuccess = { val stateful = new InMemStatefulActor @@ -192,7 +192,7 @@ class InMemoryActorTest extends JUnitSuite { Thread.sleep(1000) assert("new state" === (stateful !! GetRefState).get) } - */ + @Test def shouldRefShouldNotRollbackStateForStatefulServerInCaseOfSuccess = { val stateful = new InMemStatefulActor @@ -201,7 +201,7 @@ class InMemoryActorTest extends JUnitSuite { stateful !! Success("testShouldNotRollbackStateForStatefulServerInCaseOfSuccess", "new state") // transactionrequired assert("new state" === (stateful !! GetRefState).get) } - /* + @Test def shouldOneWayRefShouldRollbackStateForStatefulServerInCaseOfFailure = { val stateful = new InMemStatefulActor @@ -212,9 +212,9 @@ class InMemoryActorTest extends JUnitSuite { failer.start stateful ! FailureOneWay("testShouldRollbackStateForStatefulServerInCaseOfFailure", "new state", failer) // call failing transactionrequired method Thread.sleep(1000) - assert("init" === (stateful !! GetRefState).get) // check that state is == init state + assert("init" === (stateful !! 
(GetRefState, 1000000)).get) // check that state is == init state } - */ + @Test def shouldRefShouldRollbackStateForStatefulServerInCaseOfFailure = { val stateful = new InMemStatefulActor diff --git a/akka-core/src/test/scala/MemoryTest.scala b/akka-core/src/test/scala/MemoryTest.scala index 2a56d61465..5684496c02 100644 --- a/akka-core/src/test/scala/MemoryTest.scala +++ b/akka-core/src/test/scala/MemoryTest.scala @@ -22,9 +22,11 @@ class MemoryFootprintTest extends JUnitSuite { // Actors are put in AspectRegistry when created so they won't be GCd here val totalMem = Runtime.getRuntime.totalMemory - Runtime.getRuntime.freeMemory + println("Memory before " + totalMem) (1 until NR_OF_ACTORS).foreach(i => new Mem) val newTotalMem = Runtime.getRuntime.totalMemory - Runtime.getRuntime.freeMemory + println("Memory after " + newTotalMem) val memPerActor = (newTotalMem - totalMem) / NR_OF_ACTORS println("Memory footprint per actor is : " + memPerActor) diff --git a/akka-core/src/test/scala/PerformanceTest.scala b/akka-core/src/test/scala/PerformanceTest.scala index cda74ad2d2..2e5d33183b 100644 --- a/akka-core/src/test/scala/PerformanceTest.scala +++ b/akka-core/src/test/scala/PerformanceTest.scala @@ -1,4 +1,4 @@ -package test +package se.scalablesolutions.akka import org.scalatest.junit.JUnitSuite import org.junit.Test @@ -16,6 +16,9 @@ import net.lag.logging.Logger class PerformanceTest extends JUnitSuite { @Test + def dummyTest = assert(true) + +// @Test def benchAkkaActorsVsScalaActors = { def stressTestAkkaActors(nrOfMessages: Int, nrOfActors: Int, sleepTime: Int): Long = { @@ -279,7 +282,7 @@ class PerformanceTest extends JUnitSuite { var nrOfMessages = 2000000 var nrOfActors = 4 - var akkaTime = stressTestAkkaActors(nrOfMessages, nrOfActors, 1000 * 20) + var akkaTime = stressTestAkkaActors(nrOfMessages, nrOfActors, 1000 * 30) var scalaTime = stressTestScalaActors(nrOfMessages, nrOfActors, 1000 * 40) var ratio: Double = scalaTime.toDouble / akkaTime.toDouble diff --git a/akka-core/src/test/scala/RemoteClientShutdownTest.scala b/akka-core/src/test/scala/RemoteClientShutdownTest.scala index f6fbea1bb9..d330dce5ce 100644 --- a/akka-core/src/test/scala/RemoteClientShutdownTest.scala +++ b/akka-core/src/test/scala/RemoteClientShutdownTest.scala @@ -6,7 +6,7 @@ import Actor.Sender.Self import org.scalatest.junit.JUnitSuite import org.junit.Test - +/* class RemoteClientShutdownTest extends JUnitSuite { @Test def shouldShutdownRemoteClient = { RemoteNode.start("localhost", 9999) @@ -28,3 +28,4 @@ class TravelingActor extends RemoteActor("localhost", 9999) { case _ => log.info("message received") } } +*/ \ No newline at end of file diff --git a/akka-core/src/test/scala/RemoteSupervisorTest.scala b/akka-core/src/test/scala/RemoteSupervisorTest.scala index 6009f0d5e5..841e4a996a 100644 --- a/akka-core/src/test/scala/RemoteSupervisorTest.scala +++ b/akka-core/src/test/scala/RemoteSupervisorTest.scala @@ -79,7 +79,8 @@ object BinaryString{ class RemoteSupervisorTest extends JUnitSuite { import Actor.Sender.Self - se.scalablesolutions.akka.Config.config + akka.config.Config.config + new Thread(new Runnable() { def run = { RemoteNode.start diff --git a/akka-core/src/test/scala/ServerInitiatedRemoteActorTest.scala b/akka-core/src/test/scala/ServerInitiatedRemoteActorTest.scala index ab4f0d2cdd..22ea078b1e 100644 --- a/akka-core/src/test/scala/ServerInitiatedRemoteActorTest.scala +++ b/akka-core/src/test/scala/ServerInitiatedRemoteActorTest.scala @@ -58,8 +58,9 @@ object ServerInitiatedRemoteActorTest { 
class ServerInitiatedRemoteActorTest extends JUnitSuite { import ServerInitiatedRemoteActorTest._ - - se.scalablesolutions.akka.Config.config + + import Actor.Sender.Self + akka.config.Config.config private val unit = TimeUnit.MILLISECONDS diff --git a/akka-core/src/test/scala/ShutdownSpec.scala b/akka-core/src/test/scala/ShutdownSpec.scala index ba03fbe902..20927bbfb1 100644 --- a/akka-core/src/test/scala/ShutdownSpec.scala +++ b/akka-core/src/test/scala/ShutdownSpec.scala @@ -2,9 +2,8 @@ package se.scalablesolutions.akka.remote import se.scalablesolutions.akka.actor.Actor -object ActorShutdownSpec { +object ActorShutdownRunner { def main(args: Array[String]) { - class MyActor extends Actor { def receive = { case "test" => println("received test") @@ -22,7 +21,7 @@ object ActorShutdownSpec { // case 2 -object RemoteServerAndClusterShutdownSpec { +object RemoteServerAndClusterShutdownRunner { def main(args: Array[String]) { val s1 = new RemoteServer val s2 = new RemoteServer diff --git a/akka-core/src/test/scala/ThreadBasedDispatcherTest.scala b/akka-core/src/test/scala/ThreadBasedDispatcherTest.scala index b9663352c7..c848c56991 100644 --- a/akka-core/src/test/scala/ThreadBasedDispatcherTest.scala +++ b/akka-core/src/test/scala/ThreadBasedDispatcherTest.scala @@ -78,7 +78,7 @@ class ThreadBasedDispatcherTest extends JUnitSuite { }) dispatcher.start for (i <- 0 until 100) { - dispatcher.dispatch(new MessageInvocation(key1, new Integer(i), None, None, None)) + dispatcher.dispatch(new MessageInvocation(key1, i, None, None, None)) } assert(handleLatch.await(5, TimeUnit.SECONDS)) assert(!threadingIssueDetected.get) diff --git a/akka-fun-test-java/pom.xml b/akka-fun-test-java/pom.xml index beb19f25c5..c4851757c1 100644 --- a/akka-fun-test-java/pom.xml +++ b/akka-fun-test-java/pom.xml @@ -5,31 +5,43 @@ Akka Functional Tests in Java akka-fun-test-java - + se.scalablesolutions.akka + 0.7 jar - - akka - se.scalablesolutions.akka - 0.7-SNAPSHOT - + + 2.7.7 + 0.5.2 + 1.1.5 + 1.9.18-i + - akka-kernel - ${project.groupId} - ${project.version} + se.scalablesolutions.akka + akka-kernel_2.7.7 + 0.7 - ${project.groupId} - akka-persistence-cassandra - ${project.version} + se.scalablesolutions.akka + akka-persistence-cassandra_2.7.7 + 0.7 com.google.protobuf protobuf-java 2.2.0 + + org.codehaus.jackson + jackson-core-asl + 1.2.1 + + + org.codehaus.jackson + jackson-mapper-asl + 1.2.1 + com.sun.grizzly grizzly-servlet-webserver @@ -94,7 +106,6 @@ maven-surefire-plugin - **/InMemNestedStateTest* **/*Persistent* @@ -117,7 +128,4 @@ - - - diff --git a/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/ActiveObjectGuiceConfiguratorTest.java b/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/ActiveObjectGuiceConfiguratorTest.java index d328f2452d..69f74ec537 100644 --- a/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/ActiveObjectGuiceConfiguratorTest.java +++ b/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/ActiveObjectGuiceConfiguratorTest.java @@ -9,7 +9,7 @@ import com.google.inject.Scopes; import junit.framework.TestCase; -import se.scalablesolutions.akka.Config; +import se.scalablesolutions.akka.config.Config; import se.scalablesolutions.akka.config.ActiveObjectConfigurator; import static se.scalablesolutions.akka.config.JavaConfig.*; import se.scalablesolutions.akka.dispatch.*; diff --git a/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/Bar.java b/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/Bar.java index 
9a3ff80aca..3d85d89a17 100644 --- a/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/Bar.java +++ b/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/Bar.java @@ -1,6 +1,6 @@ package se.scalablesolutions.akka.api; -import se.scalablesolutions.akka.annotation.oneway; +import se.scalablesolutions.akka.actor.annotation.oneway; public interface Bar { @oneway diff --git a/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/Foo.java b/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/Foo.java index bb9cfd83d4..962f0b9424 100644 --- a/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/Foo.java +++ b/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/Foo.java @@ -1,7 +1,7 @@ package se.scalablesolutions.akka.api; import com.google.inject.Inject; -import se.scalablesolutions.akka.annotation.oneway; +import se.scalablesolutions.akka.actor.annotation.oneway; public class Foo extends se.scalablesolutions.akka.serialization.Serializable.JavaJSON { @Inject diff --git a/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/InMemNestedStateTest.java b/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/InMemNestedStateTest.java index 8a51feed6b..992c188fa1 100644 --- a/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/InMemNestedStateTest.java +++ b/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/InMemNestedStateTest.java @@ -4,14 +4,14 @@ package se.scalablesolutions.akka.api; -import se.scalablesolutions.akka.Config; import se.scalablesolutions.akka.config.*; +import se.scalablesolutions.akka.config.Config; import se.scalablesolutions.akka.config.ActiveObjectConfigurator; import static se.scalablesolutions.akka.config.JavaConfig.*; import se.scalablesolutions.akka.actor.*; -import se.scalablesolutions.akka.Kernel; +import se.scalablesolutions.akka.kernel.Kernel; import junit.framework.TestCase; -/* + public class InMemNestedStateTest extends TestCase { static String messageLog = ""; @@ -133,4 +133,3 @@ public class InMemNestedStateTest extends TestCase { assertEquals("init", nested.getRefState()); // check that state is == init state } } -*/ \ No newline at end of file diff --git a/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/InMemStateful.java b/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/InMemStateful.java index 60b2008716..afe2f2e232 100644 --- a/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/InMemStateful.java +++ b/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/InMemStateful.java @@ -1,10 +1,10 @@ package se.scalablesolutions.akka.api; -import se.scalablesolutions.akka.annotation.transactionrequired; -import se.scalablesolutions.akka.annotation.prerestart; -import se.scalablesolutions.akka.annotation.postrestart; -import se.scalablesolutions.akka.annotation.inittransactionalstate; -import se.scalablesolutions.akka.state.*; +import se.scalablesolutions.akka.actor.annotation.transactionrequired; +import se.scalablesolutions.akka.actor.annotation.prerestart; +import se.scalablesolutions.akka.actor.annotation.postrestart; +import se.scalablesolutions.akka.actor.annotation.inittransactionalstate; +import se.scalablesolutions.akka.stm.*; @transactionrequired public class InMemStateful { diff --git a/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/InMemStatefulNested.java b/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/InMemStatefulNested.java index abaedf8ae9..932dc2c162 100644 --- 
a/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/InMemStatefulNested.java +++ b/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/InMemStatefulNested.java @@ -1,8 +1,8 @@ package se.scalablesolutions.akka.api; -import se.scalablesolutions.akka.annotation.transactionrequired; -import se.scalablesolutions.akka.annotation.inittransactionalstate; -import se.scalablesolutions.akka.state.*; +import se.scalablesolutions.akka.actor.annotation.transactionrequired; +import se.scalablesolutions.akka.actor.annotation.inittransactionalstate; +import se.scalablesolutions.akka.stm.*; @transactionrequired public class InMemStatefulNested { diff --git a/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/InMemoryStateTest.java b/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/InMemoryStateTest.java index aa2704685f..740bfd892c 100644 --- a/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/InMemoryStateTest.java +++ b/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/InMemoryStateTest.java @@ -6,14 +6,14 @@ package se.scalablesolutions.akka.api; import junit.framework.TestCase; -import se.scalablesolutions.akka.Config; +import se.scalablesolutions.akka.config.Config; import se.scalablesolutions.akka.config.*; import se.scalablesolutions.akka.config.ActiveObjectConfigurator; import static se.scalablesolutions.akka.config.JavaConfig.*; import se.scalablesolutions.akka.actor.*; -import se.scalablesolutions.akka.Kernel; +import se.scalablesolutions.akka.kernel.Kernel; public class InMemoryStateTest extends TestCase { static String messageLog = ""; diff --git a/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/PersistenceManager.java b/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/PersistenceManager.java index cd856b64df..080c1cbd0b 100644 --- a/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/PersistenceManager.java +++ b/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/PersistenceManager.java @@ -4,7 +4,7 @@ public class PersistenceManager { private static volatile boolean isRunning = false; public static void init() { if (!isRunning) { - se.scalablesolutions.akka.Kernel$.MODULE$.startRemoteService(); + se.scalablesolutions.akka.kernel.Kernel$.MODULE$.startRemoteService(); isRunning = true; } } diff --git a/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/PersistentClasher.java b/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/PersistentClasher.java index d5360da3bc..d5c1bdf00c 100644 --- a/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/PersistentClasher.java +++ b/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/PersistentClasher.java @@ -1,7 +1,8 @@ package se.scalablesolutions.akka.api; -import se.scalablesolutions.akka.state.*; -import se.scalablesolutions.akka.annotation.inittransactionalstate; +import se.scalablesolutions.akka.persistence.common.*; +import se.scalablesolutions.akka.persistence.cassandra.*; +import se.scalablesolutions.akka.actor.annotation.inittransactionalstate; public class PersistentClasher { private PersistentMap state; diff --git a/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/PersistentNestedStateTest.java b/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/PersistentNestedStateTest.java index 7fd3a65dfb..796d3d913a 100644 --- a/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/PersistentNestedStateTest.java +++ 
b/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/PersistentNestedStateTest.java @@ -8,7 +8,7 @@ import se.scalablesolutions.akka.config.*; import se.scalablesolutions.akka.config.ActiveObjectConfigurator; import static se.scalablesolutions.akka.config.JavaConfig.*; import se.scalablesolutions.akka.actor.*; - import se.scalablesolutions.akka.Kernel; +import se.scalablesolutions.akka.kernel.Kernel; import junit.framework.TestCase; diff --git a/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/PersistentStateful.java b/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/PersistentStateful.java index 3cac0ae062..6a8d3353b7 100644 --- a/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/PersistentStateful.java +++ b/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/PersistentStateful.java @@ -1,8 +1,9 @@ package se.scalablesolutions.akka.api; -import se.scalablesolutions.akka.annotation.inittransactionalstate; -import se.scalablesolutions.akka.annotation.transactionrequired; -import se.scalablesolutions.akka.state.*; +import se.scalablesolutions.akka.actor.annotation.inittransactionalstate; +import se.scalablesolutions.akka.actor.annotation.transactionrequired; +import se.scalablesolutions.akka.persistence.common.*; +import se.scalablesolutions.akka.persistence.cassandra.*; @transactionrequired public class PersistentStateful { diff --git a/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/PersistentStatefulNested.java b/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/PersistentStatefulNested.java index 50e9b7ae1d..bd931ef108 100644 --- a/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/PersistentStatefulNested.java +++ b/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/PersistentStatefulNested.java @@ -1,8 +1,9 @@ package se.scalablesolutions.akka.api; -import se.scalablesolutions.akka.annotation.inittransactionalstate; -import se.scalablesolutions.akka.annotation.transactionrequired; -import se.scalablesolutions.akka.state.*; +import se.scalablesolutions.akka.actor.annotation.inittransactionalstate; +import se.scalablesolutions.akka.actor.annotation.transactionrequired; +import se.scalablesolutions.akka.persistence.common.*; +import se.scalablesolutions.akka.persistence.cassandra.*; @transactionrequired public class PersistentStatefulNested { diff --git a/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/RemoteInMemoryStateTest.java b/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/RemoteInMemoryStateTest.java index d2f67e4bc7..d0c22470e2 100644 --- a/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/RemoteInMemoryStateTest.java +++ b/akka-fun-test-java/src/test/java/se/scalablesolutions/akka/api/RemoteInMemoryStateTest.java @@ -4,7 +4,7 @@ package se.scalablesolutions.akka.api; -import se.scalablesolutions.akka.Config; +import se.scalablesolutions.akka.config.Config; import se.scalablesolutions.akka.actor.ActiveObject; import se.scalablesolutions.akka.config.ActiveObjectConfigurator; import se.scalablesolutions.akka.remote.RemoteNode; diff --git a/akka-kernel/pom.xml b/akka-kernel/pom.xml deleted file mode 100644 index b553cb7a79..0000000000 --- a/akka-kernel/pom.xml +++ /dev/null @@ -1,136 +0,0 @@ - - 4.0.0 - - akka-kernel - Akka Kernel Module - - jar - - - akka - se.scalablesolutions.akka - 0.7-SNAPSHOT - - - - - - akka-rest - ${project.groupId} - ${project.version} - - - akka-amqp - ${project.groupId} - ${project.version} - - - 
akka-security - ${project.groupId} - ${project.version} - - - akka-persistence-cassandra - ${project.groupId} - ${project.version} - - - akka-persistence-mongo - ${project.groupId} - ${project.version} - - - - akka-comet - ${project.groupId} - ${project.version} - - - akka-cluster-jgroups - ${project.groupId} - ${project.version} - - - - - - com.sun.jersey - jersey-server - ${jersey.version} - - - org.atmosphere - atmosphere-annotations - ${atmosphere.version} - - - org.atmosphere - atmosphere-jersey - ${atmosphere.version} - - - org.atmosphere - atmosphere-runtime - ${atmosphere.version} - - - - - - - org.apache.maven.plugins - maven-shade-plugin - 1.2.1 - - - install - - shade - - - - - junit:junit - - - - - - - se.scalablesolutions.akka.Main - - - - - - - - maven-antrun-plugin - - - install - - - - - - - run - - - - - - - diff --git a/akka-kernel/src/main/scala/Kernel.scala b/akka-kernel/src/main/scala/Kernel.scala index f63a50a0a7..6c0cd87058 100644 --- a/akka-kernel/src/main/scala/Kernel.scala +++ b/akka-kernel/src/main/scala/Kernel.scala @@ -2,11 +2,14 @@ * Copyright (C) 2009-2010 Scalable Solutions AB */ -package se.scalablesolutions.akka +package se.scalablesolutions.akka.kernel import se.scalablesolutions.akka.remote.BootableRemoteActorService +import se.scalablesolutions.akka.comet.BootableCometActorService import se.scalablesolutions.akka.actor.BootableActorLoaderService -import se.scalablesolutions.akka.util.{Logging,Bootable} +import se.scalablesolutions.akka.camel.service.CamelService +import se.scalablesolutions.akka.config.Config +import se.scalablesolutions.akka.util.{Logging, Bootable} import javax.servlet.{ServletContextListener, ServletContextEvent} @@ -27,12 +30,16 @@ object Kernel extends Logging { /** * Holds a reference to the services that has been booted */ - @volatile private var bundles : Option[Bootable] = None + @volatile private var bundles: Option[Bootable] = None /** - * Boots up the Kernel with default bootables + * Boots up the Kernel with default bootables */ - def boot : Unit = boot(true, new BootableActorLoaderService with BootableRemoteActorService with BootableCometActorService) + def boot: Unit = boot(true, + new BootableActorLoaderService + with BootableRemoteActorService + with BootableCometActorService + with CamelService) /** * Boots up the Kernel. @@ -63,8 +70,8 @@ object Kernel extends Logging { } //For testing purposes only - def startRemoteService : Unit = bundles.foreach( _ match { - case x : BootableRemoteActorService => x.startRemoteService + def startRemoteService: Unit = bundles.foreach( _ match { + case x: BootableRemoteActorService => x.startRemoteService case _ => }) @@ -79,16 +86,18 @@ object Kernel extends Logging { (____ /__|_ \__|_ \(____ / \/ \/ \/ \/ """) - log.info(" Running version %s", Config.VERSION) + log.info(" Running version %s", Config.VERSION) log.info("==============================") } } - /* - And this one can be added to web.xml mappings as a listener to boot and shutdown Akka - */ - + /** + * This class can be added to web.xml mappings as a listener to boot and shutdown Akka. 
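The boot overload taken here can also be called directly from application code; a minimal sketch, using only the imports and calls visible in this hunk:

    import se.scalablesolutions.akka.kernel.Kernel
    import se.scalablesolutions.akka.actor.BootableActorLoaderService
    import se.scalablesolutions.akka.remote.BootableRemoteActorService

    // Boot Akka with an explicit mix of bootables instead of the defaults;
    // this is the same call the servlet listener below makes on context init.
    Kernel.boot(true, new BootableActorLoaderService with BootableRemoteActorService)

    // ... and on shutdown:
    Kernel.shutdown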
+ */ class Kernel extends ServletContextListener { - def contextDestroyed(e : ServletContextEvent) : Unit = Kernel.shutdown - def contextInitialized(e : ServletContextEvent) : Unit = Kernel.boot(true,new BootableActorLoaderService with BootableRemoteActorService) + def contextDestroyed(e: ServletContextEvent): Unit = + Kernel.shutdown + + def contextInitialized(e: ServletContextEvent): Unit = + Kernel.boot(true, new BootableActorLoaderService with BootableRemoteActorService) } \ No newline at end of file diff --git a/akka-patterns/pom.xml b/akka-patterns/pom.xml deleted file mode 100644 index c4f528c20f..0000000000 --- a/akka-patterns/pom.xml +++ /dev/null @@ -1,39 +0,0 @@ - - 4.0.0 - - akka-patterns - Akka Patterns Module - - jar - - - akka - se.scalablesolutions.akka - 0.7-SNAPSHOT - - - - - - akka-core - ${project.groupId} - ${project.version} - - - - - org.scalatest - scalatest - ${scalatest.version} - test - - - junit - junit - 4.5 - test - - - diff --git a/akka-patterns/src/main/scala/Agent.scala b/akka-patterns/src/main/scala/Agent.scala deleted file mode 100644 index 4dd8640c32..0000000000 --- a/akka-patterns/src/main/scala/Agent.scala +++ /dev/null @@ -1,146 +0,0 @@ -// ScalaAgent -// -// Copyright © 2008-9 The original author or authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package se.scalablesolutions.akka.actor - -import se.scalablesolutions.akka.state.TransactionalState -import se.scalablesolutions.akka.stm.Transaction.atomic - -import java.util.concurrent.atomic.AtomicReference -import java.util.concurrent.{CountDownLatch} - -/** -* The Agent class was strongly inspired by the agent principle in Clojure. Essentially, an agent wraps a shared mutable state -* and hides it behind a message-passing interface. Agents accept messages and process them on behalf of the wrapped state. -* Typically agents accept functions / commands as messages and ensure the submitted commands are executed against the internal -* agent's state in a thread-safe manner (sequentially). -* The submitted functions / commands take the internal state as a parameter and their output becomes the new internal state value. -* The code that is submitted to an agent doesn't need to pay attention to threading or synchronization, the agent will -* provide such guarantees by itself. -* See the examples of use for more details. 
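The "examples of use" this scaladoc points to are not part of the patch; for reference, the removed Agent was driven like this, mirroring the AgentTest spec deleted further down in this diff:

    import se.scalablesolutions.akka.actor.Agent

    val counter = Agent(5)   // wrap the initial state
    counter.start            // an Agent is an Actor; the deleted AgentTest started it via a helper
    counter.update(_ + 1)    // submit functions; they are applied sequentially to the state
    counter.update(_ * 2)
    val result = counter()   // blocks until the pending updates have been applied
    // result == 12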
-* -* @author Vaclav Pech -* Date: Oct 18, 2009 -* -* AKKA retrofit by -* @author Viktor Klang -* Date: Jan 24 2010 -*/ -sealed class Agent[T] private (initialValue: T) extends Actor { - import Agent._ - - private val value = TransactionalState.newRef[T] - - updateData(initialValue) - - /** - * Periodically handles incoming messages - */ - def receive = { - case FunctionHolder(fun: (T => T)) => atomic { updateData(fun(value.getOrWait)) } - - case ValueHolder(x: T) => updateData(x) - - case ProcedureHolder(fun: (T => Unit)) => atomic { fun(copyStrategy(value.getOrWait)) } - } - - /** - * Specifies how a copy of the value is made, defaults to using identity - */ - protected def copyStrategy(t : T) : T = t - - - /** - * Updates the internal state with the value provided as a by-name parameter - */ - private final def updateData(newData: => T) : Unit = atomic { value.swap(newData) } - - /** - * Submits a request to read the internal state. - * A copy of the internal state will be returned, depending on the underlying effective copyStrategy. - * Internally leverages the asynchronous getValue() method and then waits for its result on a CountDownLatch. - */ - final def get : T = { - val ref = new AtomicReference[T] - val latch = new CountDownLatch(1) - get((x: T) => {ref.set(x); latch.countDown}) - latch.await - ref.get - } - - /** - * Asynchronously submits a request to read the internal state. The supplied function will be executed on the returned internal state value. - * A copy of the internal state will be used, depending on the underlying effective copyStrategy. - */ - final def get(message: (T => Unit)) : Unit = this ! ProcedureHolder(message) - - /** - * Submits a request to read the internal state. - * A copy of the internal state will be returned, depending on the underlying effective copyStrategy. - * Internally leverages the asynchronous getValue() method and then waits for its result on a CountDownLatch. - */ - final def apply() : T = get - - /** - * Asynchronously submits a request to read the internal state. The supplied function will be executed on the returned internal state value. - * A copy of the internal state will be used, depending on the underlying effective copyStrategy. - */ -// final def apply(message: (T => Unit)) : Unit = get(message) - - /** - * Submits the provided function for execution against the internal agent's state - */ - final def apply(message: (T => T)) : Unit = this ! FunctionHolder(message) - - /** - * Submits a new value to be set as the new agent's internal state - */ - final def apply(message: T) : Unit = this ! ValueHolder(message) - - /** - * Submits the provided function for execution against the internal agent's state - */ - final def update(message: (T => T)) : Unit = this ! FunctionHolder(message) - - /** - * Submits a new value to be set as the new agent's internal state - */ - final def update(message: T) : Unit = this ! ValueHolder(message) -} - -/** -* Provides factory methods to create Agents. 
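The second factory below installs a custom copy strategy, so asynchronous readers observe a defensive copy rather than the live value; a sketch, with java.lang.StringBuilder standing in for any mutable state:

    import se.scalablesolutions.akka.actor.Agent

    // Copy strategy: hand each reader its own copy of the mutable value.
    val sb = Agent(new java.lang.StringBuilder("x"),
                   (b: java.lang.StringBuilder) => new java.lang.StringBuilder(b))
    sb.start
    sb.get(copy => println(copy.length))   // asynchronous read against a private copy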
-*/ -object Agent { - /** - * The internal messages for passing around requests - */ - private case class ProcedureHolder[T](val fun: ((T) => Unit)) - private case class FunctionHolder[T](val fun: ((T) => T)) - private case class ValueHolder[T](val value: T) - - /** - * Creates a new Agent of type T with the initial value of value - */ - def apply[T](value:T) : Agent[T] = new Agent(value) - - /** - * Creates a new Agent of type T with the initial value of value and with the specified copy function - */ - def apply[T](value:T, newCopyStrategy: (T) => T) = new Agent(value) { - override def copyStrategy(t : T) = newCopyStrategy(t) - } -} diff --git a/akka-patterns/src/main/scala/Patterns.scala b/akka-patterns/src/main/scala/Patterns.scala index b967c07df7..3b7982148e 100644 --- a/akka-patterns/src/main/scala/Patterns.scala +++ b/akka-patterns/src/main/scala/Patterns.scala @@ -1,16 +1,16 @@ -package se.scalablesolutions.akka.actor.patterns +package se.scalablesolutions.akka.patterns import se.scalablesolutions.akka.actor.Actor object Patterns { - type PF[A,B] = PartialFunction[A,B] + type PF[A, B] = PartialFunction[A, B] /** * Creates a new PartialFunction whose isDefinedAt is a combination * of the two parameters, and whose apply is first to call filter.apply and then filtered.apply */ - def filter[A,B](filter : PF[A,Unit],filtered : PF[A,B]) : PF[A,B] = { - case a : A if filtered.isDefinedAt(a) && filter.isDefinedAt(a) => + def filter[A, B](filter: PF[A, Unit], filtered: PF[A, B]): PF[A, B] = { + case a: A if filtered.isDefinedAt(a) && filter.isDefinedAt(a) => filter(a) filtered(a) } @@ -18,61 +18,58 @@ object Patterns { /** * Interceptor is a filter(x,y) where x.isDefinedAt is considered to be always true */ - def intercept[A,B](interceptor : (A) => Unit, interceptee : PF[A,B]) : PF[A,B] = filter( - { case a if a.isInstanceOf[A] => interceptor(a) }, - interceptee - ) - + def intercept[A, B](interceptor: (A) => Unit, interceptee: PF[A, B]): PF[A, B] = + filter({case a if a.isInstanceOf[A] => interceptor(a)}, interceptee) + //FIXME 2.8, use default params with CyclicIterator - def loadBalancerActor(actors : => InfiniteIterator[Actor]) : Actor = new Actor with LoadBalancer { + def loadBalancerActor(actors: => InfiniteIterator[Actor]): Actor = new Actor with LoadBalancer { val seq = actors } - def dispatcherActor(routing : PF[Any,Actor], msgTransformer : (Any) => Any) : Actor = new Actor with Dispatcher { - override def transform(msg : Any) = msgTransformer(msg) + def dispatcherActor(routing: PF[Any, Actor], msgTransformer: (Any) => Any): Actor = + new Actor with Dispatcher { + override def transform(msg: Any) = msgTransformer(msg) def routes = routing } - - def dispatcherActor(routing : PF[Any,Actor]) : Actor = new Actor with Dispatcher { - def routes = routing + + def dispatcherActor(routing: PF[Any, Actor]): Actor = new Actor with Dispatcher { + def routes = routing } - def loggerActor(actorToLog : Actor, logger : (Any) => Unit) : Actor = dispatcherActor ( - { case _ => actorToLog }, - logger - ) + def loggerActor(actorToLog: Actor, logger: (Any) => Unit): Actor = + dispatcherActor({case _ => actorToLog}, logger) } -trait Dispatcher { self : Actor => +trait Dispatcher { self: Actor => - protected def transform(msg : Any) : Any = msg - protected def routes : PartialFunction[Any,Actor] - - protected def dispatch : PartialFunction[Any,Unit] = { - case a if routes.isDefinedAt(a) => { - if(self.sender.isDefined) - routes(a) forward transform(a) - else - routes(a) send transform(a) - } + protected 
def transform(msg: Any): Any = msg + + protected def routes: PartialFunction[Any, Actor] + + protected def dispatch: PartialFunction[Any, Unit] = { + case a if routes.isDefinedAt(a) => + if (self.sender.isDefined) routes(a) forward transform(a) + else routes(a) send transform(a) } def receive = dispatch } -trait LoadBalancer extends Dispatcher { self : Actor => - protected def seq : InfiniteIterator[Actor] +trait LoadBalancer extends Dispatcher { self: Actor => + protected def seq: InfiniteIterator[Actor] protected def routes = { case x if seq.hasNext => seq.next } } trait InfiniteIterator[T] extends Iterator[T] -class CyclicIterator[T](items : List[T]) extends InfiniteIterator[T] { - @volatile private[this] var current : List[T] = items +class CyclicIterator[T](items: List[T]) extends InfiniteIterator[T] { + @volatile private[this] var current: List[T] = items + def hasNext = items != Nil + def next = { - val nc = if(current == Nil) items else current + val nc = if (current == Nil) items else current current = nc.tail nc.head } diff --git a/akka-patterns/src/test/scala/ActorPatternsTest.scala b/akka-patterns/src/test/scala/ActorPatternsTest.scala index 800feda277..3019af0436 100644 --- a/akka-patterns/src/test/scala/ActorPatternsTest.scala +++ b/akka-patterns/src/test/scala/ActorPatternsTest.scala @@ -1,11 +1,11 @@ -package se.scalablesolutions.akka.actor - +package se.scalablesolutions.akka.patterns import se.scalablesolutions.akka.config.ScalaConfig._ +import se.scalablesolutions.akka.actor.Actor +import se.scalablesolutions.akka.actor.Actor._ +import se.scalablesolutions.akka.util.Logging import org.scalatest.Suite -import patterns.Patterns -import se.scalablesolutions.akka.util.Logging import org.junit.runner.RunWith import org.scalatest.junit.JUnitRunner import org.scalatest.matchers.MustMatchers @@ -14,19 +14,18 @@ import scala.collection.mutable.HashSet @RunWith(classOf[JUnitRunner]) class ActorPatternsTest extends junit.framework.TestCase with Suite with MustMatchers with ActorTestUtil with Logging { - import Actor._ import Patterns._ @Test def testDispatcher = verify(new TestActor { def test = { val (testMsg1,testMsg2,testMsg3,testMsg4) = ("test1","test2","test3","test4") var targetOk = 0 - val t1 = actor() receive { + val t1: Actor = actor { case `testMsg1` => targetOk += 2 case `testMsg2` => targetOk += 4 } - val t2 = actor() receive { + val t2: Actor = actor { case `testMsg3` => targetOk += 8 } @@ -48,7 +47,7 @@ class ActorPatternsTest extends junit.framework.TestCase with Suite with MustMat @Test def testLogger = verify(new TestActor { def test = { val msgs = new HashSet[Any] - val t1 = actor() receive { + val t1: Actor = actor { case _ => } val l = loggerActor(t1,(x) => msgs += x) diff --git a/akka-patterns/src/test/scala/AgentTest.scala b/akka-patterns/src/test/scala/AgentTest.scala deleted file mode 100644 index 17ccce8e0a..0000000000 --- a/akka-patterns/src/test/scala/AgentTest.scala +++ /dev/null @@ -1,24 +0,0 @@ -package se.scalablesolutions.akka.actor - -import org.scalatest.Suite -import se.scalablesolutions.akka.util.Logging -import org.junit.runner.RunWith -import org.scalatest.junit.JUnitRunner -import org.scalatest.matchers.MustMatchers -import org.junit.{Test} - -@RunWith(classOf[JUnitRunner]) -class AgentTest extends junit.framework.TestCase with Suite with MustMatchers with ActorTestUtil with Logging { - @Test def testAgent = verify(new TestActor { - def test = { - val t = Agent(5) - handle(t){ - t.update( _ + 1 ) - t.update( _ * 2 ) - - val r = t() - r must be 
(12) - } - } - }) -} \ No newline at end of file diff --git a/akka-persistence/akka-persistence-cassandra/pom.xml b/akka-persistence/akka-persistence-cassandra/pom.xml deleted file mode 100644 index d8490382d5..0000000000 --- a/akka-persistence/akka-persistence-cassandra/pom.xml +++ /dev/null @@ -1,86 +0,0 @@ - - 4.0.0 - - akka-persistence-cassandra - Akka Persistence Cassandra Module - - jar - - - akka-persistence-parent - se.scalablesolutions.akka - 0.7-SNAPSHOT - - - - - akka-persistence-common - ${project.groupId} - ${project.version} - - - com.google.code.google-collections - google-collect - - - - - - - org.apache.cassandra - cassandra - 0.5.0 - - - org.apache.cassandra - high-scale-lib - 0.5.0 - test - - - org.apache.cassandra - clhm-production - 0.5.0 - test - - - com.google.collections - google-collections - 1.0-rc1 - test - - - commons-collections - commons-collections - 3.2.1 - test - - - commons-lang - commons-lang - 2.4 - test - - - org.slf4j - slf4j-api - 1.5.8 - test - - - org.slf4j - slf4j-log4j12 - 1.5.8 - test - - - - - log4j - log4j - 1.2.13 - - - - diff --git a/akka-persistence/akka-persistence-cassandra/src/main/scala/CassandraSession.scala b/akka-persistence/akka-persistence-cassandra/src/main/scala/CassandraSession.scala index 71b334be0a..db831d318b 100644 --- a/akka-persistence/akka-persistence-cassandra/src/main/scala/CassandraSession.scala +++ b/akka-persistence/akka-persistence-cassandra/src/main/scala/CassandraSession.scala @@ -2,14 +2,15 @@ * Copyright (C) 2009-2010 Scalable Solutions AB */ -package se.scalablesolutions.akka.state +package se.scalablesolutions.akka.persistence.cassandra import java.io.{Flushable, Closeable} +import se.scalablesolutions.akka.persistence.common._ import se.scalablesolutions.akka.util.Logging import se.scalablesolutions.akka.util.Helpers._ import se.scalablesolutions.akka.serialization.Serializer -import se.scalablesolutions.akka.Config.config +import se.scalablesolutions.akka.config.Config.config import scala.collection.mutable.Map diff --git a/akka-persistence/akka-persistence-cassandra/src/main/scala/CassandraStorage.scala b/akka-persistence/akka-persistence-cassandra/src/main/scala/CassandraStorage.scala index 59a27963f4..be5fc4f4c7 100644 --- a/akka-persistence/akka-persistence-cassandra/src/main/scala/CassandraStorage.scala +++ b/akka-persistence/akka-persistence-cassandra/src/main/scala/CassandraStorage.scala @@ -2,16 +2,18 @@ * Copyright (C) 2009-2010 Scalable Solutions AB */ -package se.scalablesolutions.akka.state +package se.scalablesolutions.akka.persistence.cassandra -import org.codehaus.aspectwerkz.proxy.Uuid +import se.scalablesolutions.akka.util.UUID +import se.scalablesolutions.akka.stm._ +import se.scalablesolutions.akka.persistence.common._ object CassandraStorage extends Storage { type ElementType = Array[Byte] - def newMap: PersistentMap[ElementType, ElementType] = newMap(Uuid.newUuid.toString) - def newVector: PersistentVector[ElementType] = newVector(Uuid.newUuid.toString) - def newRef: PersistentRef[ElementType] = newRef(Uuid.newUuid.toString) + def newMap: PersistentMap[ElementType, ElementType] = newMap(UUID.newUuid.toString) + def newVector: PersistentVector[ElementType] = newVector(UUID.newUuid.toString) + def newRef: PersistentRef[ElementType] = newRef(UUID.newUuid.toString) def getMap(id: String): PersistentMap[ElementType, ElementType] = newMap(id) def getVector(id: String): PersistentVector[ElementType] = newVector(id) diff --git 
a/akka-persistence/akka-persistence-cassandra/src/main/scala/CassandraStorageBackend.scala b/akka-persistence/akka-persistence-cassandra/src/main/scala/CassandraStorageBackend.scala index d3c011ef79..8e91753211 100644 --- a/akka-persistence/akka-persistence-cassandra/src/main/scala/CassandraStorageBackend.scala +++ b/akka-persistence/akka-persistence-cassandra/src/main/scala/CassandraStorageBackend.scala @@ -2,11 +2,13 @@ * Copyright (C) 2009-2010 Scalable Solutions AB */ -package se.scalablesolutions.akka.state +package se.scalablesolutions.akka.persistence.cassandra +import se.scalablesolutions.akka.stm._ +import se.scalablesolutions.akka.persistence.common._ import se.scalablesolutions.akka.util.Logging import se.scalablesolutions.akka.util.Helpers._ -import se.scalablesolutions.akka.Config.config +import se.scalablesolutions.akka.config.Config.config import org.apache.cassandra.service._ diff --git a/akka-persistence/akka-persistence-cassandra/src/test/scala/CassandraPersistentActorSpec.scala b/akka-persistence/akka-persistence-cassandra/src/test/scala/CassandraPersistentActorSpec.scala index 0e232f5ce9..46d1b48a2d 100644 --- a/akka-persistence/akka-persistence-cassandra/src/test/scala/CassandraPersistentActorSpec.scala +++ b/akka-persistence/akka-persistence-cassandra/src/test/scala/CassandraPersistentActorSpec.scala @@ -1,13 +1,9 @@ -package se.scalablesolutions.akka.state +package se.scalablesolutions.akka.persistence.cassandra -import se.scalablesolutions.akka.actor.Actor - -import junit.framework.TestCase +import se.scalablesolutions.akka.actor.{Actor, Transactor} import org.junit.Test import org.junit.Assert._ -import org.apache.cassandra.service.CassandraDaemon -import org.junit.BeforeClass import org.junit.Before import org.scalatest.junit.JUnitSuite @@ -28,9 +24,8 @@ case class SetRefStateOneWay(key: String) case class SuccessOneWay(key: String, value: String) case class FailureOneWay(key: String, value: String, failer: Actor) -class CassandraPersistentActor extends Actor { +class CassandraPersistentActor extends Transactor { timeout = 100000 - makeTransactionRequired private lazy val mapState = CassandraStorage.newMap private lazy val vectorState = CassandraStorage.newVector @@ -66,8 +61,7 @@ class CassandraPersistentActor extends Actor { } } -@serializable class PersistentFailerActor extends Actor { - makeTransactionRequired +@serializable class PersistentFailerActor extends Transactor { def receive = { case "Failure" => throw new RuntimeException("expected") @@ -76,8 +70,8 @@ class CassandraPersistentActor extends Actor { class CassandraPersistentActorSpec extends JUnitSuite { - @Before - def startCassandra = EmbeddedCassandraService.start + //@Before + //def startCassandra = EmbeddedCassandraService.start @Test def testMapShouldNotRollbackStateForStatefulServerInCaseOfSuccess = { diff --git a/akka-persistence/akka-persistence-common/pom.xml b/akka-persistence/akka-persistence-common/pom.xml deleted file mode 100644 index 623fbea571..0000000000 --- a/akka-persistence/akka-persistence-common/pom.xml +++ /dev/null @@ -1,29 +0,0 @@ - - 4.0.0 - - akka-persistence-common - Akka Persistence Common Module - - jar - - - akka-persistence-parent - se.scalablesolutions.akka - 0.7-SNAPSHOT - - - - - com.facebook - thrift - 1.0 - - - commons-pool - commons-pool - 1.5.1 - - - - diff --git a/akka-persistence/akka-persistence-common/src/main/scala/Pool.scala b/akka-persistence/akka-persistence-common/src/main/scala/Pool.scala index d290455cad..73b64f3dd5 100644 --- 
a/akka-persistence/akka-persistence-common/src/main/scala/Pool.scala +++ b/akka-persistence/akka-persistence-common/src/main/scala/Pool.scala @@ -2,7 +2,7 @@ * Copyright (C) 2009-2010 Scalable Solutions AB */ -package se.scalablesolutions.akka.state +package se.scalablesolutions.akka.persistence.common import org.apache.commons.pool._ import org.apache.commons.pool.impl._ diff --git a/akka-persistence/akka-persistence-common/src/main/scala/Storage.scala b/akka-persistence/akka-persistence-common/src/main/scala/Storage.scala index 9051a2fdda..a728baeaad 100644 --- a/akka-persistence/akka-persistence-common/src/main/scala/Storage.scala +++ b/akka-persistence/akka-persistence-common/src/main/scala/Storage.scala @@ -2,16 +2,17 @@ * Copyright (C) 2009-2010 Scalable Solutions AB */ -package se.scalablesolutions.akka.state +package se.scalablesolutions.akka.persistence.common -import se.scalablesolutions.akka.stm.TransactionManagement.currentTransaction -import se.scalablesolutions.akka.collection._ +import se.scalablesolutions.akka.stm._ +import se.scalablesolutions.akka.stm.TransactionManagement.transaction import se.scalablesolutions.akka.util.Logging -import org.codehaus.aspectwerkz.proxy.Uuid - +// FIXME move to 'stm' package + add message with more info class NoTransactionInScopeException extends RuntimeException +class StorageException(message: String) extends RuntimeException(message) + /** * Example Scala usage. *
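The inline example following "Example Scala usage." did not survive extraction; a representative sketch against the factory API in the next hunk, with RedisStorage as one of the concrete Storage implementations in this patch:

    import se.scalablesolutions.akka.persistence.redis.RedisStorage

    val map    = RedisStorage.newMap        // PersistentMap[Array[Byte], Array[Byte]]
    val vector = RedisStorage.newVector     // PersistentVector[Array[Byte]]
    val ref    = RedisStorage.newRef        // PersistentRef[Array[Byte]]
    val zset   = RedisStorage.newSortedSet  // PersistentSortedSet[Array[Byte]]; Redis only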
@@ -50,24 +51,26 @@ trait Storage { def newRef: PersistentRef[ElementType] def newQueue: PersistentQueue[ElementType] = // only implemented for redis throw new UnsupportedOperationException + def newSortedSet: PersistentSortedSet[ElementType] = // only implemented for redis + throw new UnsupportedOperationException def getMap(id: String): PersistentMap[ElementType, ElementType] def getVector(id: String): PersistentVector[ElementType] def getRef(id: String): PersistentRef[ElementType] def getQueue(id: String): PersistentQueue[ElementType] = // only implemented for redis throw new UnsupportedOperationException + def getSortedSet(id: String): PersistentSortedSet[ElementType] = // only implemented for redis + throw new UnsupportedOperationException def newMap(id: String): PersistentMap[ElementType, ElementType] def newVector(id: String): PersistentVector[ElementType] def newRef(id: String): PersistentRef[ElementType] def newQueue(id: String): PersistentQueue[ElementType] = // only implemented for redis throw new UnsupportedOperationException + def newSortedSet(id: String): PersistentSortedSet[ElementType] = // only implemented for redis + throw new UnsupportedOperationException } - - - - /** * Implementation of PersistentMap for every concrete * storage will have the same workflow. This abstracts the workflow. @@ -176,8 +179,8 @@ trait PersistentMap[K, V] extends scala.collection.mutable.Map[K, V] } private def register = { - if (currentTransaction.get.isEmpty) throw new NoTransactionInScopeException - currentTransaction.get.get.register(uuid, this) + if (transaction.get.isEmpty) throw new NoTransactionInScopeException + transaction.get.get.register(uuid, this) } } @@ -250,8 +253,8 @@ trait PersistentVector[T] extends RandomAccessSeq[T] with Transactional with Com def length: Int = storage.getVectorStorageSizeFor(uuid) + newElems.length private def register = { - if (currentTransaction.get.isEmpty) throw new NoTransactionInScopeException - currentTransaction.get.get.register(uuid, this) + if (transaction.get.isEmpty) throw new NoTransactionInScopeException + transaction.get.get.register(uuid, this) } } @@ -286,8 +289,8 @@ trait PersistentRef[T] extends Transactional with Committable { } private def register = { - if (currentTransaction.get.isEmpty) throw new NoTransactionInScopeException - currentTransaction.get.get.register(uuid, this) + if (transaction.get.isEmpty) throw new NoTransactionInScopeException + transaction.get.get.register(uuid, this) } } @@ -319,7 +322,7 @@ trait PersistentRef[T] extends Transactional with Committable { trait PersistentQueue[A] extends scala.collection.mutable.Queue[A] with Transactional with Committable with Logging { - abstract case class QueueOp + sealed trait QueueOp case object ENQ extends QueueOp case object DEQ extends QueueOp @@ -417,7 +420,123 @@ trait PersistentQueue[A] extends scala.collection.mutable.Queue[A] throw new UnsupportedOperationException("dequeueAll not supported") private def register = { - if (currentTransaction.get.isEmpty) throw new NoTransactionInScopeException - currentTransaction.get.get.register(uuid, this) + if (transaction.get.isEmpty) throw new NoTransactionInScopeException + transaction.get.get.register(uuid, this) + } +} + +/** + * Implements a template for a concrete persistent transactional sorted set based storage. + *
+ * Sorting is done based on a zscore. But the computation of zscore has been kept + * outside the abstraction. + *
+ * zscore can be implemented in a variety of ways by the calling class: + * <pre>
+ * trait ZScorable {
+ *   def toZScore: Float
+ * }
+ *
+ * class Foo extends ZScorable {
+ *   //.. implementation
+ * }
+ * </pre>
+ * Or we can also use views: + *
+ * class Foo {
+ *   //..
+ * }
+ * 
+ * implicit def Foo2Scorable(foo: Foo): ZScorable = new ZScorable {
+ *   def toZScore = {
+ *     //..
+ *   }
+ * }
+ * </pre>
+ * + * and use foo.toZScore to compute the zscore and pass to the APIs. + * + * @author + */ +trait PersistentSortedSet[A] + extends Transactional + with Committable { + + protected val newElems = TransactionalState.newMap[A, Float] + protected val removedElems = TransactionalState.newVector[A] + + val storage: SortedSetStorageBackend[A] + + def commit = { + for ((element, score) <- newElems) storage.zadd(uuid, String.valueOf(score), element) + for (element <- removedElems) storage.zrem(uuid, element) + newElems.clear + removedElems.clear + } + + def +(elem: A, score: Float) = add(elem, score) + + def add(elem: A, score: Float) = { + register + newElems.put(elem, score) + } + + def -(elem: A) = remove(elem) + + def remove(elem: A) = { + register + removedElems.add(elem) + } + + private def inStorage(elem: A): Option[Float] = storage.zscore(uuid, elem) match { + case Some(s) => Some(s.toFloat) + case None => None + } + + def contains(elem: A): Boolean = { + if (newElems contains elem) true + else { + inStorage(elem) match { + case Some(f) => true + case None => false + } + } + } + + def size: Int = newElems.size + storage.zcard(uuid) - removedElems.size + + def zscore(elem: A): Float = { + if (newElems contains elem) newElems.get(elem).get + inStorage(elem) match { + case Some(f) => f + case None => + throw new Predef.NoSuchElementException(elem + " not present") + } + } + + implicit def order(x: (A, Float)) = new Ordered[(A, Float)] { + def compare(that: (A, Float)) = x._2 compare that._2 + } + + def zrange(start: Int, end: Int): List[(A, Float)] = { + // need to operate on the whole range + // get all from the underlying storage + val fromStore = storage.zrangeWithScore(uuid, 0, -1) + val ts = scala.collection.immutable.TreeSet(fromStore: _*) ++ newElems.toList + val l = ts.size + + // -1 means the last element, -2 means the second last + val s = if (start < 0) start + l else start + val e = + if (end < 0) end + l + else if (end >= l) (l - 1) + else end + // slice is open at the end, we need a closed end range + ts.elements.slice(s, e + 1).toList + } + + private def register = { + if (transaction.get.isEmpty) throw new NoTransactionInScopeException + transaction.get.get.register(uuid, this) } } diff --git a/akka-persistence/akka-persistence-common/src/main/scala/StorageBackend.scala b/akka-persistence/akka-persistence-common/src/main/scala/StorageBackend.scala index 94233acd0a..ab0cfaf4d3 100644 --- a/akka-persistence/akka-persistence-common/src/main/scala/StorageBackend.scala +++ b/akka-persistence/akka-persistence-common/src/main/scala/StorageBackend.scala @@ -2,7 +2,7 @@ * Copyright (C) 2009-2010 Scalable Solutions AB */ -package se.scalablesolutions.akka.state +package se.scalablesolutions.akka.persistence.common // abstracts persistence storage trait StorageBackend @@ -33,6 +33,14 @@ trait VectorStorageBackend[T] extends StorageBackend { trait RefStorageBackend[T] extends StorageBackend { def insertRefStorageFor(name: String, element: T) def getRefStorageFor(name: String): Option[T] + def incrementAtomically(name: String): Option[Int] = + throw new UnsupportedOperationException // only for redis + def incrementByAtomically(name: String, by: Int): Option[Int] = + throw new UnsupportedOperationException // only for redis + def decrementAtomically(name: String): Option[Int] = + throw new UnsupportedOperationException // only for redis + def decrementByAtomically(name: String, by: Int): Option[Int] = + throw new UnsupportedOperationException // only for redis } // for Queue @@ -61,11 
+69,15 @@ trait SortedSetStorageBackend[T] extends StorageBackend { // remove item from sorted set identified by name def zrem(name: String, item: T): Boolean - // cardinality of the set idnetified by name + // cardinality of the set identified by name def zcard(name: String): Int - def zscore(name: String, item: T): String + // zscore of the item from sorted set identified by name + def zscore(name: String, item: T): Option[Float] + // zrange from the sorted set identified by name def zrange(name: String, start: Int, end: Int): List[T] -} + // zrange with score from the sorted set identified by name + def zrangeWithScore(name: String, start: Int, end: Int): List[(T, Float)] +} diff --git a/akka-persistence/akka-persistence-mongo/pom.xml b/akka-persistence/akka-persistence-mongo/pom.xml deleted file mode 100644 index 616deb7492..0000000000 --- a/akka-persistence/akka-persistence-mongo/pom.xml +++ /dev/null @@ -1,31 +0,0 @@ - - 4.0.0 - - akka-persistence-mongo - Akka Persistence Mongo Module - - jar - - - akka-persistence-parent - se.scalablesolutions.akka - 0.7-SNAPSHOT - - - - - akka-persistence-common - ${project.groupId} - ${project.version} - - - - - org.mongodb - mongo-java-driver - 1.1 - - - - diff --git a/akka-persistence/akka-persistence-mongo/src/main/scala/MongoStorage.scala b/akka-persistence/akka-persistence-mongo/src/main/scala/MongoStorage.scala index 9aaf7a601d..70c7937eae 100644 --- a/akka-persistence/akka-persistence-mongo/src/main/scala/MongoStorage.scala +++ b/akka-persistence/akka-persistence-mongo/src/main/scala/MongoStorage.scala @@ -2,16 +2,18 @@ * Copyright (C) 2009-2010 Scalable Solutions AB */ -package se.scalablesolutions.akka.state +package se.scalablesolutions.akka.persistence.mongo -import org.codehaus.aspectwerkz.proxy.Uuid +import se.scalablesolutions.akka.stm._ +import se.scalablesolutions.akka.persistence.common._ +import se.scalablesolutions.akka.util.UUID object MongoStorage extends Storage { type ElementType = AnyRef - def newMap: PersistentMap[ElementType, ElementType] = newMap(Uuid.newUuid.toString) - def newVector: PersistentVector[ElementType] = newVector(Uuid.newUuid.toString) - def newRef: PersistentRef[ElementType] = newRef(Uuid.newUuid.toString) + def newMap: PersistentMap[ElementType, ElementType] = newMap(UUID.newUuid.toString) + def newVector: PersistentVector[ElementType] = newVector(UUID.newUuid.toString) + def newRef: PersistentRef[ElementType] = newRef(UUID.newUuid.toString) def getMap(id: String): PersistentMap[ElementType, ElementType] = newMap(id) def getVector(id: String): PersistentVector[ElementType] = newVector(id) diff --git a/akka-persistence/akka-persistence-mongo/src/main/scala/MongoStorageBackend.scala b/akka-persistence/akka-persistence-mongo/src/main/scala/MongoStorageBackend.scala index afc80f5196..a4eba3db67 100644 --- a/akka-persistence/akka-persistence-mongo/src/main/scala/MongoStorageBackend.scala +++ b/akka-persistence/akka-persistence-mongo/src/main/scala/MongoStorageBackend.scala @@ -2,10 +2,12 @@ * Copyright (C) 2009-2010 Scalable Solutions AB */ -package se.scalablesolutions.akka.state +package se.scalablesolutions.akka.persistence.mongo +import se.scalablesolutions.akka.stm._ +import se.scalablesolutions.akka.persistence.common._ import se.scalablesolutions.akka.util.Logging -import se.scalablesolutions.akka.Config.config +import se.scalablesolutions.akka.config.Config.config import sjson.json.Serializer._ diff --git a/akka-persistence/akka-persistence-mongo/src/test/scala/MongoPersistentActorSpec.scala 
b/akka-persistence/akka-persistence-mongo/src/test/scala/MongoPersistentActorSpec.scala index 8681ebadb9..93aa1862d1 100644 --- a/akka-persistence/akka-persistence-mongo/src/test/scala/MongoPersistentActorSpec.scala +++ b/akka-persistence/akka-persistence-mongo/src/test/scala/MongoPersistentActorSpec.scala @@ -1,4 +1,4 @@ -package se.scalablesolutions.akka.state +package se.scalablesolutions.akka.persistence.mongo import junit.framework.TestCase @@ -8,7 +8,7 @@ import org.junit.Assert._ import _root_.dispatch.json.{JsNumber, JsValue} import _root_.dispatch.json.Js._ -import se.scalablesolutions.akka.actor.Actor +import se.scalablesolutions.akka.actor.{Transactor, Actor} /** * A persistent actor based on MongoDB storage. @@ -29,10 +29,10 @@ case class MultiDebit(accountNo: String, amounts: List[BigInt], failer: Actor) case class Credit(accountNo: String, amount: BigInt) case object LogSize -class BankAccountActor extends Actor { - makeTransactionRequired - private val accountState = MongoStorage.newMap - private val txnLog = MongoStorage.newVector +class BankAccountActor extends Transactor { + + private lazy val accountState = MongoStorage.newMap + private lazy val txnLog = MongoStorage.newVector def receive: PartialFunction[Any, Unit] = { // check balance @@ -91,8 +91,7 @@ class BankAccountActor extends Actor { } } -@serializable class PersistentFailerActor extends Actor { - makeTransactionRequired +@serializable class PersistentFailerActor extends Transactor { def receive = { case "Failure" => throw new RuntimeException("expected") diff --git a/akka-persistence/akka-persistence-mongo/src/test/scala/MongoStorageSpec.scala b/akka-persistence/akka-persistence-mongo/src/test/scala/MongoStorageSpec.scala index 711d73c848..186157b576 100644 --- a/akka-persistence/akka-persistence-mongo/src/test/scala/MongoStorageSpec.scala +++ b/akka-persistence/akka-persistence-mongo/src/test/scala/MongoStorageSpec.scala @@ -1,4 +1,4 @@ -package se.scalablesolutions.akka.state +package se.scalablesolutions.akka.persistence.mongo import junit.framework.TestCase diff --git a/akka-persistence/akka-persistence-redis/pom.xml b/akka-persistence/akka-persistence-redis/pom.xml deleted file mode 100644 index c6088e573b..0000000000 --- a/akka-persistence/akka-persistence-redis/pom.xml +++ /dev/null @@ -1,31 +0,0 @@ - - 4.0.0 - - akka-persistence-redis - Akka Persistence Redis Module - - jar - - - akka-persistence-parent - se.scalablesolutions.akka - 0.7-SNAPSHOT - - - - - akka-persistence-common - ${project.groupId} - ${project.version} - - - - - com.redis - redisclient - 1.0.1 - - - - diff --git a/akka-persistence/akka-persistence-redis/src/main/scala/RedisStorage.scala b/akka-persistence/akka-persistence-redis/src/main/scala/RedisStorage.scala index fffa0011e5..b8aada0572 100644 --- a/akka-persistence/akka-persistence-redis/src/main/scala/RedisStorage.scala +++ b/akka-persistence/akka-persistence-redis/src/main/scala/RedisStorage.scala @@ -2,27 +2,33 @@ * Copyright (C) 2009-2010 Scalable Solutions AB */ -package se.scalablesolutions.akka.state +package se.scalablesolutions.akka.persistence.redis -import org.codehaus.aspectwerkz.proxy.Uuid +import se.scalablesolutions.akka.util.UUID +import se.scalablesolutions.akka.stm._ +import se.scalablesolutions.akka.persistence.common._ object RedisStorage extends Storage { type ElementType = Array[Byte] - def newMap: PersistentMap[ElementType, ElementType] = newMap(Uuid.newUuid.toString) - def newVector: PersistentVector[ElementType] = newVector(Uuid.newUuid.toString) - def 
newRef: PersistentRef[ElementType] = newRef(Uuid.newUuid.toString) - override def newQueue: PersistentQueue[ElementType] = newQueue(Uuid.newUuid.toString) + def newMap: PersistentMap[ElementType, ElementType] = newMap(UUID.newUuid.toString) + def newVector: PersistentVector[ElementType] = newVector(UUID.newUuid.toString) + def newRef: PersistentRef[ElementType] = newRef(UUID.newUuid.toString) + override def newQueue: PersistentQueue[ElementType] = newQueue(UUID.newUuid.toString) + override def newSortedSet: PersistentSortedSet[ElementType] = newSortedSet(UUID.newUuid.toString) def getMap(id: String): PersistentMap[ElementType, ElementType] = newMap(id) def getVector(id: String): PersistentVector[ElementType] = newVector(id) def getRef(id: String): PersistentRef[ElementType] = newRef(id) override def getQueue(id: String): PersistentQueue[ElementType] = newQueue(id) + override def getSortedSet(id: String): PersistentSortedSet[ElementType] = newSortedSet(id) def newMap(id: String): PersistentMap[ElementType, ElementType] = new RedisPersistentMap(id) def newVector(id: String): PersistentVector[ElementType] = new RedisPersistentVector(id) def newRef(id: String): PersistentRef[ElementType] = new RedisPersistentRef(id) override def newQueue(id: String): PersistentQueue[ElementType] = new RedisPersistentQueue(id) + override def newSortedSet(id: String): PersistentSortedSet[ElementType] = + new RedisPersistentSortedSet(id) } /** @@ -61,3 +67,14 @@ class RedisPersistentQueue(id: String) extends PersistentQueue[Array[Byte]] { val uuid = id val storage = RedisStorageBackend } + +/** + * Implements a persistent transactional sorted set based on the Redis + * storage. + * + * @author Debasish Ghosh + */ +class RedisPersistentSortedSet(id: String) extends PersistentSortedSet[Array[Byte]] { + val uuid = id + val storage = RedisStorageBackend +} diff --git a/akka-persistence/akka-persistence-redis/src/main/scala/RedisStorageBackend.scala b/akka-persistence/akka-persistence-redis/src/main/scala/RedisStorageBackend.scala index 5890821414..b0ba8cc590 100644 --- a/akka-persistence/akka-persistence-redis/src/main/scala/RedisStorageBackend.scala +++ b/akka-persistence/akka-persistence-redis/src/main/scala/RedisStorageBackend.scala @@ -2,10 +2,12 @@ * Copyright (C) 2009-2010 Scalable Solutions AB */ -package se.scalablesolutions.akka.state +package se.scalablesolutions.akka.persistence.redis +import se.scalablesolutions.akka.stm._ +import se.scalablesolutions.akka.persistence.common._ import se.scalablesolutions.akka.util.Logging -import se.scalablesolutions.akka.Config.config +import se.scalablesolutions.akka.config.Config.config import com.redis._ @@ -50,7 +52,7 @@ private [akka] object RedisStorageBackend extends val REDIS_SERVER_HOSTNAME = config.getString("akka.storage.redis.hostname", "127.0.0.1") val REDIS_SERVER_PORT = config.getInt("akka.storage.redis.port", 6379) - val db = new Redis(REDIS_SERVER_HOSTNAME, REDIS_SERVER_PORT) + val db = new RedisClient(REDIS_SERVER_HOSTNAME, REDIS_SERVER_PORT) /** * Map storage in Redis. 
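A sketch of driving the new sorted-set storage from a transactor; the Leaderboard actor is hypothetical, while the calls follow the PersistentSortedSet contract added in Storage.scala above:

    import se.scalablesolutions.akka.actor.Transactor
    import se.scalablesolutions.akka.persistence.redis.RedisStorage

    class Leaderboard extends Transactor {
      private lazy val scores = RedisStorage.newSortedSet  // PersistentSortedSet[Array[Byte]]

      def receive = {
        case ("score", player: String, points: Float) =>
          scores.add(player.getBytes, points)              // becomes a zadd on commit
        case "top3" =>
          reply(scores.zrange(0, 2))                       // List[(Array[Byte], Float)], ascending by score
      }
    }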
@@ -77,11 +79,11 @@ private [akka] object RedisStorageBackend extends * base64(T1):base64("debasish.programming_language") -> "scala" * */ - def insertMapStorageEntryFor(name: String, key: Array[Byte], value: Array[Byte]) { + def insertMapStorageEntryFor(name: String, key: Array[Byte], value: Array[Byte]): Unit = withErrorHandling { insertMapStorageEntriesFor(name, List((key, value))) } - def insertMapStorageEntriesFor(name: String, entries: List[Tuple2[Array[Byte], Array[Byte]]]) { + def insertMapStorageEntriesFor(name: String, entries: List[Tuple2[Array[Byte], Array[Byte]]]): Unit = withErrorHandling { mset(entries.map(e => (makeRedisKey(name, e._1), new String(e._2)))) } @@ -94,22 +96,22 @@ private [akka] object RedisStorageBackend extends *
* <li>: is chosen since it cannot appear in base64 encoding charset</li>
* <li>both parts of the key need to be base64 encoded since there can be spaces within each of them</li>
  • */ - private [this] def makeRedisKey(name: String, key: Array[Byte]): String = { + private [this] def makeRedisKey(name: String, key: Array[Byte]): String = withErrorHandling { "%s:%s".format(new String(encode(name.getBytes)), new String(encode(key))) } - private [this] def makeKeyFromRedisKey(redisKey: String) = { + private [this] def makeKeyFromRedisKey(redisKey: String) = withErrorHandling { val nk = redisKey.split(':').map{e: String => decode(e.getBytes)} (nk(0), nk(1)) } - private [this] def mset(entries: List[(String, String)]) { + private [this] def mset(entries: List[(String, String)]): Unit = withErrorHandling { entries.foreach {e: (String, String) => db.set(e._1, e._2) } } - def removeMapStorageFor(name: String): Unit = { + def removeMapStorageFor(name: String): Unit = withErrorHandling { db.keys("%s:*".format(encode(name.getBytes))) match { case None => throw new java.util.NoSuchElementException(name + " not present") @@ -118,18 +120,19 @@ private [akka] object RedisStorageBackend extends } } - def removeMapStorageFor(name: String, key: Array[Byte]): Unit = { + def removeMapStorageFor(name: String, key: Array[Byte]): Unit = withErrorHandling { db.delete(makeRedisKey(name, key)) } - def getMapStorageEntryFor(name: String, key: Array[Byte]): Option[Array[Byte]] = + def getMapStorageEntryFor(name: String, key: Array[Byte]): Option[Array[Byte]] = withErrorHandling { db.get(makeRedisKey(name, key)) match { case None => throw new java.util.NoSuchElementException(new String(key) + " not present") case Some(s) => Some(s.getBytes) } + } - def getMapStorageSizeFor(name: String): Int = { + def getMapStorageSizeFor(name: String): Int = withErrorHandling { db.keys("%s:*".format(new String(encode(name.getBytes)))) match { case None => 0 case Some(keys) => @@ -137,7 +140,7 @@ private [akka] object RedisStorageBackend extends } } - def getMapStorageFor(name: String): List[(Array[Byte], Array[Byte])] = { + def getMapStorageFor(name: String): List[(Array[Byte], Array[Byte])] = withErrorHandling { db.keys("%s:*".format(new String(encode(name.getBytes)))) match { case None => throw new java.util.NoSuchElementException(name + " not present") @@ -148,7 +151,7 @@ private [akka] object RedisStorageBackend extends def getMapStorageRangeFor(name: String, start: Option[Array[Byte]], finish: Option[Array[Byte]], - count: Int): List[(Array[Byte], Array[Byte])] = { + count: Int): List[(Array[Byte], Array[Byte])] = withErrorHandling { import scala.collection.immutable.TreeMap val wholeSorted = @@ -193,27 +196,27 @@ private [akka] object RedisStorageBackend extends } } - def insertVectorStorageEntryFor(name: String, element: Array[Byte]) { - db.pushHead(new String(encode(name.getBytes)), new String(element)) + def insertVectorStorageEntryFor(name: String, element: Array[Byte]): Unit = withErrorHandling { + db.lpush(new String(encode(name.getBytes)), new String(element)) } - def insertVectorStorageEntriesFor(name: String, elements: List[Array[Byte]]) { + def insertVectorStorageEntriesFor(name: String, elements: List[Array[Byte]]): Unit = withErrorHandling { elements.foreach(insertVectorStorageEntryFor(name, _)) } - def updateVectorStorageEntryFor(name: String, index: Int, elem: Array[Byte]) { - db.listSet(new String(encode(name.getBytes)), index, new String(elem)) + def updateVectorStorageEntryFor(name: String, index: Int, elem: Array[Byte]): Unit = withErrorHandling { + db.lset(new String(encode(name.getBytes)), index, new String(elem)) } - def getVectorStorageEntryFor(name: String, index: Int): 
Array[Byte] = { - db.listIndex(new String(encode(name.getBytes)), index) match { + def getVectorStorageEntryFor(name: String, index: Int): Array[Byte] = withErrorHandling { + db.lindex(new String(encode(name.getBytes)), index) match { case None => throw new java.util.NoSuchElementException(name + " does not have element at " + index) case Some(e) => e.getBytes } } - def getVectorStorageRangeFor(name: String, start: Option[Int], finish: Option[Int], count: Int): List[Array[Byte]] = { + def getVectorStorageRangeFor(name: String, start: Option[Int], finish: Option[Int], count: Int): List[Array[Byte]] = withErrorHandling { /** * count is the max number of results to return. Start with * start or 0 (if start is not defined) and go until @@ -226,27 +229,27 @@ private [akka] object RedisStorageBackend extends if (f >= s) Math.min(count, (f - s)) else count } else count - db.listRange(new String(encode(name.getBytes)), s, s + cnt - 1) match { + db.lrange(new String(encode(name.getBytes)), s, s + cnt - 1) match { case None => throw new java.util.NoSuchElementException(name + " does not have elements in the range specified") case Some(l) => - l map (_.getBytes) + l map (_.get.getBytes) } } def getVectorStorageSizeFor(name: String): Int = { - db.listLength(new String(encode(name.getBytes))) match { + db.llen(new String(encode(name.getBytes))) match { case None => throw new java.util.NoSuchElementException(name + " not present") case Some(l) => l } } - def insertRefStorageFor(name: String, element: Array[Byte]) { + def insertRefStorageFor(name: String, element: Array[Byte]): Unit = withErrorHandling { db.set(new String(encode(name.getBytes)), new String(element)) } - def getRefStorageFor(name: String): Option[Array[Byte]] = { + def getRefStorageFor(name: String): Option[Array[Byte]] = withErrorHandling { db.get(new String(encode(name.getBytes))) match { case None => throw new java.util.NoSuchElementException(name + " not present") @@ -254,14 +257,47 @@ private [akka] object RedisStorageBackend extends } } - // add to the end of the queue - def enqueue(name: String, item: Array[Byte]): Boolean = { - db.pushTail(new String(encode(name.getBytes)), new String(item)) + override def incrementAtomically(name: String): Option[Int] = withErrorHandling { + db.incr(new String(encode(name.getBytes))) match { + case Some(i) => Some(i) + case None => + throw new Predef.IllegalArgumentException(name + " exception in incr") + } } + override def incrementByAtomically(name: String, by: Int): Option[Int] = withErrorHandling { + db.incrBy(new String(encode(name.getBytes)), by) match { + case Some(i) => Some(i) + case None => + throw new Predef.IllegalArgumentException(name + " exception in incrby") + } + } + + override def decrementAtomically(name: String): Option[Int] = withErrorHandling { + db.decr(new String(encode(name.getBytes))) match { + case Some(i) => Some(i) + case None => + throw new Predef.IllegalArgumentException(name + " exception in decr") + } + } + + override def decrementByAtomically(name: String, by: Int): Option[Int] = withErrorHandling { + db.decrBy(new String(encode(name.getBytes)), by) match { + case Some(i) => Some(i) + case None => + throw new Predef.IllegalArgumentException(name + " exception in decrby") + } + } + + // add to the end of the queue + def enqueue(name: String, item: Array[Byte]): Boolean = withErrorHandling { + db.rpush(new String(encode(name.getBytes)), new String(item)) + } + + // pop from the front of the queue - def dequeue(name: String): Option[Array[Byte]] = { - db.popHead(new 
String(encode(name.getBytes))) match { + def dequeue(name: String): Option[Array[Byte]] = withErrorHandling { + db.lpop(new String(encode(name.getBytes))) match { case None => throw new java.util.NoSuchElementException(name + " not present") case Some(s) => @@ -270,8 +306,8 @@ private [akka] object RedisStorageBackend extends } // get the size of the queue - def size(name: String): Int = { - db.listLength(new String(encode(name.getBytes))) match { + def size(name: String): Int = withErrorHandling { + db.llen(new String(encode(name.getBytes))) match { case None => throw new java.util.NoSuchElementException(name + " not present") case Some(l) => l @@ -280,64 +316,95 @@ private [akka] object RedisStorageBackend extends // return an array of items currently stored in the queue // start is the item to begin, count is how many items to return - def peek(name: String, start: Int, count: Int): List[Array[Byte]] = count match { - case 1 => - db.listIndex(new String(encode(name.getBytes)), start) match { - case None => - throw new java.util.NoSuchElementException("No element at " + start) - case Some(s) => - List(s.getBytes) - } - case n => - db.listRange(new String(encode(name.getBytes)), start, start + count - 1) match { - case None => - throw new java.util.NoSuchElementException( - "No element found between " + start + " and " + (start + count - 1)) - case Some(es) => - es.map(_.getBytes) - } + def peek(name: String, start: Int, count: Int): List[Array[Byte]] = withErrorHandling { + count match { + case 1 => + db.lindex(new String(encode(name.getBytes)), start) match { + case None => + throw new Predef.NoSuchElementException("No element at " + start) + case Some(s) => + List(s.getBytes) + } + case n => + db.lrange(new String(encode(name.getBytes)), start, start + count - 1) match { + case None => + throw new Predef.NoSuchElementException( + "No element found between " + start + " and " + (start + count - 1)) + case Some(es) => + es.map(_.get.getBytes) + } + } } // completely delete the queue - def remove(name: String): Boolean = { - db.delete(new String(encode(name.getBytes))) + def remove(name: String): Boolean = withErrorHandling { + db.delete(new String(encode(name.getBytes))) match { + case Some(1) => true + case _ => false + } } // add item to sorted set identified by name - def zadd(name: String, zscore: String, item: Array[Byte]): Boolean = { - db.zAdd(new String(encode(name.getBytes)), zscore, new String(item)) + def zadd(name: String, zscore: String, item: Array[Byte]): Boolean = withErrorHandling { + db.zadd(new String(encode(name.getBytes)), zscore, new String(item)) match { + case Some(1) => true + case _ => false + } } // remove item from sorted set identified by name - def zrem(name: String, item: Array[Byte]): Boolean = { - db.zRem(new String(encode(name.getBytes)), new String(item)) + def zrem(name: String, item: Array[Byte]): Boolean = withErrorHandling { + db.zrem(new String(encode(name.getBytes)), new String(item)) match { + case Some(1) => true + case _ => false + } } // cardinality of the set identified by name - def zcard(name: String): Int = { - db.zCard(new String(encode(name.getBytes))) match { + def zcard(name: String): Int = withErrorHandling { + db.zcard(new String(encode(name.getBytes))) match { case None => throw new java.util.NoSuchElementException(name + " not present") case Some(l) => l } } - def zscore(name: String, item: Array[Byte]): String = { - db.zScore(new String(encode(name.getBytes)), new String(item)) match { - case None => - throw new 
java.util.NoSuchElementException(new String(item) + " not present") - case Some(s) => s + def zscore(name: String, item: Array[Byte]): Option[Float] = withErrorHandling { + db.zscore(new String(encode(name.getBytes)), new String(item)) match { + case Some(s) => Some(s.toFloat) + case None => None } } - def zrange(name: String, start: Int, end: Int): List[Array[Byte]] = { - db.zRange(new String(encode(name.getBytes)), start.toString, end.toString, SocketOperations.ASC, false) match { + def zrange(name: String, start: Int, end: Int): List[Array[Byte]] = withErrorHandling { + db.zrange(new String(encode(name.getBytes)), start.toString, end.toString, RedisClient.ASC, false) match { case None => throw new java.util.NoSuchElementException(name + " not present") case Some(s) => - s.map(_.getBytes) + s.map(_.get.getBytes) + } + } + + def zrangeWithScore(name: String, start: Int, end: Int): List[(Array[Byte], Float)] = withErrorHandling { + db.zrangeWithScore( + new String(encode(name.getBytes)), start.toString, end.toString, RedisClient.ASC) match { + case None => + throw new Predef.NoSuchElementException(name + " not present") + case Some(l) => + l.map{ case (elem, score) => (elem.get.getBytes, score.get.toFloat) } } } - def flushDB = db.flushDb + def flushDB = withErrorHandling(db.flushDb) + + private def withErrorHandling[T](body: => T): T = { + try { + body + } catch { + case e: java.lang.NullPointerException => + throw new StorageException("Could not connect to Redis server") + case e => + throw new StorageException("Error in Redis: " + e.getMessage) + } + } } diff --git a/akka-persistence/akka-persistence-redis/src/test/scala/RedisPersistentActorSpec.scala b/akka-persistence/akka-persistence-redis/src/test/scala/RedisPersistentActorSpec.scala index 93e3c0df3f..3c1eaab709 100644 --- a/akka-persistence/akka-persistence-redis/src/test/scala/RedisPersistentActorSpec.scala +++ b/akka-persistence/akka-persistence-redis/src/test/scala/RedisPersistentActorSpec.scala @@ -1,4 +1,4 @@ -package se.scalablesolutions.akka.state +package se.scalablesolutions.akka.persistence.redis import junit.framework.TestCase @@ -29,6 +29,7 @@ case object LogSize class AccountActor extends Transactor { private lazy val accountState = RedisStorage.newMap private lazy val txnLog = RedisStorage.newVector + //timeout = 5000 def receive = { // check balance @@ -86,6 +87,7 @@ class AccountActor extends Transactor { } @serializable class PersistentFailerActor extends Transactor { + // timeout = 5000 def receive = { case "Failure" => throw new RuntimeException("expected") @@ -140,7 +142,7 @@ class RedisPersistentActorSpec extends TestCase { bactor.start bactor !! Credit("a-123", 5000) - assertEquals(BigInt(5000), (bactor !! Balance("a-123")).get) + assertEquals(BigInt(5000), (bactor !! (Balance("a-123"), 5000)).get) val failer = new PersistentFailerActor failer.start @@ -149,7 +151,7 @@ class RedisPersistentActorSpec extends TestCase { fail("should throw exception") } catch { case e: RuntimeException => {}} - assertEquals(BigInt(5000), (bactor !! Balance("a-123")).get) + assertEquals(BigInt(5000), (bactor !! (Balance("a-123"), 5000)).get) // should not count the failed one val c: Integer = (bactor !! 
LogSize).get diff --git a/akka-persistence/akka-persistence-redis/src/test/scala/RedisPersistentQSpec.scala b/akka-persistence/akka-persistence-redis/src/test/scala/RedisPersistentQSpec.scala index 56fafa24d8..15636b637b 100644 --- a/akka-persistence/akka-persistence-redis/src/test/scala/RedisPersistentQSpec.scala +++ b/akka-persistence/akka-persistence-redis/src/test/scala/RedisPersistentQSpec.scala @@ -1,4 +1,4 @@ -package se.scalablesolutions.akka.state +package se.scalablesolutions.akka.persistence.redis import junit.framework.TestCase @@ -15,9 +15,9 @@ import se.scalablesolutions.akka.actor.{Actor, Transactor} */ case class NQ(accountNo: String) -case class DQ +case object DQ case class MNDQ(accountNos: List[String], noOfDQs: Int, failer: Actor) -case class SZ +case object SZ class QueueActor extends Transactor { private lazy val accounts = RedisStorage.newQueue diff --git a/akka-persistence/akka-persistence-redis/src/test/scala/RedisPersistentSortedSetSpec.scala b/akka-persistence/akka-persistence-redis/src/test/scala/RedisPersistentSortedSetSpec.scala new file mode 100644 index 0000000000..a2abb2cd40 --- /dev/null +++ b/akka-persistence/akka-persistence-redis/src/test/scala/RedisPersistentSortedSetSpec.scala @@ -0,0 +1,237 @@ +package se.scalablesolutions.akka.persistence.redis + +import org.scalatest.Spec +import org.scalatest.Assertions +import org.scalatest.matchers.ShouldMatchers +import org.scalatest.BeforeAndAfterAll +import org.scalatest.junit.JUnitRunner +import org.junit.runner.RunWith + +import se.scalablesolutions.akka.actor.{Actor, Transactor} + +/** + * A persistent actor based on Redis sortedset storage. + *

    + * Needs a running Redis server. + * @author Debasish Ghosh + */ + +trait ZScorable { + def zscore: Float +} + +case class Hacker(name: String, birth: String) extends ZScorable { + def zscore = birth.toFloat +} + +class SetThresholdViolationException extends RuntimeException + +// add hacker to the set +case class ADD(h: Hacker) + +// remove hacker from set +case class REMOVE(h: Hacker) + +// size of the set +case object SIZE + +// zscore of the hacker +case class SCORE(h: Hacker) + +// zrange +case class RANGE(start: Int, end: Int) + +// add and remove subject to the condition that there will be at least 3 hackers +case class MULTI(add: List[Hacker], rem: List[Hacker], failer: Actor) + +class SortedSetActor extends Transactor { + timeout = 100000 + private lazy val hackers = RedisStorage.newSortedSet + + def receive = { + case ADD(h) => + hackers.+(h.name.getBytes, h.zscore) + reply(true) + + case REMOVE(h) => + hackers.-(h.name.getBytes) + reply(true) + + case SIZE => + reply(hackers.size) + + case SCORE(h) => + reply(hackers.zscore(h.name.getBytes)) + + case RANGE(s, e) => + reply(hackers.zrange(s, e)) + + case MULTI(a, r, failer) => + a.foreach{ h: Hacker => + hackers.+(h.name.getBytes, h.zscore) + } + try { + r.foreach{ h => + if (hackers.size <= 3) + throw new SetThresholdViolationException + hackers.-(h.name.getBytes) + } + } catch { + case e: Exception => + failer !! "Failure" + } + reply((a.size, r.size)) + } +} + +import RedisStorageBackend._ + +@RunWith(classOf[JUnitRunner]) +class RedisPersistentSortedSetSpec extends + Spec with + ShouldMatchers with + BeforeAndAfterAll { + + override def beforeAll { + flushDB + println("** destroyed database") + } + + override def afterAll { + flushDB + println("** destroyed database") + } + + val h1 = Hacker("Alan kay", "1940") + val h2 = Hacker("Richard Stallman", "1953") + val h3 = Hacker("Yukihiro Matsumoto", "1965") + val h4 = Hacker("Claude Shannon", "1916") + val h5 = Hacker("Linus Torvalds", "1969") + val h6 = Hacker("Alan Turing", "1912") + + describe("Add and report cardinality of the set") { + val qa = new SortedSetActor + qa.start + + it("should enter 6 hackers") { + qa !! ADD(h1) + qa !! ADD(h2) + qa !! ADD(h3) + qa !! ADD(h4) + qa !! ADD(h5) + qa !! ADD(h6) + (qa !! SIZE).get.asInstanceOf[Int] should equal(6) + } + + it("should fetch correct scores for hackers") { + (qa !! SCORE(h1)).get.asInstanceOf[Float] should equal(1940.0f) + (qa !! SCORE(h5)).get.asInstanceOf[Float] should equal(1969.0f) + (qa !! SCORE(h6)).get.asInstanceOf[Float] should equal(1912.0f) + } + + it("should fetch proper range") { + (qa !! RANGE(0, 4)).get.asInstanceOf[List[_]].size should equal(5) + (qa !! RANGE(0, 6)).get.asInstanceOf[List[_]].size should equal(6) + } + + it("should remove and throw exception for removing non-existent hackers") { + qa !! REMOVE(h2) + (qa !! SIZE).get.asInstanceOf[Int] should equal(5) + qa !! REMOVE(h3) + (qa !! SIZE).get.asInstanceOf[Int] should equal(4) + val h7 = Hacker("Paul Snively", "1952") + try { + qa !! REMOVE(h7) + } + catch { + case e: Predef.NoSuchElementException => + e.getMessage should endWith("not present") + } + } + + it("should change score for entering the same hacker name with diff score") { + (qa !! SIZE).get.asInstanceOf[Int] should equal(4) + + // same name as h6 + val h7 = Hacker("Alan Turing", "1992") + qa !! ADD(h7) + + // size remains same + (qa !! SIZE).get.asInstanceOf[Int] should equal(4) + + // score updated + (qa !! 
SCORE(h7)).get.asInstanceOf[Float] should equal(1992.0f) + } + } + + describe("Transaction semantics") { + it("should rollback on exception") { + val qa = new SortedSetActor + qa.start + + val failer = new PersistentFailerActor + failer.start + + (qa !! SIZE).get.asInstanceOf[Int] should equal(0) + val add = List(h1, h2, h3, h4) + val rem = List(h2) + (qa !! MULTI(add, rem, failer)).get.asInstanceOf[Tuple2[Int, Int]] should equal((4,1)) + (qa !! SIZE).get.asInstanceOf[Int] should equal(3) + // size == 3 + + // add 2 more + val add1 = List(h5, h6) + + // remove 3 + val rem1 = List(h1, h3, h4) + try { + qa !! MULTI(add1, rem1, failer) + } catch { case e: Exception => {} + } + (qa !! SIZE).get.asInstanceOf[Int] should equal(3) + } + } + + describe("zrange") { + it ("should report proper range") { + val qa = new SortedSetActor + qa.start + qa !! ADD(h1) + qa !! ADD(h2) + qa !! ADD(h3) + qa !! ADD(h4) + qa !! ADD(h5) + qa !! ADD(h6) + (qa !! SIZE).get.asInstanceOf[Int] should equal(6) + val l = (qa !! RANGE(0, 6)).get.asInstanceOf[List[(Array[Byte], Float)]] + l.map { case (e, s) => (new String(e), s) }.head should equal(("Alan Turing", 1912.0f)) + val h7 = Hacker("Alan Turing", "1992") + qa !! ADD(h7) + (qa !! SIZE).get.asInstanceOf[Int] should equal(6) + val m = (qa !! RANGE(0, 6)).get.asInstanceOf[List[(Array[Byte], Float)]] + m.map { case (e, s) => (new String(e), s) }.head should equal(("Claude Shannon", 1916.0f)) + } + + it ("should report proper range boundaries") { + val qa = new SortedSetActor + qa.start + qa !! ADD(h1) + qa !! ADD(h2) + qa !! ADD(h3) + qa !! ADD(h4) + qa !! ADD(h5) + qa !! ADD(h6) + (qa !! SIZE).get.asInstanceOf[Int] should equal(6) + (qa !! RANGE(0, 5)).get.asInstanceOf[List[_]].size should equal(6) + (qa !! RANGE(0, 6)).get.asInstanceOf[List[_]].size should equal(6) + (qa !! RANGE(0, 3)).get.asInstanceOf[List[_]].size should equal(4) + (qa !! RANGE(0, 1)).get.asInstanceOf[List[_]].size should equal(2) + (qa !! RANGE(0, 0)).get.asInstanceOf[List[_]].size should equal(1) + (qa !! RANGE(3, 1)).get.asInstanceOf[List[_]].size should equal(0) + (qa !! RANGE(0, -1)).get.asInstanceOf[List[_]].size should equal(6) + (qa !! RANGE(0, -2)).get.asInstanceOf[List[_]].size should equal(5) + (qa !! RANGE(0, -4)).get.asInstanceOf[List[_]].size should equal(3) + (qa !! 
RANGE(-4, -1)).get.asInstanceOf[List[_]].size should equal(4) + } + } +} diff --git a/akka-persistence/akka-persistence-redis/src/test/scala/RedisStorageBackendSpec.scala b/akka-persistence/akka-persistence-redis/src/test/scala/RedisStorageBackendSpec.scala index aec86902d5..7cb659fbf5 100644 --- a/akka-persistence/akka-persistence-redis/src/test/scala/RedisStorageBackendSpec.scala +++ b/akka-persistence/akka-persistence-redis/src/test/scala/RedisStorageBackendSpec.scala @@ -1,4 +1,4 @@ -package se.scalablesolutions.akka.state +package se.scalablesolutions.akka.persistence.redis import org.scalatest.Spec import org.scalatest.matchers.ShouldMatchers @@ -121,6 +121,48 @@ class RedisStorageBackendSpec extends } } + describe("atomic increment in ref") { + it("should increment an existing key value by 1") { + insertRefStorageFor("T-4-1", "1200".getBytes) + new String(getRefStorageFor("T-4-1").get) should equal("1200") + incrementAtomically("T-4-1").get should equal(1201) + } + it("should create and increment a non-existing key value by 1") { + incrementAtomically("T-4-2").get should equal(1) + new String(getRefStorageFor("T-4-2").get) should equal("1") + } + it("should increment an existing key value by the amount specified") { + insertRefStorageFor("T-4-3", "1200".getBytes) + new String(getRefStorageFor("T-4-3").get) should equal("1200") + incrementByAtomically("T-4-3", 50).get should equal(1250) + } + it("should create and increment a non-existing key value by the amount specified") { + incrementByAtomically("T-4-4", 20).get should equal(20) + new String(getRefStorageFor("T-4-4").get) should equal("20") + } + } + + describe("atomic decrement in ref") { + it("should decrement an existing key value by 1") { + insertRefStorageFor("T-4-5", "1200".getBytes) + new String(getRefStorageFor("T-4-5").get) should equal("1200") + decrementAtomically("T-4-5").get should equal(1199) + } + it("should create and decrement a non-existing key value by 1") { + decrementAtomically("T-4-6").get should equal(-1) + new String(getRefStorageFor("T-4-6").get) should equal("-1") + } + it("should decrement an existing key value by the amount specified") { + insertRefStorageFor("T-4-7", "1200".getBytes) + new String(getRefStorageFor("T-4-7").get) should equal("1200") + decrementByAtomically("T-4-7", 50).get should equal(1150) + } + it("should create and decrement a non-existing key value by the amount specified") { + decrementByAtomically("T-4-8", 20).get should equal(-20) + new String(getRefStorageFor("T-4-8").get) should equal("-20") + } + } + describe("store and query in queue") { it("should give proper queue semantics") { enqueue("T-5", "alan kay".getBytes) @@ -156,10 +198,10 @@ class RedisStorageBackendSpec extends zcard("hackers") should equal(6) - zscore("hackers", "alan turing".getBytes) should equal("1912") - zscore("hackers", "richard stallman".getBytes) should equal("1953") - zscore("hackers", "claude shannon".getBytes) should equal("1916") - zscore("hackers", "linus torvalds".getBytes) should equal("1969") + zscore("hackers", "alan turing".getBytes).get should equal(1912.0f) + zscore("hackers", "richard stallman".getBytes).get should equal(1953.0f) + zscore("hackers", "claude shannon".getBytes).get should equal(1916.0f) + zscore("hackers", "linus torvalds".getBytes).get should equal(1969.0f) val s: List[Array[Byte]] = zrange("hackers", 0, 2) s.size should equal(3) @@ -171,6 +213,10 @@ class RedisStorageBackendSpec extends val t: List[Array[Byte]] = zrange("hackers", 0, -1) t.size should equal(6) t.map(new 
String(_)) should equal(sorted) + + val u: List[(Array[Byte], Float)] = zrangeWithScore("hackers", 0, -1) + u.size should equal(6) + u.map{ case (e, s) => new String(e) } should equal(sorted) } } } diff --git a/akka-persistence/pom.xml b/akka-persistence/pom.xml deleted file mode 100644 index f529dd162d..0000000000 --- a/akka-persistence/pom.xml +++ /dev/null @@ -1,43 +0,0 @@ - - 4.0.0 - - akka-persistence-parent - Akka Persistence Modules - - pom - - - akka - se.scalablesolutions.akka - 0.7-SNAPSHOT - - - - akka-persistence-common - - akka-persistence-mongo - akka-persistence-cassandra - - - - - akka-core - ${project.groupId} - ${project.version} - - - - org.scalatest - scalatest - ${scalatest.version} - test - - - junit - junit - 4.5 - test - - - diff --git a/akka-rest/pom.xml b/akka-rest/pom.xml deleted file mode 100644 index 4e875cb310..0000000000 --- a/akka-rest/pom.xml +++ /dev/null @@ -1,57 +0,0 @@ - - 4.0.0 - - akka-rest - Akka REST Module - - jar - - - akka - se.scalablesolutions.akka - 0.7-SNAPSHOT - - - - - - akka-core - ${project.groupId} - ${project.version} - - - - - javax.servlet - servlet-api - 2.5 - - - com.sun.jersey - jersey-core - ${jersey.version} - - - com.sun.jersey - jersey-server - ${jersey.version} - - - com.sun.jersey - jersey-json - ${jersey.version} - - - javax.ws.rs - jsr311-api - 1.1 - - - com.sun.jersey.contribs - jersey-scala - ${jersey.version} - - - diff --git a/akka-rest/src/main/scala/ActorComponentProvider.scala b/akka-rest/src/main/scala/ActorComponentProvider.scala index 5d9d49bef2..ed51482604 100644 --- a/akka-rest/src/main/scala/ActorComponentProvider.scala +++ b/akka-rest/src/main/scala/ActorComponentProvider.scala @@ -9,6 +9,7 @@ import com.sun.jersey.core.spi.component.ioc.IoCFullyManagedComponentProvider import se.scalablesolutions.akka.config.Configurator import se.scalablesolutions.akka.util.Logging +import se.scalablesolutions.akka.actor.Actor class ActorComponentProvider(val clazz: Class[_], val configurators: List[Configurator]) extends IoCFullyManagedComponentProvider with Logging { @@ -19,11 +20,10 @@ class ActorComponentProvider(val clazz: Class[_], val configurators: List[Config val instances = for { conf <- configurators if conf.isDefined(clazz) - } yield conf.getInstance(clazz).asInstanceOf[AnyRef] - instances match { - case instance :: Nil => instance - case Nil => throw new IllegalArgumentException("No Actor for class [" + clazz + "] could be found. Make sure you have defined and configured the class as an Active Object or Actor in a Configurator") - case _ => throw new IllegalArgumentException("Actor for class [" + clazz + "] is defined in more than one Configurator. 
Eliminate the redundancy.") - } + instance <- conf.getInstance(clazz) + } yield instance + if (instances.isEmpty) throw new IllegalArgumentException( + "No Actor or Active Object for class [" + clazz + "] could be found.\nMake sure you have defined and configured the class as an Active Object or Actor in a supervisor hierarchy.") + else instances.head.asInstanceOf[AnyRef] } } \ No newline at end of file diff --git a/akka-rest/src/main/scala/AkkaServlet.scala b/akka-rest/src/main/scala/AkkaServlet.scala index c06d5a72b8..32b10b354b 100644 --- a/akka-rest/src/main/scala/AkkaServlet.scala +++ b/akka-rest/src/main/scala/AkkaServlet.scala @@ -5,6 +5,7 @@ package se.scalablesolutions.akka.rest import se.scalablesolutions.akka.config.ConfiguratorRepository +import se.scalablesolutions.akka.config.Config.config import com.sun.jersey.api.core.ResourceConfig import com.sun.jersey.spi.container.servlet.ServletContainer @@ -20,14 +21,12 @@ class AkkaServlet extends ServletContainer { import scala.collection.JavaConversions._ override def initiate(resourceConfig: ResourceConfig, webApplication: WebApplication) = { - //Kernel.boot // will boot if not already booted by 'main' - val configurators = ConfiguratorRepository.getConfigurators resourceConfig.getClasses.addAll(configurators.flatMap(_.getComponentInterfaces)) resourceConfig.getProperties.put( "com.sun.jersey.spi.container.ResourceFilters", - se.scalablesolutions.akka.Config.config.getList("akka.rest.filters").mkString(",")) + config.getList("akka.rest.filters").mkString(",")) webApplication.initiate(resourceConfig, new ActorComponentProviderFactory(configurators)) } diff --git a/akka-samples/akka-sample-camel/src/main/resources/sample-camel-context.xml b/akka-samples/akka-sample-camel/src/main/resources/sample-camel-context.xml new file mode 100644 index 0000000000..b3d811d8de --- /dev/null +++ b/akka-samples/akka-sample-camel/src/main/resources/sample-camel-context.xml @@ -0,0 +1,23 @@ + + + + + + + + + + + + + + + + + + + diff --git a/akka-samples/akka-sample-camel/src/main/scala/Actors.scala b/akka-samples/akka-sample-camel/src/main/scala/Actors.scala new file mode 100644 index 0000000000..c82b29afc9 --- /dev/null +++ b/akka-samples/akka-sample-camel/src/main/scala/Actors.scala @@ -0,0 +1,92 @@ +package sample.camel + +import se.scalablesolutions.akka.actor.{Actor, RemoteActor} +import se.scalablesolutions.akka.actor.annotation.consume +import se.scalablesolutions.akka.camel.{Producer, Message, Consumer} +import se.scalablesolutions.akka.util.Logging + +/** + * Client-initiated remote actor. + */ +class RemoteActor1 extends RemoteActor("localhost", 7777) with Consumer { + def endpointUri = "jetty:http://localhost:6644/remote1" + + protected def receive = { + case msg: Message => reply(Message("hello %s" format msg.body, Map("sender" -> "remote1"))) + } +} + +/** + * Server-initiated remote actor. 
+ */ +class RemoteActor2 extends Actor with Consumer { + def endpointUri = "jetty:http://localhost:6644/remote2" + + protected def receive = { + case msg: Message => reply(Message("hello %s" format msg.body, Map("sender" -> "remote2"))) + } +} + +class Producer1 extends Actor with Producer { + def endpointUri = "direct:welcome" + + override def oneway = false // default + override def async = true // default + + protected def receive = produce +} + +class Consumer1 extends Actor with Consumer with Logging { + def endpointUri = "file:data/input" + + def receive = { + case msg: Message => log.info("received %s" format msg.bodyAs(classOf[String])) + } +} + +@consume("jetty:http://0.0.0.0:8877/camel/test1") +class Consumer2 extends Actor { + def receive = { + case msg: Message => reply("Hello %s" format msg.bodyAs(classOf[String])) + } +} + +class Consumer3(transformer: Actor) extends Actor with Consumer { + def endpointUri = "jetty:http://0.0.0.0:8877/camel/welcome" + + def receive = { + case msg: Message => transformer.forward(msg.setBodyAs(classOf[String])) + } +} + +class Transformer(producer: Actor) extends Actor { + protected def receive = { + case msg: Message => producer.forward(msg.transformBody[String]("- %s -" format _)) + } +} + +class Subscriber(name:String, uri: String) extends Actor with Consumer { + def endpointUri = uri + + protected def receive = { + case msg: Message => log.info("%s received: %s" format (name, msg.body)) + } +} + +class Publisher(name: String, uri: String) extends Actor with Producer { + id = name + def endpointUri = uri + override def oneway = true + protected def receive = produce +} + +class PublisherBridge(uri: String, publisher: Actor) extends Actor with Consumer { + def endpointUri = uri + + protected def receive = { + case msg: Message => { + publisher ! msg.bodyAs(classOf[String]) + reply("message published") + } + } +} \ No newline at end of file diff --git a/akka-samples/akka-sample-camel/src/main/scala/Application1.scala b/akka-samples/akka-sample-camel/src/main/scala/Application1.scala new file mode 100644 index 0000000000..4a55f2014f --- /dev/null +++ b/akka-samples/akka-sample-camel/src/main/scala/Application1.scala @@ -0,0 +1,28 @@ +package sample.camel + +import se.scalablesolutions.akka.actor.Actor +import se.scalablesolutions.akka.camel.Message +import se.scalablesolutions.akka.remote.RemoteClient + +/** + * @author Martin Krasser + */ +object Application1 { + + // + // TODO: completion of example + // + + def main(args: Array[String]) { + implicit val sender: Option[Actor] = None + + val actor1 = new RemoteActor1 + val actor2 = RemoteClient.actorFor("remote2", "localhost", 7777) + + actor1.start + + println(actor1 !! Message("actor1")) + println(actor2 !! 
Message("actor2")) + } + +} \ No newline at end of file diff --git a/akka-samples/akka-sample-camel/src/main/scala/Application2.scala b/akka-samples/akka-sample-camel/src/main/scala/Application2.scala new file mode 100644 index 0000000000..83c6e8c439 --- /dev/null +++ b/akka-samples/akka-sample-camel/src/main/scala/Application2.scala @@ -0,0 +1,22 @@ +package sample.camel + +import se.scalablesolutions.akka.camel.service.CamelService +import se.scalablesolutions.akka.remote.RemoteNode + +/** + * @author Martin Krasser + */ +object Application2 { + + // + // TODO: completion of example + // + + def main(args: Array[String]) { + val camelService = CamelService.newInstance + camelService.load + RemoteNode.start("localhost", 7777) + RemoteNode.register("remote2", new RemoteActor2().start) + } + +} \ No newline at end of file diff --git a/akka-samples/akka-sample-camel/src/main/scala/Boot.scala b/akka-samples/akka-sample-camel/src/main/scala/Boot.scala new file mode 100644 index 0000000000..481804de64 --- /dev/null +++ b/akka-samples/akka-sample-camel/src/main/scala/Boot.scala @@ -0,0 +1,76 @@ +package sample.camel + +import org.apache.camel.{Exchange, Processor} +import org.apache.camel.builder.RouteBuilder +import org.apache.camel.impl.DefaultCamelContext +import org.apache.camel.spring.spi.ApplicationContextRegistry +import org.springframework.context.support.ClassPathXmlApplicationContext + +import se.scalablesolutions.akka.actor.SupervisorFactory +import se.scalablesolutions.akka.camel.CamelContextManager +import se.scalablesolutions.akka.config.ScalaConfig._ + +/** + * @author Martin Krasser + */ +class Boot { + + // Create CamelContext with Spring-based registry and custom route builder + + val context = new ClassPathXmlApplicationContext("/sample-camel-context.xml", getClass) + val registry = new ApplicationContextRegistry(context) + CamelContextManager.init(new DefaultCamelContext(registry)) + CamelContextManager.context.addRoutes(new CustomRouteBuilder) + + // Basic example + + val factory = SupervisorFactory( + SupervisorConfig( + RestartStrategy(OneForOne, 3, 100, List(classOf[Exception])), + Supervise(new Consumer1, LifeCycle(Permanent)) :: + Supervise(new Consumer2, LifeCycle(Permanent)) :: Nil)) + factory.newInstance.start + + // Routing example + + val producer = new Producer1 + val mediator = new Transformer(producer) + val consumer = new Consumer3(mediator) + + producer.start + mediator.start + consumer.start + + // Publish subscribe example + + // + // Cometd example is disabled because of unresolved sbt/ivy dependency resolution issues. + // If you want to run this example, make sure to replace all jetty-*-6.1.22.jar files + // on the classpath with corresponding jetty-*-6.1.11.jar files. 
+ // + + //val cometdUri = "cometd://localhost:8111/test/abc?resourceBase=target" + //val cometdSubscriber = new Subscriber("cometd-subscriber", cometdUri).start + //val cometdPublisher = new Publisher("cometd-publisher", cometdUri).start + + val jmsUri = "jms:topic:test" + val jmsSubscriber1 = new Subscriber("jms-subscriber-1", jmsUri).start + val jmsSubscriber2 = new Subscriber("jms-subscriber-2", jmsUri).start + val jmsPublisher = new Publisher("jms-publisher", jmsUri).start + + //val cometdPublisherBridge = new PublisherBridge("jetty:http://0.0.0.0:8877/camel/pub/cometd", cometdPublisher).start + val jmsPublisherBridge = new PublisherBridge("jetty:http://0.0.0.0:8877/camel/pub/jms", jmsPublisher).start + +} + +class CustomRouteBuilder extends RouteBuilder { + def configure { + val actorUri = "actor:%s" format classOf[Consumer2].getName + from("jetty:http://0.0.0.0:8877/camel/test2").to(actorUri) + from("direct:welcome").process(new Processor() { + def process(exchange: Exchange) { + exchange.getOut.setBody("Welcome %s" format exchange.getIn.getBody) + } + }) + } +} \ No newline at end of file diff --git a/akka-samples/akka-sample-chat/README b/akka-samples/akka-sample-chat/README index d2049cd7c0..88720d8c55 100644 --- a/akka-samples/akka-sample-chat/README +++ b/akka-samples/akka-sample-chat/README @@ -10,19 +10,22 @@ For details on how to set up Redis server have a look at http://code.google.com/ Then to run the sample: -1. Set ‘AKKA_HOME’ environment variable to the root of the Akka distribution. -2. Open up a shell and step into the Akka distribution root folder. -3. Build Akka by invoking ‘mvn install -Dmaven.test.skip=true’. This will also build the sample application and deploy it to the ‘$AKKA_HOME/deploy’ directory. -4. Run the microkernel - export AKKA_HOME=... - cd $AKKA_HOME - java -jar ./dist/akka-0.6.jar -5. Now start up a new shell and go down into the ‘./akka-samples/akka-sample-chat’ directory. -6. Invoke ‘mvn scala:console -o’. This will give you a Scala REPL (interpreter) with the chat application and all its dependency JARs on the classpath. -7. Simply paste in the whole code block with the ‘Runner’ object above and invoke ‘Runner.run’. This runs a simulated client session that will connect to the running server in the microkernel. -8. Invoke ‘Runner.run’ again and again… +1. Install Redis. Download it from http://code.google.com/p/redis/. +2. Open up a shell and start up an instance of Redis. +3. Fire up two shells. For each of them: + - Step down into the root of the Akka distribution. + - Set 'export AKKA_HOME=.'. + - Run 'sbt console' to start up a REPL (interpreter). +4. In the first REPL, execute: + - scala> import se.scalablesolutions.akka.sample.chat._ + - scala> ChatService.start +5. In the second REPL, execute: + - scala> import se.scalablesolutions.akka.sample.chat._ + - scala> Runner.run +6. See the chat simulation run. +7. Run it again to see full speed after the first initialization. -Now you could test client reconnect by killing the running microkernel and start it up again. See the client reconnect take place in the REPL shell. +Now you can test client reconnect by killing the console running the ChatService and starting it again. See the client reconnect take place in the REPL shell. That’s it. Have fun. 
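For reference, a manual chat session in the second REPL could look like the sketch below, once ChatService has been started in the first REPL. It uses the ChatClient API from ChatServer.scala in the next diff; the client name "jonas" is simply the one the bundled Runner uses:

scala> import se.scalablesolutions.akka.sample.chat._
scala> val client = new ChatClient("jonas")
scala> client.login
scala> client.post("Hi there")
scala> println(client.chatLog)  // fetches the persisted ChatLog from the server
scala> client.logout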
diff --git a/akka-samples/akka-sample-chat/pom.xml b/akka-samples/akka-sample-chat/pom.xml deleted file mode 100644 index 20ee421978..0000000000 --- a/akka-samples/akka-sample-chat/pom.xml +++ /dev/null @@ -1,38 +0,0 @@ - - 4.0.0 - - akka-sample-chat - Akka Chat Sample Module - - jar - - - akka-samples-parent - se.scalablesolutions.akka - 0.7-SNAPSHOT - - - - src/main/scala - - - maven-antrun-plugin - - - install - - - - - - - run - - - - - - - diff --git a/akka-samples/akka-sample-chat/src/main/scala/ChatServer.scala b/akka-samples/akka-sample-chat/src/main/scala/ChatServer.scala index f605fc7cba..4ff7e1e0c6 100644 --- a/akka-samples/akka-sample-chat/src/main/scala/ChatServer.scala +++ b/akka-samples/akka-sample-chat/src/main/scala/ChatServer.scala @@ -1,28 +1,48 @@ -/**ChatStorage +/** * Copyright (C) 2009-2010 Scalable Solutions AB . */ package se.scalablesolutions.akka.sample.chat +import scala.collection.mutable.HashMap + import se.scalablesolutions.akka.actor.{SupervisorFactory, Actor, RemoteActor} +import se.scalablesolutions.akka.remote.{RemoteNode, RemoteClient} +import se.scalablesolutions.akka.persistence.common.PersistentVector +import se.scalablesolutions.akka.persistence.redis.RedisStorage import se.scalablesolutions.akka.stm.Transaction._ -import se.scalablesolutions.akka.remote.RemoteServer -import se.scalablesolutions.akka.util.Logging import se.scalablesolutions.akka.config.ScalaConfig._ import se.scalablesolutions.akka.config.OneForOneStrategy -import scala.collection.mutable.HashMap -import se.scalablesolutions.akka.state.{PersistentVector, RedisStorage} +import se.scalablesolutions.akka.util.Logging /****************************************************************************** - To run the sample: - 1. Run 'mvn install' (builds and deploys jar to AKKA_HOME/deploy) - 2. In another shell run 'java -jar ./dist/akka-0.6.jar' to start up Akka microkernel - 3. In the first shell run 'mvn scala:console -o' - 4. In the REPL you get execute: +Akka Chat Client/Server Sample Application + +First we need to download, build and start up Redis: + +1. Download Redis from http://code.google.com/p/redis/downloads/list. +2. Step into the distribution. +3. Build: ‘make install’. +4. Run: ‘./redis-server’. +For details on how to set up the Redis server, have a look at http://code.google.com/p/redis/wiki/QuickStart. + +Then to run the sample: + +1. Fire up two shells. For each of them: + - Step down into the root of the Akka distribution. + - Set 'export AKKA_HOME=.'. + - Run 'sbt console' to start up a REPL (interpreter). +2. In the first REPL, execute: + - scala> import se.scalablesolutions.akka.sample.chat._ + - scala> ChatService.start +3. In the second REPL, execute: - scala> import se.scalablesolutions.akka.sample.chat._ - scala> Runner.run - 5. See the chat simulation run - 6. Run it again to see full speed after first initialization +4. See the chat simulation run. +5. Run it again to see full speed after the first initialization. + +That’s it. Have fun. + ******************************************************************************/ /** @@ -40,10 +60,12 @@ case class ChatMessage(from: String, message: String) extends Event */ class ChatClient(val name: String) { import Actor.Sender.Self - def login = ChatService ! Login(name) - def logout = ChatService ! Logout(name) - def post(message: String) = ChatService ! ChatMessage(name, name + ": " + message) - def chatLog: ChatLog = (ChatService !! 
GetChatLog(name)).getOrElse(throw new Exception("Couldn't get the chat log from ChatServer")) + val chat = RemoteClient.actorFor("chat:service", "localhost", 9999) + + def login = chat ! Login(name) + def logout = chat ! Logout(name) + def post(message: String) = chat ! ChatMessage(name, name + ": " + message) + def chatLog: ChatLog = (chat !! GetChatLog(name)).getOrElse(throw new Exception("Couldn't get the chat log from ChatServer")) } /** @@ -75,12 +97,9 @@ trait ChatStorage extends Actor */ class RedisChatStorage extends ChatStorage { lifeCycle = Some(LifeCycle(Permanent)) - - private var chatLog: PersistentVector[Array[Byte]] = _ - - override def initTransactionalState = chatLog = RedisStorage.getVector("akka.chat.log") - - chatLog = RedisStorage.getVector("akka.chat.log") + val CHAT_LOG = "akka.chat.log" + + private var chatLog = atomic { RedisStorage.getVector(CHAT_LOG) } log.info("Redis-based chat storage is starting up...") @@ -98,7 +117,7 @@ class RedisChatStorage extends ChatStorage { reply(ChatLog(messageList)) } - override def postRestart(reason: Throwable) = chatLog = RedisStorage.getVector("akka.chat.log") + override def postRestart(reason: Throwable) = chatLog = RedisStorage.getVector(CHAT_LOG) } /** @@ -184,16 +203,19 @@ object ChatService extends ChatServer with SessionManagement with ChatManagement with - RedisChatStorageFactory + RedisChatStorageFactory { + override def start: Actor = { + super.start + RemoteNode.start("localhost", 9999) + RemoteNode.register("chat:service", this) + this + } +} /** * Test runner emulating a chat session. */ object Runner { - // create a handle to the remote ChatService - ChatService.makeRemote("localhost", 9999) - ChatService.start - def run = { val client = new ChatClient("jonas") diff --git a/akka-samples/akka-sample-lift/config/akka.conf b/akka-samples/akka-sample-lift/config/akka.conf deleted file mode 100644 index 4a02b208bb..0000000000 --- a/akka-samples/akka-sample-lift/config/akka.conf +++ /dev/null @@ -1,64 +0,0 @@ -##################### -# Akka Config File # -################### - -# This file has all the default settings, so all these could be removed with no visible effect. -# Modify as needed. - - - filename = "./logs/akka.log" - roll = "daily" # Options: never, hourly, daily, sunday/monday/... - level = "debug" # Options: fatal, critical, error, warning, info, debug, trace - console = on - # syslog_host = "" - # syslog_server_name = "" - - - - version = "0.7-SNAPSHOT" - - - timeout = 5000 # default timeout for future based invocations - concurrent-mode = off # if turned on, then the same actor instance is allowed to execute concurrently - - # e.g. 
departing from the actor model for better performance - serialize-messages = on # does a deep clone of (non-primitive) messages to ensure immutability - - - - service = on - restart-on-collision = off # (not implemented yet) if 'on' then it reschedules the transaction, - # if 'off' then throws an exception or rollback for user to handle - wait-for-completion = 100 # how long time in millis a transaction should be given time to complete when a collision is detected - wait-nr-of-times = 3 # the number of times it should check for completion of a pending transaction upon collision - distributed = off # not implemented yet - - - - service = on - hostname = "localhost" - port = 9999 - connection-timeout = 1000 # in millis - - - - service = on - hostname = "localhost" - port = 9998 - - - - system = "cassandra" # Options: cassandra (coming: terracotta, redis, tokyo-cabinet, tokyo-tyrant, voldemort, memcached, hazelcast) - - - service = on - storage-format = "java" # Options: java, scala-json, java-json - blocking = false # inserts and queries should be blocking or not - - - service = on - pidfile = "akka.pid" - - - - - diff --git a/akka-samples/akka-sample-lift/pom.xml b/akka-samples/akka-sample-lift/pom.xml deleted file mode 100644 index b7642bff06..0000000000 --- a/akka-samples/akka-sample-lift/pom.xml +++ /dev/null @@ -1,46 +0,0 @@ - - - 4.0.0 - - akka-sample-lift - Akka Lift Sample Module - - war - - - akka-samples-parent - se.scalablesolutions.akka - 0.7-SNAPSHOT - - - - - net.liftweb - lift-util - ${lift.version} - - - net.liftweb - lift-webkit - ${lift.version} - - - javax.servlet - servlet-api - 2.5 - provided - - - junit - junit - 4.5 - test - - - org.mortbay.jetty - jetty - [6.1.6,) - test - - - diff --git a/akka-samples/akka-sample-lift/src/main/scala/akka/SimpleService.scala b/akka-samples/akka-sample-lift/src/main/scala/akka/SimpleService.scala index 3f18f7d357..35a4158642 100644 --- a/akka-samples/akka-sample-lift/src/main/scala/akka/SimpleService.scala +++ b/akka-samples/akka-sample-lift/src/main/scala/akka/SimpleService.scala @@ -2,7 +2,8 @@ package sample.lift import se.scalablesolutions.akka.actor.{Transactor, Actor} import se.scalablesolutions.akka.config.ScalaConfig._ -import se.scalablesolutions.akka.state.{CassandraStorage, TransactionalState} +import se.scalablesolutions.akka.stm.TransactionalState +import se.scalablesolutions.akka.persistence.cassandra.CassandraStorage import java.lang.Integer import javax.ws.rs.{GET, Path, Produces} diff --git a/akka-samples/akka-sample-rest-java/pom.xml b/akka-samples/akka-sample-rest-java/pom.xml deleted file mode 100644 index 6539a0234b..0000000000 --- a/akka-samples/akka-sample-rest-java/pom.xml +++ /dev/null @@ -1,49 +0,0 @@ - - 4.0.0 - - akka-sample-rest-java - Akka REST Java Sample Module - - jar - - - akka-samples-parent - se.scalablesolutions.akka - 0.7-SNAPSHOT - - - - src/main/java - - - org.apache.maven.plugins - maven-compiler-plugin - - 1.5 - 1.5 - - **/* - - - - - maven-antrun-plugin - - - install - - - - - - - run - - - - - - - diff --git a/akka-samples/akka-sample-rest-java/src/main/java/sample/java/PersistentSimpleService.java b/akka-samples/akka-sample-rest-java/src/main/java/sample/java/PersistentSimpleService.java index 9a0a38f619..221b5613b8 100644 --- a/akka-samples/akka-sample-rest-java/src/main/java/sample/java/PersistentSimpleService.java +++ b/akka-samples/akka-sample-rest-java/src/main/java/sample/java/PersistentSimpleService.java @@ -8,11 +8,11 @@ import javax.ws.rs.Path; import javax.ws.rs.GET; import 
javax.ws.rs.Produces; -import se.scalablesolutions.akka.annotation.transactionrequired; -import se.scalablesolutions.akka.annotation.prerestart; -import se.scalablesolutions.akka.annotation.postrestart; -import se.scalablesolutions.akka.state.PersistentMap; -import se.scalablesolutions.akka.state.CassandraStorage; +import se.scalablesolutions.akka.actor.annotation.transactionrequired; +import se.scalablesolutions.akka.actor.annotation.prerestart; +import se.scalablesolutions.akka.actor.annotation.postrestart; +import se.scalablesolutions.akka.persistence.common.PersistentMap; +import se.scalablesolutions.akka.persistence.cassandra.CassandraStorage; import java.nio.ByteBuffer; diff --git a/akka-samples/akka-sample-rest-java/src/main/java/sample/java/SimpleService.java b/akka-samples/akka-sample-rest-java/src/main/java/sample/java/SimpleService.java index 54468495bc..b10bcdaea4 100644 --- a/akka-samples/akka-sample-rest-java/src/main/java/sample/java/SimpleService.java +++ b/akka-samples/akka-sample-rest-java/src/main/java/sample/java/SimpleService.java @@ -8,11 +8,11 @@ import javax.ws.rs.Path; import javax.ws.rs.GET; import javax.ws.rs.Produces; -import se.scalablesolutions.akka.annotation.transactionrequired; -import se.scalablesolutions.akka.annotation.prerestart; -import se.scalablesolutions.akka.annotation.postrestart; -import se.scalablesolutions.akka.state.TransactionalState; -import se.scalablesolutions.akka.state.TransactionalMap; +import se.scalablesolutions.akka.actor.annotation.transactionrequired; +import se.scalablesolutions.akka.actor.annotation.prerestart; +import se.scalablesolutions.akka.actor.annotation.postrestart; +import se.scalablesolutions.akka.stm.TransactionalState; +import se.scalablesolutions.akka.stm.TransactionalMap; /** * Try service out by invoking (multiple times): diff --git a/akka-samples/akka-sample-rest-scala/pom.xml b/akka-samples/akka-sample-rest-scala/pom.xml deleted file mode 100644 index e62a329f8c..0000000000 --- a/akka-samples/akka-sample-rest-scala/pom.xml +++ /dev/null @@ -1,46 +0,0 @@ - - 4.0.0 - - akka-sample-rest-scala - Akka REST Scala Sample Module - - jar - - - akka-samples-parent - se.scalablesolutions.akka - 0.7-SNAPSHOT - - - - - javax.ws.rs - jsr311-api - 1.0 - - - - - src/main/scala - - - maven-antrun-plugin - - - install - - - - - - - run - - - - - - - diff --git a/akka-samples/akka-sample-rest-scala/src/main/scala/SimpleService.scala b/akka-samples/akka-sample-rest-scala/src/main/scala/SimpleService.scala index 1600078636..9422283baf 100644 --- a/akka-samples/akka-sample-rest-scala/src/main/scala/SimpleService.scala +++ b/akka-samples/akka-sample-rest-scala/src/main/scala/SimpleService.scala @@ -5,10 +5,11 @@ package sample.scala import se.scalablesolutions.akka.actor.{Transactor, SupervisorFactory, Actor} -import se.scalablesolutions.akka.state.{CassandraStorage, TransactionalState} +import se.scalablesolutions.akka.stm.TransactionalState +import se.scalablesolutions.akka.persistence.cassandra.CassandraStorage import se.scalablesolutions.akka.config.ScalaConfig._ import se.scalablesolutions.akka.util.Logging -import se.scalablesolutions.akka.comet.{AkkaClusterBroadcastFilter} +import se.scalablesolutions.akka.comet.AkkaClusterBroadcastFilter import java.lang.Integer import java.nio.ByteBuffer diff --git a/akka-samples/akka-sample-security/pom.xml b/akka-samples/akka-sample-security/pom.xml deleted file mode 100644 index 86f331fd65..0000000000 --- a/akka-samples/akka-sample-security/pom.xml +++ /dev/null @@ -1,52 +0,0 @@ - - 4.0.0 - 
- akka-sample-security - Akka Sample Security Module - - jar - - - akka-samples-parent - se.scalablesolutions.akka - 0.7-SNAPSHOT - - - - - javax.ws.rs - jsr311-api - 1.0 - - - javax.annotation - jsr250-api - 1.0 - - - - - - src/main/scala - - - maven-antrun-plugin - - - install - - - - - - - run - - - - - - - diff --git a/akka-samples/akka-sample-security/src/main/resources/akka.conf b/akka-samples/akka-sample-security/src/main/resources/akka.conf deleted file mode 100644 index 60f68a64ec..0000000000 --- a/akka-samples/akka-sample-security/src/main/resources/akka.conf +++ /dev/null @@ -1,35 +0,0 @@ -#################### -# Akka Config File # -#################### - -# This file has all the default settings, so all these could be removed with no visible effect. -# Modify as needed. - - - version = "0.7-SNAPSHOT" - - boot = ["se.scalablesolutions.akka.security.samples.Boot"] # FQN to the class doing initial active object/actor - # supervisor bootstrap, should be defined in default constructor - - - filters = "se.scalablesolutions.akka.security.AkkaSecurityFilterFactory" - - # only one authenticator can be enabled for the security filter factory - authenticator = "se.scalablesolutions.akka.security.samples.BasicAuthenticationService" -# authenticator = "se.scalablesolutions.akka.security.samples.DigestAuthenticationService" -# authenticator = "se.scalablesolutions.akka.security.samples.SpnegoAuthenticationService" - -# -# -# servicePrincipal = "HTTP/localhost@EXAMPLE.COM" -# keyTabLocation = "URL to keytab" -# kerberosDebug = "true" -# realm = "EXAMPLE.COM" -# - - # service = on - # hostname = "localhost" - # port = 9998 - - - diff --git a/akka-samples/akka-sample-security/src/main/scala/SimpleService.scala b/akka-samples/akka-sample-security/src/main/scala/SimpleService.scala index 310585dc29..b6183cfda9 100644 --- a/akka-samples/akka-sample-security/src/main/scala/SimpleService.scala +++ b/akka-samples/akka-sample-security/src/main/scala/SimpleService.scala @@ -7,8 +7,8 @@ package se.scalablesolutions.akka.security.samples import se.scalablesolutions.akka.actor.{SupervisorFactory, Actor} import se.scalablesolutions.akka.config.ScalaConfig._ import se.scalablesolutions.akka.util.Logging -import se.scalablesolutions.akka.security._ -import se.scalablesolutions.akka.state.TransactionalState +import se.scalablesolutions.akka.security.{DigestAuthenticationActor, UserInfo} +import se.scalablesolutions.akka.stm.TransactionalState class Boot { val factory = SupervisorFactory( diff --git a/akka-samples/pom.xml b/akka-samples/pom.xml deleted file mode 100644 index 7738effe56..0000000000 --- a/akka-samples/pom.xml +++ /dev/null @@ -1,56 +0,0 @@ - - 4.0.0 - - akka-samples-parent - Akka Sample Modules - - pom - - - akka - se.scalablesolutions.akka - 0.7-SNAPSHOT - - - - - akka-sample-lift - akka-sample-security - akka-sample-rest-scala - akka-sample-rest-java - - - - - akka-core - ${project.groupId} - ${project.version} - - - akka-persistence-cassandra - ${project.groupId} - ${project.version} - - - akka-persistence-redis - ${project.groupId} - ${project.version} - - - akka-rest - ${project.groupId} - ${project.version} - - - akka-comet - ${project.groupId} - ${project.version} - - - akka-security - ${project.groupId} - ${project.version} - - - diff --git a/akka-security/pom.xml b/akka-security/pom.xml deleted file mode 100644 index 036eeecea0..0000000000 --- a/akka-security/pom.xml +++ /dev/null @@ -1,68 +0,0 @@ - - 4.0.0 - - akka-security - Akka Security Module - - jar - - - akka - 
se.scalablesolutions.akka - 0.7-SNAPSHOT - - - - - akka-core - ${project.groupId} - ${project.version} - - - javax.annotation - jsr250-api - 1.0 - - - com.sun.jersey - jersey-server - ${jersey.version} - - - javax.ws.rs - jsr311-api - 1.0 - - - javax.mail - mail - 1.4.3-rc1 - - - net.liftweb - lift-util - ${lift.version} - - - - - org.scalatest - scalatest - ${scalatest.version} - test - - - junit - junit - 4.5 - test - - - org.mockito - mockito-all - 1.8.0 - test - - - diff --git a/akka-security/src/main/scala/Security.scala b/akka-security/src/main/scala/Security.scala index f6f2b939a1..8a144f4282 100644 --- a/akka-security/src/main/scala/Security.scala +++ b/akka-security/src/main/scala/Security.scala @@ -22,20 +22,21 @@ package se.scalablesolutions.akka.security -import _root_.se.scalablesolutions.akka.actor.{Scheduler, Actor, ActorRegistry} -import _root_.se.scalablesolutions.akka.util.Logging -import _root_.se.scalablesolutions.akka.Config +import se.scalablesolutions.akka.actor.{Scheduler, Actor, ActorRegistry} +import se.scalablesolutions.akka.util.Logging +import se.scalablesolutions.akka.config.Config -import _root_.com.sun.jersey.api.model.AbstractMethod -import _root_.com.sun.jersey.spi.container.{ResourceFilterFactory, ContainerRequest, ContainerRequestFilter, ContainerResponse, ContainerResponseFilter, ResourceFilter} -import _root_.com.sun.jersey.core.util.Base64 -import _root_.javax.ws.rs.core.{SecurityContext, Context, Response} -import _root_.javax.ws.rs.WebApplicationException -import _root_.javax.annotation.security.{DenyAll, PermitAll, RolesAllowed} -import _root_.java.security.Principal -import _root_.java.util.concurrent.TimeUnit +import com.sun.jersey.api.model.AbstractMethod +import com.sun.jersey.spi.container.{ResourceFilterFactory, ContainerRequest, ContainerRequestFilter, ContainerResponse, ContainerResponseFilter, ResourceFilter} +import com.sun.jersey.core.util.Base64 -import _root_.net.liftweb.util.{SecurityHelpers, StringHelpers, IoHelpers} +import javax.ws.rs.core.{SecurityContext, Context, Response} +import javax.ws.rs.WebApplicationException +import javax.annotation.security.{DenyAll, PermitAll, RolesAllowed} +import java.security.Principal +import java.util.concurrent.TimeUnit + +import net.liftweb.util.{SecurityHelpers, StringHelpers, IoHelpers} object Enc extends SecurityHelpers with StringHelpers with IoHelpers @@ -86,10 +87,11 @@ class AkkaSecurityFilterFactory extends ResourceFilterFactory with Logging { override def filter(request: ContainerRequest): ContainerRequest = rolesAllowed match { case Some(roles) => { - (authenticator !! 
(Authenticate(request, roles), 10000)).get.asInstanceOf[AnyRef] match { - case OK => request - case r if r.isInstanceOf[Response] => + (authenticator.!![AnyRef](Authenticate(request, roles), 10000)) match { + case Some(OK) => request + case Some(r) if r.isInstanceOf[Response] => throw new WebApplicationException(r.asInstanceOf[Response]) + case None => throw new WebApplicationException(408) case x => { log.error("Authenticator replied with unexpected result [%s]", x); throw new WebApplicationException(Response.Status.INTERNAL_SERVER_ERROR) @@ -329,19 +331,19 @@ trait DigestAuthenticationActor extends AuthenticationActor[DigestCredentials] { def noncePurgeInterval = 2 * 60 * 1000 //ms } -import _root_.java.security.Principal -import _root_.java.security.PrivilegedActionException -import _root_.java.security.PrivilegedExceptionAction +import java.security.Principal +import java.security.PrivilegedActionException +import java.security.PrivilegedExceptionAction -import _root_.javax.security.auth.login.AppConfigurationEntry -import _root_.javax.security.auth.login.Configuration -import _root_.javax.security.auth.login.LoginContext -import _root_.javax.security.auth.Subject -import _root_.javax.security.auth.kerberos.KerberosPrincipal +import javax.security.auth.login.AppConfigurationEntry +import javax.security.auth.login.Configuration +import javax.security.auth.login.LoginContext +import javax.security.auth.Subject +import javax.security.auth.kerberos.KerberosPrincipal -import _root_.org.ietf.jgss.GSSContext -import _root_.org.ietf.jgss.GSSCredential -import _root_.org.ietf.jgss.GSSManager +import org.ietf.jgss.GSSContext +import org.ietf.jgss.GSSCredential +import org.ietf.jgss.GSSManager trait SpnegoAuthenticationActor extends AuthenticationActor[SpnegoCredentials] { override def unauthorized = @@ -349,7 +351,7 @@ trait SpnegoAuthenticationActor extends AuthenticationActor[SpnegoCredentials] { // for some reason the jersey Base64 class does not work with kerberos // but the commons Base64 does - import _root_.org.apache.commons.codec.binary.Base64 + import org.apache.commons.codec.binary.Base64 override def extractCredentials(r: Req): Option[SpnegoCredentials] = { val AuthHeader = """Negotiate\s(.*)""".r diff --git a/akka-security/src/test/scala/SecuritySpec.scala b/akka-security/src/test/scala/SecuritySpec.scala index 7b669b407a..6e88335d22 100644 --- a/akka-security/src/test/scala/SecuritySpec.scala +++ b/akka-security/src/test/scala/SecuritySpec.scala @@ -14,9 +14,9 @@ import org.mockito.Mockito._ import org.mockito.Matchers._ import org.junit.{Before, After, Test} -import _root_.javax.ws.rs.core.{SecurityContext, Context, Response} -import _root_.com.sun.jersey.spi.container.{ResourceFilterFactory, ContainerRequest, ContainerRequestFilter, ContainerResponse, ContainerResponseFilter, ResourceFilter} -import _root_.com.sun.jersey.core.util.Base64 +import javax.ws.rs.core.{SecurityContext, Context, Response} +import com.sun.jersey.spi.container.{ResourceFilterFactory, ContainerRequest, ContainerRequestFilter, ContainerResponse, ContainerResponseFilter, ResourceFilter} +import com.sun.jersey.core.util.Base64 class BasicAuthenticatorSpec extends junit.framework.TestCase with Suite with MockitoSugar with MustMatchers { diff --git a/akka-spring/akka-spring-test-java/pom.xml b/akka-spring/akka-spring-test-java/pom.xml new file mode 100644 index 0000000000..e6eac986bc --- /dev/null +++ b/akka-spring/akka-spring-test-java/pom.xml @@ -0,0 +1,326 @@ + + 4.0.0 + + Akka Spring Tests in Java + 
akka-spring-test-java + se.scalablesolutions.akka + 0.7 + jar + + + 2.7.7 + 0.5.2 + 1.1.5 + 1.9.18-i + + + + + project.embedded.module + Project Embedded Repository + file://${env.AKKA_HOME}/embedded-repo + + + repo1.maven + Maven Main Repository + http://repo1.maven.org/maven2 + + + scala-tools-snapshots + Scala-Tools Maven2 Snapshot Repository + http://scala-tools.org/repo-snapshots + + + scala-tools + Scala-Tools Maven2 Repository + http://scala-tools.org/repo-releases + + + lag + Configgy's' Repository + http://www.lag.net/repo + + + multiverse-releases + http://multiverse.googlecode.com/svn/maven-repository/releases + + false + + + + multiverse-snaphosts + http://multiverse.googlecode.com/svn/maven-repository/snapshots + + + maven2-repository.dev.java.net + Java.net Repository for Maven + http://download.java.net/maven/2 + + + java.net + Java.net Legacy Repository for Maven + http://download.java.net/maven/1 + legacy + + + guiceyfruit.release + GuiceyFruit Release Repository + http://guiceyfruit.googlecode.com/svn/repo/releases/ + + false + + + true + + + + guiceyfruit.snapshot + GuiceyFruit Snapshot Repository + http://guiceyfruit.googlecode.com/svn/repo/snapshots/ + + true + + + false + + + + guice-maven + guice maven + http://guice-maven.googlecode.com/svn/trunk + + + google-maven-repository + Google Maven Repository + http://google-maven-repository.googlecode.com/svn/repository/ + + + repository.codehaus.org + Codehaus Maven Repository + http://repository.codehaus.org + + true + + + + repository.jboss.org + JBoss Repository for Maven + http://repository.jboss.org/maven2 + + false + + + + nexus.griddynamics.net + Grid Dynamics Maven Repository + https://nexus.griddynamics.net/nexus/content/groups/public + + false + + + + databinder.net/repo/ + dbDispatch Repository for Maven + http://databinder.net/repo + + false + + + + + + + + se.scalablesolutions.akka + akka-core_2.7.7 + 0.7 + + + se.scalablesolutions.akka + akka-util_2.7.7 + 0.7 + + + se.scalablesolutions.akka + akka-util-java_2.7.7 + 0.7 + + + se.scalablesolutions.akka + akka-spring_2.7.7 + 0.7 + + + org.springframework + spring + + + + + + + org.springframework + spring-beans + 3.0.1.RELEASE + + + org.springframework + spring-context + 3.0.1.RELEASE + + + net.lag + configgy + 1.4.7 + + + org.codehaus.aspectwerkz + aspectwerkz-nodeps-jdk5 + 2.1 + + + org.codehaus.aspectwerkz + aspectwerkz-jdk5 + 2.1 + + + org.guiceyfruit + guice-core + 2.0-beta-4 + + + com.google.protobuf + protobuf-java + 2.2.0 + + + com.google.protobuf + protobuf-java + 2.2.0 + + + org.multiverse + multiverse-alpha + 0.4-SNAPSHOT + + + commons-io + commons-io + 1.4 + + + org.jboss.netty + netty + 3.2.0.BETA1 + + + net.databinder + dispatch-json_2.7.7 + 0.6.4 + + + net.databinder + dispatch-http_2.7.7 + 0.6.4 + + + sjson.json + sjson + 0.4 + + + + sbinary + sbinary + 0.3 + + + org.codehaus.jackson + jackson-mapper-asl + 1.2.1 + + + org.codehaus.jackson + jackson-core-asl + 1.2.1 + + + voldemort.store.compress + h2-lzf + 1.0 + + + org.scala-tools + javautils + 2.7.4-0.1 + + + org.scala-lang + scala-library + 2.7.7 + + + org.scala-lang + scala-library + 2.7.7 + + + + + junit + junit + 4.5 + test + + + + + src/main/java + src/test/java + + + + org.apache.maven.plugins + maven-compiler-plugin + + 1.5 + 1.5 + + **/* + + + + + org.apache.maven.plugins + maven-surefire-plugin + + + **/*Persistent* + + + + + + + false + src/test/resources + + + false + src/main/resources + + + false + src/test/java + + ** + + + **/*.java + + + + + diff --git 
a/akka-spring/akka-spring-test-java/src/main/java/se/scalablesolutions/akka/spring/foo/Bar.java b/akka-spring/akka-spring-test-java/src/main/java/se/scalablesolutions/akka/spring/foo/Bar.java new file mode 100644 index 0000000000..7e21aaea8f --- /dev/null +++ b/akka-spring/akka-spring-test-java/src/main/java/se/scalablesolutions/akka/spring/foo/Bar.java @@ -0,0 +1,10 @@ +package se.scalablesolutions.akka.spring.foo; + +public class Bar implements IBar { + + @Override + public String getBar() { + return "bar"; + } + +} diff --git a/akka-spring/akka-spring-test-java/src/main/java/se/scalablesolutions/akka/spring/foo/Foo.java b/akka-spring/akka-spring-test-java/src/main/java/se/scalablesolutions/akka/spring/foo/Foo.java new file mode 100644 index 0000000000..36536cdb5d --- /dev/null +++ b/akka-spring/akka-spring-test-java/src/main/java/se/scalablesolutions/akka/spring/foo/Foo.java @@ -0,0 +1,9 @@ +package se.scalablesolutions.akka.spring.foo; + +public class Foo { + + public String foo() { + return "foo"; + } + +} diff --git a/akka-spring/akka-spring-test-java/src/main/java/se/scalablesolutions/akka/spring/foo/IBar.java b/akka-spring/akka-spring-test-java/src/main/java/se/scalablesolutions/akka/spring/foo/IBar.java new file mode 100644 index 0000000000..d462d0b09b --- /dev/null +++ b/akka-spring/akka-spring-test-java/src/main/java/se/scalablesolutions/akka/spring/foo/IBar.java @@ -0,0 +1,7 @@ +package se.scalablesolutions.akka.spring.foo; + +public interface IBar { + + String getBar(); + +} diff --git a/akka-spring/akka-spring-test-java/src/main/java/se/scalablesolutions/akka/spring/foo/MyPojo.java b/akka-spring/akka-spring-test-java/src/main/java/se/scalablesolutions/akka/spring/foo/MyPojo.java new file mode 100644 index 0000000000..1269f43f62 --- /dev/null +++ b/akka-spring/akka-spring-test-java/src/main/java/se/scalablesolutions/akka/spring/foo/MyPojo.java @@ -0,0 +1,40 @@ +package se.scalablesolutions.akka.spring.foo; + +public class MyPojo { + + private String foo; + private String bar; + + + public MyPojo() { + this.foo = "foo"; + this.bar = "bar"; + } + + + public String getFoo() { + return foo; + } + + + public String getBar() { + return bar; + } + + public void preRestart() { + System.out.println("pre restart"); + } + + public void postRestart() { + System.out.println("post restart"); + } + + public String longRunning() { + try { + Thread.sleep(6000); + } catch (InterruptedException e) { + } + return "this took long"; + } + +} diff --git a/akka-spring/akka-spring-test-java/src/main/java/se/scalablesolutions/akka/spring/foo/StatefulPojo.java b/akka-spring/akka-spring-test-java/src/main/java/se/scalablesolutions/akka/spring/foo/StatefulPojo.java new file mode 100644 index 0000000000..7db06c9a65 --- /dev/null +++ b/akka-spring/akka-spring-test-java/src/main/java/se/scalablesolutions/akka/spring/foo/StatefulPojo.java @@ -0,0 +1,47 @@ +package se.scalablesolutions.akka.spring.foo; + +import se.scalablesolutions.akka.stm.TransactionalMap; +import se.scalablesolutions.akka.stm.TransactionalVector; +import se.scalablesolutions.akka.stm.TransactionalRef; +import se.scalablesolutions.akka.stm.TransactionalState; + +public class StatefulPojo { + private TransactionalMap mapState; + private TransactionalVector vectorState; + private TransactionalRef refState; + private boolean isInitialized = false; + + public void init() { + if (!isInitialized) { + mapState = TransactionalState.newMap(); + vectorState = TransactionalState.newVector(); + refState = TransactionalState.newRef(); + isInitialized = 
true; + } + } + + public String getMapState(String key) { + return (String)mapState.get(key).get(); + } + + public String getVectorState() { + return (String)vectorState.last(); + } + + public String getRefState() { + return (String)refState.get().get(); + } + + public void setMapState(String key, String msg) { + mapState.put(key, msg); + } + + public void setVectorState(String msg) { + vectorState.add(msg); + } + + public void setRefState(String msg) { + refState.swap(msg); + } + +} diff --git a/akka-spring/akka-spring-test-java/src/main/resources/se/scalablesolutions/akka/spring/foo/test-config.xml b/akka-spring/akka-spring-test-java/src/main/resources/se/scalablesolutions/akka/spring/foo/test-config.xml new file mode 100644 index 0000000000..0b35ade59f --- /dev/null +++ b/akka-spring/akka-spring-test-java/src/main/resources/se/scalablesolutions/akka/spring/foo/test-config.xml @@ -0,0 +1,76 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + java.io.IOException + java.lang.NullPointerException + + + + + + + + + + + + + + + java.lang.Exception + + + + + + + + \ No newline at end of file diff --git a/akka-spring/akka-spring-test-java/src/test/java/se/scalablesolutions/akka/spring/SpringConfigurationTest.java b/akka-spring/akka-spring-test-java/src/test/java/se/scalablesolutions/akka/spring/SpringConfigurationTest.java new file mode 100644 index 0000000000..cd5490d1d4 --- /dev/null +++ b/akka-spring/akka-spring-test-java/src/test/java/se/scalablesolutions/akka/spring/SpringConfigurationTest.java @@ -0,0 +1,120 @@ +package se.scalablesolutions.akka.spring; + +import static org.junit.Assert.*; + +import org.junit.Before; +import org.junit.Test; + +import org.springframework.beans.factory.support.DefaultListableBeanFactory; +import org.springframework.beans.factory.xml.XmlBeanDefinitionReader; +import org.springframework.context.ApplicationContext; +import org.springframework.context.support.ClassPathXmlApplicationContext; +import org.springframework.core.io.ClassPathResource; +import org.springframework.core.io.Resource; + +import se.scalablesolutions.akka.config.ActiveObjectConfigurator; +import se.scalablesolutions.akka.dispatch.FutureTimeoutException; +import se.scalablesolutions.akka.config.Config; +import se.scalablesolutions.akka.remote.RemoteNode; +import se.scalablesolutions.akka.spring.foo.Foo; +import se.scalablesolutions.akka.spring.foo.IBar; +import se.scalablesolutions.akka.spring.foo.MyPojo; +import se.scalablesolutions.akka.spring.foo.StatefulPojo; + +/** + * Tests for spring configuration of active objects and supervisor configuration. + */ +public class SpringConfigurationTest { + + private ApplicationContext context = null; + + @Before + public void setUp() { + context = new ClassPathXmlApplicationContext("se/scalablesolutions/akka/spring/foo/test-config.xml"); + } + + /** + * Tests that the <akka:active-object/> and <akka:supervision/> element + * can be used as a top level element. 
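+ * <p>
+ * A sketch of the two top-level forms exercised by this test (bean ids match
+ * the test methods below; the namespace prefix and attribute values are
+ * illustrative):
+ * <pre>
+ * &lt;akka:active-object id="simple-active-object"
+ *     target="se.scalablesolutions.akka.spring.foo.MyPojo" timeout="1000"/&gt;
+ * &lt;akka:supervision id="supervision1"&gt; ... &lt;/akka:supervision&gt;
+ * </pre>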
+ */ + @Test + public void testParse() throws Exception { + final Resource CONTEXT = new ClassPathResource("se/scalablesolutions/akka/spring/foo/test-config.xml"); + DefaultListableBeanFactory beanFactory = new DefaultListableBeanFactory(); + XmlBeanDefinitionReader reader = new XmlBeanDefinitionReader(beanFactory); + reader.loadBeanDefinitions(CONTEXT); + assertTrue(beanFactory.containsBeanDefinition("simple-active-object")); + assertTrue(beanFactory.containsBeanDefinition("remote-active-object")); + assertTrue(beanFactory.containsBeanDefinition("supervision1")); + } + + @Test + public void testSimpleActiveObject() { + MyPojo myPojo = (MyPojo) context.getBean("simple-active-object"); + String msg = myPojo.getFoo(); + msg += myPojo.getBar(); + assertEquals("wrong invocation order", "foobar", msg); + } + + @Test(expected=FutureTimeoutException.class) + public void testSimpleActiveObject_Timeout() { + MyPojo myPojo = (MyPojo) context.getBean("simple-active-object"); + myPojo.longRunning(); + } + + @Test + public void testSimpleActiveObject_NoTimeout() { + MyPojo myPojo = (MyPojo) context.getBean("simple-active-object-long-timeout"); + String msg = myPojo.longRunning(); + assertEquals("this took long", msg); + } + + @Test + public void testTransactionalActiveObject() { + MyPojo myPojo = (MyPojo) context.getBean("transactional-active-object"); + String msg = myPojo.getFoo(); + msg += myPojo.getBar(); + assertEquals("wrong invocation order", "foobar", msg); + } + + @Test + public void testRemoteActiveObject() { + new Thread(new Runnable() { + public void run() { + RemoteNode.start(); + } + }).start(); + try { Thread.sleep(1000); } catch (InterruptedException e) {} + Config.config(); + + MyPojo myPojo = (MyPojo) context.getBean("remote-active-object"); + assertEquals("foo", myPojo.getFoo()); + } + + @Test + public void testSupervision() { + // get ActiveObjectConfigurator bean from spring context + ActiveObjectConfigurator myConfigurator = (ActiveObjectConfigurator) context.getBean("supervision1"); + // get ActiveObjects + Foo foo = myConfigurator.getInstance(Foo.class); + assertNotNull(foo); + IBar bar = myConfigurator.getInstance(IBar.class); + assertNotNull(bar); + MyPojo pojo = myConfigurator.getInstance(MyPojo.class); + assertNotNull(pojo); + } + + @Test + public void testTransactionalState() { + ActiveObjectConfigurator conf = (ActiveObjectConfigurator) context.getBean("supervision2"); + StatefulPojo stateful = conf.getInstance(StatefulPojo.class); + stateful.init(); + stateful.setMapState("testTransactionalState", "some map state"); + stateful.setVectorState("some vector state"); + stateful.setRefState("some ref state"); + assertEquals("some map state", stateful.getMapState("testTransactionalState")); + assertEquals("some vector state", stateful.getVectorState()); + assertEquals("some ref state", stateful.getRefState()); + } + +} diff --git a/akka-spring/src/main/resources/META-INF/spring.handlers b/akka-spring/src/main/resources/META-INF/spring.handlers new file mode 100644 index 0000000000..c8d9dc55ae --- /dev/null +++ b/akka-spring/src/main/resources/META-INF/spring.handlers @@ -0,0 +1 @@ +http\://www.akkasource.org/schema/akka=se.scalablesolutions.akka.spring.AkkaNamespaceHandler \ No newline at end of file diff --git a/akka-spring/src/main/resources/META-INF/spring.schemas b/akka-spring/src/main/resources/META-INF/spring.schemas new file mode 100644 index 0000000000..d04d65566a --- /dev/null +++ b/akka-spring/src/main/resources/META-INF/spring.schemas @@ -0,0 +1 @@
+http\://www.akkasource.org/schema/akka=se/scalablesolutions/akka/spring/akka.xsd diff --git a/akka-spring/src/main/resources/se/scalablesolutions/akka/spring/akka.xsd b/akka-spring/src/main/resources/se/scalablesolutions/akka/spring/akka.xsd new file mode 100644 index 0000000000..134e53e82f --- /dev/null +++ b/akka-spring/src/main/resources/se/scalablesolutions/akka/spring/akka.xsd @@ -0,0 +1,163 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + Name of the remote host. + + + + + + + Port of the remote host. + + + + + + + + + + + Pre restart callback method that is called during restart. + + + + + + + Post restart callback method that is called during restart. + + + + + + + + + + + + + + + + Name of the target class. + + + + + + + default timeout for '!!' invocations + + + + + + + Set to true if messages should have REQUIRES_NEW semantics + + + + + + + Interface implemented by target class. + + + + + + + Lifecycle, permanent or temporary + + + + + + + + + + + + + + + + + + + + + + + + + + + + Failover scheme, AllForOne or OneForOne + + + + + + + Maximal number of retries. + + + + + + + Timerange for restart. + + + + + + + + + + + + + + + + + + + + + diff --git a/akka-spring/src/main/scala/ActiveObjectBeanDefinitionParser.scala b/akka-spring/src/main/scala/ActiveObjectBeanDefinitionParser.scala new file mode 100644 index 0000000000..e4b976188d --- /dev/null +++ b/akka-spring/src/main/scala/ActiveObjectBeanDefinitionParser.scala @@ -0,0 +1,88 @@ +/** + * Copyright (C) 2009-2010 Scalable Solutions AB + */ +package se.scalablesolutions.akka.spring + +import org.springframework.util.xml.DomUtils +import se.scalablesolutions.akka.util.Logging +import org.w3c.dom.Element + +/** + * Parser for custom namespace configuration for active-object. + * @author michaelkober + */ +trait ActiveObjectBeanDefinitionParser extends Logging { + import AkkaSpringConfigurationTags._ + + /** + * Parses the given element and returns a ActiveObjectProperties. 
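+ * <p>
+ * Shape of the element this method expects, per AkkaSpringConfigurationTags
+ * (attribute values here are illustrative; the child elements are optional):
+ * <pre>
+ * &lt;active-object target="foo.bar.MyPojo" timeout="1000" transactional="true"&gt;
+ *   &lt;restart-callbacks pre="preRestart" post="postRestart"/&gt;
+ *   &lt;remote host="localhost" port="9999"/&gt;
+ * &lt;/active-object&gt;
+ * </pre>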
+ * @param element dom element to parse + * @return configuration for the active object + */ + def parseActiveObject(element: Element): ActiveObjectProperties = { + val objectProperties = new ActiveObjectProperties() + val remoteElement = DomUtils.getChildElementByTagName(element, REMOTE_TAG); + val callbacksElement = DomUtils.getChildElementByTagName(element, RESTART_CALLBACKS_TAG); + + if (remoteElement != null) { + objectProperties.host = mandatory(remoteElement, HOST) + objectProperties.port = mandatory(remoteElement, PORT).toInt + } + + if (callbacksElement != null) { + objectProperties.preRestart = callbacksElement.getAttribute(PRE_RESTART) + objectProperties.postRestart = callbacksElement.getAttribute(POST_RESTART) + if ((objectProperties.preRestart.isEmpty) && (objectProperties.postRestart.isEmpty)) { + throw new IllegalStateException("At least one of pre or post must be defined.") + } + } + + try { + objectProperties.timeout = mandatory(element, TIMEOUT).toLong + } catch { + case nfe: NumberFormatException => + log.error(nfe, "could not parse timeout %s", element.getAttribute(TIMEOUT)) + throw nfe + } + + objectProperties.target = mandatory(element, TARGET) + objectProperties.transactional = if (element.getAttribute(TRANSACTIONAL).isEmpty) false else element.getAttribute(TRANSACTIONAL).toBoolean + + if (!element.getAttribute(INTERFACE).isEmpty) { + objectProperties.interface = element.getAttribute(INTERFACE) + } + + if (!element.getAttribute(LIFECYCLE).isEmpty) { + objectProperties.lifecyclye = element.getAttribute(LIFECYCLE) + } + objectProperties + } + + /** + * Get a mandatory element attribute. + * @param element the element with the mandatory attribute + * @param attribute name of the mandatory attribute + */ + def mandatory(element: Element, attribute: String): String = { + if ((element.getAttribute(attribute) == null) || (element.getAttribute(attribute).isEmpty)) { + throw new IllegalArgumentException("Mandatory attribute missing: " + attribute) + } else { + element.getAttribute(attribute) + } + } + + /** + * Get a mandatory child element. + * @param element the parent element + * @param childName name of the mandatory child element + */ + def mandatoryElement(element: Element, childName: String): Element = { + val childElement = DomUtils.getChildElementByTagName(element, childName); + if (childElement == null) { + throw new IllegalArgumentException("Mandatory element missing: '" + childName + "'") + } else { + childElement + } + } + +} \ No newline at end of file diff --git a/akka-spring/src/main/scala/ActiveObjectFactoryBean.scala b/akka-spring/src/main/scala/ActiveObjectFactoryBean.scala new file mode 100644 index 0000000000..fb83c6fe09 --- /dev/null +++ b/akka-spring/src/main/scala/ActiveObjectFactoryBean.scala @@ -0,0 +1,81 @@ +/** + * Copyright (C) 2009-2010 Scalable Solutions AB + */ + +package se.scalablesolutions.akka.spring + +import org.springframework.beans.factory.config.AbstractFactoryBean +import se.scalablesolutions.akka.actor.ActiveObject +import reflect.BeanProperty +import se.scalablesolutions.akka.config.ScalaConfig.RestartCallbacks + + + + +/** + * Factory bean for active objects.
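+ * <p>
+ * A minimal programmatic sketch of what the XML wiring sets up (target class
+ * and values are illustrative):
+ * <pre>
+ * val bean = new ActiveObjectFactoryBean
+ * bean.setTarget("se.scalablesolutions.akka.spring.foo.MyPojo")
+ * bean.setTimeout(1000)
+ * val pojo = bean.createInstance.asInstanceOf[MyPojo]
+ * </pre>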
+ * @author michaelkober + */ +class ActiveObjectFactoryBean extends AbstractFactoryBean[AnyRef] { + import StringReflect._ + + @BeanProperty var target: String = "" + @BeanProperty var timeout: Long = _ + @BeanProperty var interface: String = "" + @BeanProperty var transactional: Boolean = false + @BeanProperty var pre: String = "" + @BeanProperty var post: String = "" + @BeanProperty var host: String = "" + @BeanProperty var port: Int = _ + @BeanProperty var lifecycle: String = "" + + + /* + * @see org.springframework.beans.factory.FactoryBean#getObjectType() + */ + def getObjectType: Class[AnyRef] = target.toClass + + + /* + * @see org.springframework.beans.factory.config.AbstractFactoryBean#createInstance() + */ + def createInstance: AnyRef = { + if (isRemote) { + newRemoteInstance(target, timeout, interface, transactional, restartCallbacks, host, port) + } else { + newInstance(target, timeout, interface, transactional, restartCallbacks) + } + } + + private[akka] def isRemote = (host != null) && (!host.isEmpty) + + /** + * Creates the restart callbacks as an Option[RestartCallbacks]. + */ + private def restartCallbacks: Option[RestartCallbacks] = { + if (((pre == null) || pre.isEmpty) && ((post == null) || post.isEmpty)) { + None + } else { + val callbacks = new RestartCallbacks(pre, post) + Some(callbacks) + } + } + + private def newInstance(target: String, timeout: Long, interface: String, transactional: Boolean, callbacks: Option[RestartCallbacks]): AnyRef = { + if ((interface == null) || interface.isEmpty) { + ActiveObject.newInstance(target.toClass, timeout, transactional, callbacks) + } else { + ActiveObject.newInstance(interface.toClass, target.toClass, timeout, transactional, callbacks) + } + } + + private def newRemoteInstance(target: String, timeout: Long, interface: String, transactional: Boolean, callbacks: Option[RestartCallbacks], host: String, port: Int): AnyRef = { + if ((interface == null) || interface.isEmpty) { + ActiveObject.newRemoteInstance(target.toClass, timeout, transactional, host, port, callbacks) + } else { + ActiveObject.newRemoteInstance(interface.toClass, target.toClass, timeout, transactional, host, port, callbacks) + } + } + +} \ No newline at end of file diff --git a/akka-spring/src/main/scala/ActiveObjectProperties.scala b/akka-spring/src/main/scala/ActiveObjectProperties.scala new file mode 100644 index 0000000000..bd1f838d9b --- /dev/null +++ b/akka-spring/src/main/scala/ActiveObjectProperties.scala @@ -0,0 +1,41 @@ +/** + * Copyright (C) 2009-2010 Scalable Solutions AB + */ + +package se.scalablesolutions.akka.spring + +import org.springframework.beans.factory.support.BeanDefinitionBuilder +import AkkaSpringConfigurationTags._ + +/** + * Data container for active object configuration data. + * @author michaelkober + */ +class ActiveObjectProperties { + var target: String = "" + var timeout: Long = _ + var interface: String = "" + var transactional: Boolean = false + var preRestart: String = "" + var postRestart: String = "" + var host: String = "" + var port: Int = _ + var lifecyclye: String = "" + + /** + * Sets the properties to the given builder.
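+ * <p>
+ * The values mirror the bean properties of ActiveObjectFactoryBean, e.g.
+ * (sketch; values are illustrative):
+ * <pre>
+ * val props = new ActiveObjectProperties
+ * props.target = "foo.bar.MyPojo"
+ * props.timeout = 1000
+ * props.setAsProperties(builder) // builder: BeanDefinitionBuilder
+ * </pre>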
+ * @param builder bean definition builder + */ + def setAsProperties(builder: BeanDefinitionBuilder) { + builder.addPropertyValue(HOST, host) + builder.addPropertyValue(PORT, port) + builder.addPropertyValue(PRE_RESTART, preRestart) + builder.addPropertyValue(POST_RESTART, postRestart) + builder.addPropertyValue(TIMEOUT, timeout) + builder.addPropertyValue(TARGET, target) + builder.addPropertyValue(INTERFACE, interface) + builder.addPropertyValue(TRANSACTIONAL, transactional) + builder.addPropertyValue(LIFECYCLE, lifecyclye) + } + +} \ No newline at end of file diff --git a/akka-spring/src/main/scala/AkkaNamespaceHandler.scala b/akka-spring/src/main/scala/AkkaNamespaceHandler.scala new file mode 100644 index 0000000000..ebf70a7ae3 --- /dev/null +++ b/akka-spring/src/main/scala/AkkaNamespaceHandler.scala @@ -0,0 +1,18 @@ +/** + * Copyright (C) 2009-2010 Scalable Solutions AB + */ +package se.scalablesolutions.akka.spring + +import org.springframework.beans.factory.xml.NamespaceHandlerSupport +import AkkaSpringConfigurationTags._ + +/** + * Custom spring namespace handler for Akka. + * @author michaelkober + */ +class AkkaNamespaceHandler extends NamespaceHandlerSupport { + def init = { + registerBeanDefinitionParser(ACTIVE_OBJECT_TAG, new AkkaObjectBeanDefinitionParser()); + registerBeanDefinitionParser(SUPERVISION_TAG, new SupervisionBeanDefinitionParser()); + } +} \ No newline at end of file diff --git a/akka-spring/src/main/scala/AkkaObjectBeanDefinitionParser.scala b/akka-spring/src/main/scala/AkkaObjectBeanDefinitionParser.scala new file mode 100644 index 0000000000..1f90c17454 --- /dev/null +++ b/akka-spring/src/main/scala/AkkaObjectBeanDefinitionParser.scala @@ -0,0 +1,30 @@ +/** + * Copyright (C) 2009-2010 Scalable Solutions AB + */ +package se.scalablesolutions.akka.spring + +import org.springframework.beans.factory.support.BeanDefinitionBuilder +import org.springframework.beans.factory.xml.AbstractSingleBeanDefinitionParser +import org.springframework.beans.factory.xml.ParserContext +import org.w3c.dom.Element +import se.scalablesolutions.akka.util.Logging + + +/** + * Parser for custom namespace configuration. + * @author michaelkober + */ +class AkkaObjectBeanDefinitionParser extends AbstractSingleBeanDefinitionParser with ActiveObjectBeanDefinitionParser { + /* + * @see org.springframework.beans.factory.xml.AbstractSingleBeanDefinitionParser#doParse(org.w3c.dom.Element, org.springframework.beans.factory.xml.ParserContext, org.springframework.beans.factory.support.BeanDefinitionBuilder) + */ + override def doParse(element: Element, parserContext: ParserContext, builder: BeanDefinitionBuilder) { + val activeObjectConf = parseActiveObject(element) + activeObjectConf.setAsProperties(builder) + } + + /* + * @see org.springframework.beans.factory.xml.AbstractSingleBeanDefinitionParser#getBeanClass(org.w3c.dom.Element) + */ + override def getBeanClass(element: Element) = classOf[ActiveObjectFactoryBean] +} \ No newline at end of file diff --git a/akka-spring/src/main/scala/AkkaSpringConfigurationTags.scala b/akka-spring/src/main/scala/AkkaSpringConfigurationTags.scala new file mode 100644 index 0000000000..058d654ea7 --- /dev/null +++ b/akka-spring/src/main/scala/AkkaSpringConfigurationTags.scala @@ -0,0 +1,41 @@ +/** + * Copyright (C) 2009-2010 Scalable Solutions AB + */ +package se.scalablesolutions.akka.spring + +/** + * XML configuration tags. 
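+ * <p>
+ * Taken together, the tags describe configuration documents such as this
+ * sketch (ids and values are illustrative):
+ * <pre>
+ * &lt;supervision id="supervision1"&gt;
+ *   &lt;restart-strategy failover="AllForOne" retries="3" timerange="1000"&gt;
+ *     &lt;trap-exits&gt;&lt;trap-exit&gt;java.lang.Exception&lt;/trap-exit&gt;&lt;/trap-exits&gt;
+ *   &lt;/restart-strategy&gt;
+ *   &lt;active-objects&gt;
+ *     &lt;active-object target="foo.bar.Foo" timeout="1000"/&gt;
+ *   &lt;/active-objects&gt;
+ * &lt;/supervision&gt;
+ * </pre>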
+ * @author michaelkober + */ +object AkkaSpringConfigurationTags { + // top level tags + val ACTIVE_OBJECT_TAG = "active-object" + val SUPERVISION_TAG = "supervision" + // active-object sub tags + val RESTART_CALLBACKS_TAG = "restart-callbacks" + val REMOTE_TAG = "remote" + // supervision sub tags + val ACTIVE_OBJECTS_TAG = "active-objects" + val STRATEGY_TAG = "restart-strategy" + val TRAP_EXISTS_TAG = "trap-exits" + val TRAP_EXIT_TAG = "trap-exit" + // active object attributes + val TIMEOUT = "timeout" + val TARGET = "target" + val INTERFACE = "interface" + val TRANSACTIONAL = "transactional" + val HOST = "host" + val PORT = "port" + val PRE_RESTART = "pre" + val POST_RESTART = "post" + val LIFECYCLE = "lifecycle" + // supervision attributes + val FAILOVER = "failover" + val RETRIES = "retries" + val TIME_RANGE = "timerange" + // Value types + val VAL_LIFECYCYLE_TEMPORARY = "temporary" + val VAL_LIFECYCYLE_PERMANENT = "permanent" + val VAL_ALL_FOR_ONE = "AllForOne" + val VAL_ONE_FOR_ONE = "OneForOne" +} \ No newline at end of file diff --git a/akka-spring/src/main/scala/StringReflect.scala b/akka-spring/src/main/scala/StringReflect.scala new file mode 100644 index 0000000000..7dda9dba08 --- /dev/null +++ b/akka-spring/src/main/scala/StringReflect.scala @@ -0,0 +1,24 @@ +/** + * Copyright (C) 2009-2010 Scalable Solutions AB + */ +package se.scalablesolutions.akka.spring + +object StringReflect { + /** + * Implicit conversion from String to StringReflect. + */ + implicit def string2StringReflect(x: String) = new StringReflect(x) +} + +/** + * Reflection helper class. + * @author michaelkober + */ +class StringReflect(val self: String) { + def toClass[T <: AnyRef]: Class[T] = { + val clazz = Class.forName(self) + clazz.asInstanceOf[Class[T]] + } +} + + diff --git a/akka-spring/src/main/scala/SupervisionBeanDefinitionParser.scala b/akka-spring/src/main/scala/SupervisionBeanDefinitionParser.scala new file mode 100644 index 0000000000..7134675af1 --- /dev/null +++ b/akka-spring/src/main/scala/SupervisionBeanDefinitionParser.scala @@ -0,0 +1,65 @@ +/** + * Copyright (C) 2009-2010 Scalable Solutions AB + */ +package se.scalablesolutions.akka.spring + +import se.scalablesolutions.akka.util.Logging +import org.springframework.beans.factory.support.BeanDefinitionBuilder +import org.springframework.beans.factory.xml.{ParserContext, AbstractSingleBeanDefinitionParser} +import se.scalablesolutions.akka.config.JavaConfig._ +import AkkaSpringConfigurationTags._ + + +import org.w3c.dom.Element +import org.springframework.util.xml.DomUtils + + +/** + * Parser for the custom namespace for Akka's declarative supervisor configuration.
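+ * <p>
+ * For a restart-strategy element with failover="AllForOne", retries="3" and
+ * timerange="1000", parseRestartStrategy builds (sketch):
+ * <pre>
+ * new RestartStrategy(new AllForOne(), 3, 1000, trapExceptions)
+ * </pre>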
+ * @author michaelkober + */ +class SupervisionBeanDefinitionParser extends AbstractSingleBeanDefinitionParser with ActiveObjectBeanDefinitionParser { + /* (non-Javadoc) + * @see org.springframework.beans.factory.xml.AbstractSingleBeanDefinitionParser#doParse(org.w3c.dom.Element, org.springframework.beans.factory.xml.ParserContext, org.springframework.beans.factory.support.BeanDefinitionBuilder) + */ + override def doParse(element: Element, parserContext: ParserContext, builder: BeanDefinitionBuilder) { + parseSupervisor(element, builder) + } + + /** + * made accessible for testing + */ + private[akka] def parseSupervisor(element: Element, builder: BeanDefinitionBuilder) { + val strategyElement = mandatoryElement(element, STRATEGY_TAG); + val activeObjectsElement = mandatoryElement(element, ACTIVE_OBJECTS_TAG); + parseRestartStrategy(strategyElement, builder) + parseActiveObjectList(activeObjectsElement, builder) + } + + private[akka] def parseRestartStrategy(element: Element, builder: BeanDefinitionBuilder) { + val failover = if (mandatory(element, FAILOVER) == "AllForOne") new AllForOne() else new OneForOne() + val timeRange = mandatory(element, TIME_RANGE).toInt + val retries = mandatory(element, RETRIES).toInt + val trapExitsElement = mandatoryElement(element, TRAP_EXISTS_TAG) + val trapExceptions = parseTrapExits(trapExitsElement) + val restartStrategy = new RestartStrategy(failover, retries, timeRange, trapExceptions) + builder.addPropertyValue("restartStrategy", restartStrategy) + } + + private[akka] def parseActiveObjectList(element: Element, builder: BeanDefinitionBuilder) { + val activeObjects = DomUtils.getChildElementsByTagName(element, ACTIVE_OBJECT_TAG).toArray.toList.asInstanceOf[List[Element]] + val activeObjectProperties = activeObjects.map(parseActiveObject(_)) + builder.addPropertyValue("supervised", activeObjectProperties) + } + + private def parseTrapExits(element: Element): Array[Class[_ <: Throwable]] = { + import StringReflect._ + val trapExits = DomUtils.getChildElementsByTagName(element, TRAP_EXIT_TAG).toArray.toList.asInstanceOf[List[Element]] + trapExits.map(DomUtils.getTextValue(_).toClass).toArray + } + + /* + * @see org.springframework.beans.factory.xml.AbstractSingleBeanDefinitionParser#getBeanClass(org.w3c.dom.Element) + */ + override def getBeanClass(element: Element) = classOf[SupervisionFactoryBean] +} \ No newline at end of file diff --git a/akka-spring/src/main/scala/SupervisionFactoryBean.scala b/akka-spring/src/main/scala/SupervisionFactoryBean.scala new file mode 100644 index 0000000000..7044445ba8 --- /dev/null +++ b/akka-spring/src/main/scala/SupervisionFactoryBean.scala @@ -0,0 +1,63 @@ +/** + * Copyright (C) 2009-2010 Scalable Solutions AB + */ +package se.scalablesolutions.akka.spring + +import org.springframework.beans.factory.config.AbstractFactoryBean +import se.scalablesolutions.akka.config.ActiveObjectConfigurator +import se.scalablesolutions.akka.config.JavaConfig._ +import AkkaSpringConfigurationTags._ +import reflect.BeanProperty + + +/** + * Factory bean for supervisor configuration. 
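+ * <p>
+ * Application code fetches the configurator from the Spring context and asks
+ * it for the supervised instances (bean id is illustrative):
+ * <pre>
+ * val conf = context.getBean("supervision1").asInstanceOf[ActiveObjectConfigurator]
+ * val foo = conf.getInstance(classOf[Foo])
+ * </pre>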
+ * @author michaelkober + */ +class SupervisionFactoryBean extends AbstractFactoryBean[ActiveObjectConfigurator] { + @BeanProperty var restartStrategy: RestartStrategy = _ + @BeanProperty var supervised: List[ActiveObjectProperties] = _ + + /* + * @see org.springframework.beans.factory.FactoryBean#getObjectType() + */ + def getObjectType: Class[ActiveObjectConfigurator] = classOf[ActiveObjectConfigurator] + + /* + * @see org.springframework.beans.factory.config.AbstractFactoryBean#createInstance() + */ + def createInstance: ActiveObjectConfigurator = { + val configurator = new ActiveObjectConfigurator() + + configurator.configure( + restartStrategy, + supervised.map(createComponent(_)).toArray + ).supervise + } + + /** + * Creates the configuration for an ActiveObject. + */ + private[akka] def createComponent(props: ActiveObjectProperties): Component = { + import StringReflect._ + val lifeCycle = if (!props.lifecyclye.isEmpty && props.lifecyclye.equalsIgnoreCase(VAL_LIFECYCYLE_TEMPORARY)) new LifeCycle(new Temporary()) else new LifeCycle(new Permanent()) + val isRemote = (props.host != null) && (!props.host.isEmpty) + val withInterface = (props.interface != null) && (!props.interface.isEmpty) + // FIXME: timeout int vs long + val timeout = props.timeout.asInstanceOf[Int] + if (isRemote) { + val remote = new RemoteAddress(props.host, props.port) + if (withInterface) { + new Component(props.interface.toClass, props.target.toClass, lifeCycle, timeout, props.transactional, remote) + } else { + new Component(props.target.toClass, lifeCycle, timeout, props.transactional, remote) + } + } else { + if (withInterface) { + new Component(props.interface.toClass, props.target.toClass, lifeCycle, timeout, props.transactional) + } else { + new Component(props.target.toClass, lifeCycle, timeout, props.transactional) + } + } + } +} \ No newline at end of file diff --git a/akka-spring/src/test/scala/ActiveObjectBeanDefinitionParserTest.scala b/akka-spring/src/test/scala/ActiveObjectBeanDefinitionParserTest.scala new file mode 100644 index 0000000000..19fe23e5c9 --- /dev/null +++ b/akka-spring/src/test/scala/ActiveObjectBeanDefinitionParserTest.scala @@ -0,0 +1,51 @@ +/** + * Copyright (C) 2009-2010 Scalable Solutions AB + */ +package se.scalablesolutions.akka.spring + +import org.scalatest.Spec +import org.scalatest.matchers.ShouldMatchers +import org.scalatest.junit.JUnitRunner +import org.junit.runner.RunWith +import ScalaDom._ + +import org.w3c.dom.Element + +/** + * Test for ActiveObjectBeanDefinitionParser + * @author michaelkober + */ +@RunWith(classOf[JUnitRunner]) +class ActiveObjectBeanDefinitionParserTest extends Spec with ShouldMatchers { + private class Parser extends ActiveObjectBeanDefinitionParser + + describe("An ActiveObjectBeanDefinitionParser") { + val parser = new Parser() + it("should parse the active object configuration") { + val props = parser.parseActiveObject(createTestElement); + assert(props != null) + assert(props.timeout == 1000) + assert(props.target == "foo.bar.MyPojo") + assert(props.transactional) + } + + it("should throw IllegalArgumentException on missing mandatory attributes") { + evaluating { parser.parseActiveObject(createTestElement2) } should produce [IllegalArgumentException] + } + } + + private def createTestElement : Element = { + val xml = <active-object id="active-object" target="foo.bar.MyPojo" timeout="1000" transactional="true"/> + dom(xml).getDocumentElement + } + + private def createTestElement2 : Element = { + val xml = <active-object id="active-object-without-mandatory-attributes"/> + dom(xml).getDocumentElement + } +} \ No newline at end of file diff --git a/akka-spring/src/test/scala/ActiveObjectFactoryBeanTest.scala
b/akka-spring/src/test/scala/ActiveObjectFactoryBeanTest.scala new file mode 100644 index 0000000000..e0eec060bf --- /dev/null +++ b/akka-spring/src/test/scala/ActiveObjectFactoryBeanTest.scala @@ -0,0 +1,41 @@ +/** + * Copyright (C) 2009-2010 Scalable Solutions AB + */ +package se.scalablesolutions.akka.spring + +import org.scalatest.Spec +import org.scalatest.matchers.ShouldMatchers +import org.scalatest.junit.JUnitRunner +import org.junit.runner.RunWith + +/** + * Test for ActiveObjectFactoryBean + * @author michaelkober + */ +@RunWith(classOf[JUnitRunner]) +class ActiveObjectFactoryBeanTest extends Spec with ShouldMatchers { + + describe("A ActiveObjectFactoryBean") { + val bean = new ActiveObjectFactoryBean + it("should have java getters and setters for all properties") { + bean.setTarget("java.lang.String") + assert(bean.getTarget == "java.lang.String") + bean.setTimeout(1000) + assert(bean.getTimeout == 1000) + } + + it("should create a remote active object when a host is set") { + bean.setHost("some.host.com"); + assert(bean.isRemote) + } + + it("should return the object type") { + bean.setTarget("java.lang.String") + assert(bean.getObjectType == classOf[String]) + } + + it("should create an active object") { + // TODO: + } + } +} diff --git a/akka-spring/src/test/scala/ScalaDom.scala b/akka-spring/src/test/scala/ScalaDom.scala new file mode 100644 index 0000000000..70531861e1 --- /dev/null +++ b/akka-spring/src/test/scala/ScalaDom.scala @@ -0,0 +1,40 @@ +package se.scalablesolutions.akka.spring +/** + * from http://stackoverflow.com/questions/2002685/any-conversion-from-scalas-xml-to-w3c-dom + */ + +object ScalaDom { + import scala.xml._ + import org.w3c.dom.{Document => JDocument, Node => JNode} + import javax.xml.parsers.DocumentBuilderFactory + + def dom(n: Node): JDocument = { + + val doc = DocumentBuilderFactory + .newInstance + .newDocumentBuilder + .getDOMImplementation + .createDocument(null, null, null) + + def build(node: Node, parent: JNode): Unit = { + val jnode: JNode = node match { + case e: Elem => { + val jn = doc.createElement(e.label) + e.attributes foreach { a => jn.setAttribute(a.key, a.value.mkString) } + jn + } + case a: Atom[_] => doc.createTextNode(a.text) + case c: Comment => doc.createComment(c.commentText) + case er: EntityRef => doc.createEntityReference(er.entityName) + case pi: ProcInstr => doc.createProcessingInstruction(pi.target, pi.proctext) + } + parent.appendChild(jnode) + node.child.map { build(_, jnode) } + } + + build(n, doc) + doc + + } +} + \ No newline at end of file diff --git a/akka-spring/src/test/scala/SupervisionBeanDefinitionParserTest.scala b/akka-spring/src/test/scala/SupervisionBeanDefinitionParserTest.scala new file mode 100644 index 0000000000..af79ecf5df --- /dev/null +++ b/akka-spring/src/test/scala/SupervisionBeanDefinitionParserTest.scala @@ -0,0 +1,122 @@ +/** + * Copyright (C) 2009-2010 Scalable Solutions AB + */ +package se.scalablesolutions.akka.spring + +import org.scalatest.Spec +import org.scalatest.matchers.ShouldMatchers +import org.scalatest.junit.JUnitRunner +import org.junit.runner.RunWith +import ScalaDom._ + +import se.scalablesolutions.akka.config.JavaConfig._ + +import org.w3c.dom.Element +import org.springframework.beans.factory.support.BeanDefinitionBuilder + +/** + * Test for SupervisionBeanDefinitionParser + * @author michaelkober + */ +@RunWith(classOf[JUnitRunner]) +class SupervisionBeanDefinitionParserTest extends Spec with ShouldMatchers { + private class Parser extends SupervisionBeanDefinitionParser 
+ + describe("A SupervisionBeanDefinitionParser") { + val parser = new Parser() + val builder = BeanDefinitionBuilder.genericBeanDefinition("foo.bar.Foo") + + it("should be able to parse active object configuration") { + val props = parser.parseActiveObject(createActiveObjectElement); + assert(props != null) + assert(props.timeout == 1000) + assert(props.target == "foo.bar.MyPojo") + assert(props.transactional) + } + + it("should parse the supervisor restart strategy") { + parser.parseSupervisor(createSupervisorElement, builder); + val strategy = builder.getBeanDefinition.getPropertyValues.getPropertyValue("restartStrategy").getValue.asInstanceOf[RestartStrategy] + assert(strategy != null) + assert(strategy.scheme match { + case x:AllForOne => true + case _ => false }) + expect(3) { strategy.maxNrOfRetries } + expect(1000) { strategy.withinTimeRange } + } + + it("should parse the supervised active objects") { + parser.parseSupervisor(createSupervisorElement, builder); + val supervised = builder.getBeanDefinition.getPropertyValues.getPropertyValue("supervised").getValue.asInstanceOf[List[ActiveObjectProperties]] + assert(supervised != null) + expect(3) { supervised.length } + val iterator = supervised.elements + expect("foo.bar.Foo") { iterator.next.target } + expect("foo.bar.Bar") { iterator.next.target } + expect("foo.bar.MyPojo") { iterator.next.target } + } + + it("should throw IllegalArgumentException on missing mandatory attributes") { + evaluating { parser.parseSupervisor(createSupervisorMissingAttribute, builder) } should produce [IllegalArgumentException] + } + + it("should throw IllegalArgumentException on missing mandatory elements") { + evaluating { parser.parseSupervisor(createSupervisorMissingElement, builder) } should produce [IllegalArgumentException] + } + } + + private def createActiveObjectElement : Element = { + val xml = <active-object target="foo.bar.MyPojo" timeout="1000" transactional="true"/> + dom(xml).getDocumentElement + } + + private def createSupervisorElement : Element = { + val xml = <supervision id="supervision"> + <restart-strategy failover="AllForOne" retries="3" timerange="1000"> + <trap-exits> + <trap-exit>java.io.IOException</trap-exit> + <trap-exit>java.lang.NullPointerException</trap-exit> + </trap-exits> + </restart-strategy> + <active-objects> + <active-object target="foo.bar.Foo" timeout="1000"/> + <active-object target="foo.bar.Bar" timeout="1000"/> + <active-object target="foo.bar.MyPojo" timeout="1000"/> + </active-objects> + </supervision> + dom(xml).getDocumentElement + } + + + private def createSupervisorMissingAttribute : Element = { + val xml = <supervision id="supervision"> + <restart-strategy failover="AllForOne" retries="3"> + <trap-exits> + <trap-exit>java.io.IOException</trap-exit> + </trap-exits> + </restart-strategy> + <active-objects> + <active-object target="foo.bar.Foo" timeout="1000"/> + </active-objects> + </supervision> + dom(xml).getDocumentElement + } + + private def createSupervisorMissingElement : Element = { + val xml = <supervision id="supervision"> + <active-objects> + <active-object target="foo.bar.Foo" timeout="1000"/> + </active-objects> + </supervision> + dom(xml).getDocumentElement + } +} + diff --git a/akka-spring/src/test/scala/SupervisionFactoryBeanTest.scala b/akka-spring/src/test/scala/SupervisionFactoryBeanTest.scala new file mode 100644 index 0000000000..dbb0798c9d --- /dev/null +++ b/akka-spring/src/test/scala/SupervisionFactoryBeanTest.scala @@ -0,0 +1,41 @@ +/** + * Copyright (C) 2009-2010 Scalable Solutions AB + */ +package se.scalablesolutions.akka.spring + +import org.scalatest.Spec +import org.scalatest.matchers.ShouldMatchers +import org.scalatest.junit.JUnitRunner +import org.junit.runner.RunWith +import se.scalablesolutions.akka.config.JavaConfig._ +import se.scalablesolutions.akka.config.ActiveObjectConfigurator + +private[akka] class Foo + +@RunWith(classOf[JUnitRunner]) +class SupervisionFactoryBeanTest extends Spec with ShouldMatchers { + + val restartStrategy = new RestartStrategy(new AllForOne(), 3, 1000, Array(classOf[Throwable])) + val activeObjects = List(createActiveObjectProperties("se.scalablesolutions.akka.spring.Foo", 1000L)) + + def createActiveObjectProperties(target: String, timeout: Long) : ActiveObjectProperties = { + val properties = new ActiveObjectProperties() + properties.target = target + properties.timeout = timeout +
properties + } + + describe("A SupervisionFactoryBean") { + val bean = new SupervisionFactoryBean + it("should have java getters and setters for all properties") { + bean.setRestartStrategy(restartStrategy) + assert(bean.getRestartStrategy == restartStrategy) + bean.setSupervised(activeObjects) + assert(bean.getSupervised == activeObjects) + } + + it("should return the object type ActiveObjectConfigurator") { + assert(bean.getObjectType == classOf[ActiveObjectConfigurator]) + } + } +} \ No newline at end of file diff --git a/akka-util-java/pom.xml b/akka-util-java/pom.xml deleted file mode 100644 index e0a729491b..0000000000 --- a/akka-util-java/pom.xml +++ /dev/null @@ -1,74 +0,0 @@ - - 4.0.0 - - akka-util-java - Akka Java Utilities Module - - jar - - - akka - se.scalablesolutions.akka - 0.7-SNAPSHOT - - - - - org.guiceyfruit - guice-core - 2.0-beta-4 - - - com.google.protobuf - protobuf-java - 2.2.0 - - - org.multiverse - multiverse-alpha - 0.3 - jar-with-dependencies - - - org.multiverse - multiverse-core - - - asm - asm-tree - - - asm - asm-analysis - - - asm - asm-commons - - - asm - asm-util - - - - - - - src/main/java - src/test/java - - - org.apache.maven.plugins - maven-compiler-plugin - - 1.5 - 1.5 - - **/* - - - - - - diff --git a/akka-util-java/src/main/java/se/scalablesolutions/akka/annotation/configuration.java b/akka-util-java/src/main/java/se/scalablesolutions/akka/annotation/configuration.java index b0139ac6f0..9c5375398b 100644 --- a/akka-util-java/src/main/java/se/scalablesolutions/akka/annotation/configuration.java +++ b/akka-util-java/src/main/java/se/scalablesolutions/akka/annotation/configuration.java @@ -2,7 +2,7 @@ * Copyright (C) 2009-2010 Scalable Solutions AB */ -package se.scalablesolutions.akka.annotation; +package se.scalablesolutions.akka.actor.annotation; import java.lang.annotation.*; diff --git a/akka-util-java/src/main/java/se/scalablesolutions/akka/annotation/consume.java b/akka-util-java/src/main/java/se/scalablesolutions/akka/annotation/consume.java new file mode 100644 index 0000000000..17ac05bf17 --- /dev/null +++ b/akka-util-java/src/main/java/se/scalablesolutions/akka/annotation/consume.java @@ -0,0 +1,18 @@ +/** + * Copyright (C) 2009-2010 Scalable Solutions AB + */ + + package se.scalablesolutions.akka.actor.annotation; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +@Retention(RetentionPolicy.RUNTIME) +@Target(ElementType.TYPE) +public @interface consume { + + public abstract String value(); + +} \ No newline at end of file diff --git a/akka-util-java/src/main/java/se/scalablesolutions/akka/annotation/immutable.java b/akka-util-java/src/main/java/se/scalablesolutions/akka/annotation/immutable.java index 9dd2d17322..84dbbf4636 100644 --- a/akka-util-java/src/main/java/se/scalablesolutions/akka/annotation/immutable.java +++ b/akka-util-java/src/main/java/se/scalablesolutions/akka/annotation/immutable.java @@ -2,7 +2,7 @@ * Copyright (C) 2009-2010 Scalable Solutions AB */ -package se.scalablesolutions.akka.annotation; +package se.scalablesolutions.akka.actor.annotation; import java.lang.annotation.*; diff --git a/akka-util-java/src/main/java/se/scalablesolutions/akka/annotation/inittransactionalstate.java b/akka-util-java/src/main/java/se/scalablesolutions/akka/annotation/inittransactionalstate.java index 50e42546ad..35c5f05afe 100644 --- 
a/akka-util-java/src/main/java/se/scalablesolutions/akka/annotation/inittransactionalstate.java +++ b/akka-util-java/src/main/java/se/scalablesolutions/akka/annotation/inittransactionalstate.java @@ -2,7 +2,7 @@ * Copyright (C) 2009-2010 Scalable Solutions AB */ -package se.scalablesolutions.akka.annotation; +package se.scalablesolutions.akka.actor.annotation; import java.lang.annotation.*; diff --git a/akka-util-java/src/main/java/se/scalablesolutions/akka/annotation/oneway.java b/akka-util-java/src/main/java/se/scalablesolutions/akka/annotation/oneway.java index fa7084bb07..45440b5613 100644 --- a/akka-util-java/src/main/java/se/scalablesolutions/akka/annotation/oneway.java +++ b/akka-util-java/src/main/java/se/scalablesolutions/akka/annotation/oneway.java @@ -2,7 +2,7 @@ * Copyright (C) 2009-2010 Scalable Solutions AB */ -package se.scalablesolutions.akka.annotation; +package se.scalablesolutions.akka.actor.annotation; import java.lang.annotation.*; diff --git a/akka-util-java/src/main/java/se/scalablesolutions/akka/annotation/postrestart.java b/akka-util-java/src/main/java/se/scalablesolutions/akka/annotation/postrestart.java index d003a38df8..5eed474832 100644 --- a/akka-util-java/src/main/java/se/scalablesolutions/akka/annotation/postrestart.java +++ b/akka-util-java/src/main/java/se/scalablesolutions/akka/annotation/postrestart.java @@ -2,7 +2,7 @@ * Copyright (C) 2009-2010 Scalable Solutions AB */ -package se.scalablesolutions.akka.annotation; +package se.scalablesolutions.akka.actor.annotation; import java.lang.annotation.*; diff --git a/akka-util-java/src/main/java/se/scalablesolutions/akka/annotation/prerestart.java b/akka-util-java/src/main/java/se/scalablesolutions/akka/annotation/prerestart.java index e65f38cad6..94f9a01405 100644 --- a/akka-util-java/src/main/java/se/scalablesolutions/akka/annotation/prerestart.java +++ b/akka-util-java/src/main/java/se/scalablesolutions/akka/annotation/prerestart.java @@ -2,7 +2,7 @@ * Copyright (C) 2009-2010 Scalable Solutions AB */ -package se.scalablesolutions.akka.annotation; +package se.scalablesolutions.akka.actor.annotation; import java.lang.annotation.*; diff --git a/akka-util-java/src/main/java/se/scalablesolutions/akka/annotation/state.java b/akka-util-java/src/main/java/se/scalablesolutions/akka/annotation/state.java index 1e627dde8e..509d129c1b 100644 --- a/akka-util-java/src/main/java/se/scalablesolutions/akka/annotation/state.java +++ b/akka-util-java/src/main/java/se/scalablesolutions/akka/annotation/state.java @@ -2,7 +2,7 @@ * Copyright (C) 2009-2010 Scalable Solutions AB */ -package se.scalablesolutions.akka.annotation; +package se.scalablesolutions.akka.actor.annotation; import java.lang.annotation.*; diff --git a/akka-util-java/src/main/java/se/scalablesolutions/akka/annotation/transactionrequired.java b/akka-util-java/src/main/java/se/scalablesolutions/akka/annotation/transactionrequired.java index c45482c467..c41a09ee46 100644 --- a/akka-util-java/src/main/java/se/scalablesolutions/akka/annotation/transactionrequired.java +++ b/akka-util-java/src/main/java/se/scalablesolutions/akka/annotation/transactionrequired.java @@ -2,7 +2,7 @@ * Copyright (C) 2009-2010 Scalable Solutions AB */ -package se.scalablesolutions.akka.annotation; +package se.scalablesolutions.akka.actor.annotation; import java.lang.annotation.*; diff --git a/akka-util-java/src/main/java/se/scalablesolutions/akka/stm/AtomicTemplate.java b/akka-util-java/src/main/java/se/scalablesolutions/akka/stm/AtomicTemplate.java deleted file mode 100644 index 
a693ff1248..0000000000 --- a/akka-util-java/src/main/java/se/scalablesolutions/akka/stm/AtomicTemplate.java +++ /dev/null @@ -1,341 +0,0 @@ -package se.scalablesolutions.akka.stm; - -import static org.multiverse.api.GlobalStmInstance.getGlobalStmInstance; -import org.multiverse.api.Stm; -import static org.multiverse.api.ThreadLocalTransaction.getThreadLocalTransaction; -import static org.multiverse.api.ThreadLocalTransaction.setThreadLocalTransaction; -import org.multiverse.api.Transaction; -import org.multiverse.api.TransactionStatus; -import org.multiverse.api.exceptions.CommitFailureException; -import org.multiverse.api.exceptions.LoadException; -import org.multiverse.api.exceptions.RetryError; -import org.multiverse.api.exceptions.TooManyRetriesException; -import org.multiverse.templates.AbortedException; -import org.multiverse.utils.latches.CheapLatch; -import org.multiverse.utils.latches.Latch; - -import static java.lang.String.format; -import java.util.logging.Logger; - -/** - * A Template that handles the boilerplate code for transactions. A transaction will be placed if none is available - * around a section and if all goes right, commits at the end. - *
- * example:
- * <pre>
    - * new AtomicTemplate(){
    - *    Object execute(Transaction t){
    - *        queue.push(1);
    - *        return null;
    - *    }
    - * }.execute();
- * </pre>
- * <p/>
    - * It could also be that the transaction is retried (e.g. caused by optimistic locking failures). This is also a task - * for template. In the future this retry behavior will be customizable. - *
    - * If a transaction already is available on the TransactionThreadLocal, no new transaction is started and essentially - * the whole AtomicTemplate is ignored. - *
    - * If no transaction is available on the TransactionThreadLocal, a new one will be created and used during the execution - * of the AtomicTemplate and will be removed once the AtomicTemplate finishes. - *
    - * All uncaught throwable's lead to a rollback of the transaction. - *
    - * AtomicTemplates are not thread-safe to use. - *
    - * AtomicTemplates can completely work without threadlocals. See the {@link AtomicTemplate#AtomicTemplate(org.multiverse.api.Stm - * ,String, boolean, boolean, int)} for more information. - * - * @author Peter Veentjer - */ -public abstract class AtomicTemplate { - - private final static Logger logger = Logger.getLogger(AtomicTemplate.class.getName()); - - private final Stm stm; - private final boolean ignoreThreadLocalTransaction; - private final int retryCount; - private final boolean readonly; - private int attemptCount; - private final String familyName; - - /** - * Creates a new AtomicTemplate that uses the STM stored in the GlobalStm and works the the {@link - * org.multiverse.utils.ThreadLocalTransaction}. - */ - public AtomicTemplate() { - this(getGlobalStmInstance()); - } - - public AtomicTemplate(boolean readonly) { - this(getGlobalStmInstance(), null, false, readonly, Integer.MAX_VALUE); - } - - /** - * Creates a new AtomicTemplate using the provided stm. The transaction used is stores/retrieved from the {@link - * org.multiverse.utils.ThreadLocalTransaction}. - * - * @param stm the stm to use for transactions. - * @throws NullPointerException if stm is null. - */ - public AtomicTemplate(Stm stm) { - this(stm, null, false, false, Integer.MAX_VALUE); - } - - public AtomicTemplate(String familyName, boolean readonly, int retryCount) { - this(getGlobalStmInstance(), familyName, false, readonly, retryCount); - } - - /** - * Creates a new AtomicTemplate that uses the provided STM. This method is provided to make Multiverse easy to - * integrate with environment that don't want to depend on threadlocals. - * - * @param stm the stm to use for transactions. - * @param ignoreThreadLocalTransaction true if this Template should completely ignore the ThreadLocalTransaction. - * This is useful for using the AtomicTemplate in other environments that don't - * want to depend on threadlocals but do want to use the AtomicTemplate. - * @throws NullPointerException if stm is null. - */ - public AtomicTemplate(Stm stm, String familyName, boolean ignoreThreadLocalTransaction, boolean readonly, - int retryCount) { - if (stm == null) { - throw new NullPointerException(); - } - if (retryCount < 0) { - throw new IllegalArgumentException(); - } - this.stm = stm; - this.ignoreThreadLocalTransaction = ignoreThreadLocalTransaction; - this.readonly = readonly; - this.retryCount = retryCount; - this.familyName = familyName; - } - - public String getFamilyName() { - return familyName; - } - - /** - * Returns the current attempt. Value will always be larger than zero and increases everytime the transaction needs - * to be retried. - * - * @return the current attempt count. - */ - public final int getAttemptCount() { - return attemptCount; - } - - /** - * Returns the number of retries that this AtomicTemplate is allowed to do. The returned value will always be equal - * or larger than 0. - * - * @return the number of retries. - */ - public final int getRetryCount() { - return retryCount; - } - - /** - * Returns the {@link Stm} used by this AtomicTemplate to execute transactions on. - * - * @return the Stm used by this AtomicTemplate. - */ - public final Stm getStm() { - return stm; - } - - /** - * Check if this AtomicTemplate ignores the ThreadLocalTransaction. - * - * @return true if this AtomicTemplate ignores the ThreadLocalTransaction, false otherwise. 
- */ - public final boolean isIgnoreThreadLocalTransaction() { - return ignoreThreadLocalTransaction; - } - - /** - * Checks if this AtomicTemplate executes readonly transactions. - * - * @return true if it executes readonly transactions, false otherwise. - */ - public final boolean isReadonly() { - return readonly; - } - - /** - * This is the method can be overridden to do pre-start tasks. - */ - public void preStart() { - } - - /** - * This is the method can be overridden to do post-start tasks. - * - * @param t the transaction used for this execution. - */ - public void postStart(Transaction t) { - } - - /** - * This is the method can be overridden to do pre-commit tasks. - */ - public void preCommit() { - } - - /** - * This is the method can be overridden to do post-commit tasks. - */ - public void postCommit() { - } - - /** - * This is the method that needs to be implemented. - * - * @param t the transaction used for this execution. - * @return the result of the execution. - * - * @throws Exception the Exception thrown - */ - public abstract E execute(Transaction t) throws Exception; - - /** - * Executes the template. - * - * @return the result of the {@link #execute(org.multiverse.api.Transaction)} method. - * - * @throws InvisibleCheckedException if a checked exception was thrown while executing the {@link - * #execute(org.multiverse.api.Transaction)} method. - * @throws AbortedException if the exception was explicitly aborted. - * @throws TooManyRetriesException if the template retried the transaction too many times. The cause of the last - * failure (also an exception) is included as cause. So you have some idea where - * to look for problems - */ - public final E execute() { - try { - return executeChecked(); - } catch (Exception ex) { - if (ex instanceof RuntimeException) { - throw (RuntimeException) ex; - } else { - throw new AtomicTemplate.InvisibleCheckedException(ex); - } - } - } - - /** - * Executes the Template and rethrows the checked exception instead of wrapping it in a InvisibleCheckedException. - * - * @return the result - * - * @throws Exception the Exception thrown inside the {@link #execute(org.multiverse.api.Transaction)} - * method. - * @throws AbortedException if the exception was explicitly aborted. - * @throws TooManyRetriesException if the template retried the transaction too many times. The cause of the last - * failure (also an exception) is included as cause. So you have some idea where to - * look for problems - */ - public final E executeChecked() throws Exception { - preStart(); - Transaction t = getTransaction(); - if (noUsableTransaction(t)) { - t = startTransaction(); - setTransaction(t); - postStart(t); - try { - attemptCount = 1; - Exception lastRetryCause = null; - while (attemptCount - 1 <= retryCount) { - boolean abort = true; - boolean reset = false; - try { - E result = execute(t); - if (t.getStatus().equals(TransactionStatus.aborted)) { - String msg = format("Transaction with familyname %s is aborted", t.getFamilyName()); - throw new AbortedException(msg); - } - preCommit(); - t.commit(); - abort = false; - reset = false; - postCommit(); - return result; - } catch (RetryError e) { - Latch latch = new CheapLatch(); - t.abortAndRegisterRetryLatch(latch); - latch.awaitUninterruptible(); - //since the abort is already done, no need to do it again. 
- abort = false; - } catch (CommitFailureException ex) { - lastRetryCause = ex; - reset = true; - //ignore, just retry the transaction - } catch (LoadException ex) { - lastRetryCause = ex; - reset = true; - //ignore, just retry the transaction - } finally { - if (abort) { - t.abort(); - if (reset) { - t = t.abortAndReturnRestarted(); - setTransaction(t); - } - } - } - attemptCount++; - } - - throw new TooManyRetriesException("Too many retries", lastRetryCause); - } finally { - setTransaction(null); - } - } else { - return execute(t); - } - } - - private Transaction startTransaction() { - return readonly ? stm.startReadOnlyTransaction(familyName) : stm.startUpdateTransaction(familyName); - } - - private boolean noUsableTransaction(Transaction t) { - return t == null || t.getStatus() != TransactionStatus.active; - } - - /** - * Gets the current Transaction stored in the TransactionThreadLocal. - *
    - * If the ignoreThreadLocalTransaction is set, the threadlocal stuff is completeley ignored. - * - * @return the found transaction, or null if none is found. - */ - private Transaction getTransaction() { - return ignoreThreadLocalTransaction ? null : getThreadLocalTransaction(); - } - - /** - * Stores the transaction in the TransactionThreadLocal. - *
    - * This call is ignored if the ignoreThreadLocalTransaction is true. - * - * @param t the transaction to set (is allowed to be null). - */ - private void setTransaction(Transaction t) { - if (!ignoreThreadLocalTransaction) { - setThreadLocalTransaction(t); - } - } - - public static class InvisibleCheckedException extends RuntimeException { - - public InvisibleCheckedException(Exception cause) { - super(cause); - } - - @Override - public Exception getCause() { - return (Exception) super.getCause(); - } - } -} diff --git a/akka-util/pom.xml b/akka-util/pom.xml deleted file mode 100644 index db6ff6df2c..0000000000 --- a/akka-util/pom.xml +++ /dev/null @@ -1,39 +0,0 @@ - - 4.0.0 - - akka-util - Akka Util Module - - jar - - - akka - se.scalablesolutions.akka - 0.7-SNAPSHOT - - - - - org.scala-lang - scala-library - ${scala.version} - - - org.codehaus.aspectwerkz - aspectwerkz-nodeps-jdk5 - 2.1 - - - org.codehaus.aspectwerkz - aspectwerkz-jdk5 - 2.1 - - - net.lag - configgy - ${configgy.version} - - - - diff --git a/akka-util/src/main/scala/Bootable.scala b/akka-util/src/main/scala/Bootable.scala index a46a131f00..172be3fd43 100644 --- a/akka-util/src/main/scala/Bootable.scala +++ b/akka-util/src/main/scala/Bootable.scala @@ -5,6 +5,6 @@ package se.scalablesolutions.akka.util trait Bootable { - def onLoad : Unit = () - def onUnload : Unit = () + def onLoad {} + def onUnload {} } \ No newline at end of file diff --git a/akka-util/src/main/scala/Config.scala b/akka-util/src/main/scala/Config.scala deleted file mode 100644 index f25b08ee46..0000000000 --- a/akka-util/src/main/scala/Config.scala +++ /dev/null @@ -1,74 +0,0 @@ -/** - * Copyright (C) 2009-2010 Scalable Solutions AB - */ - -package se.scalablesolutions.akka - -import util.Logging - -import net.lag.configgy.{Configgy, ParseException} - -/** - * @author Jonas Bonér - */ -object Config extends Logging { - val VERSION = "0.7-SNAPSHOT" - - // Set Multiverse options for max speed - System.setProperty("org.multiverse.MuliverseConstants.sanityChecks", "false") - System.setProperty("org.multiverse.api.GlobalStmInstance.factorymethod", "org.multiverse.stms.alpha.AlphaStm.createFast") - - val HOME = { - val systemHome = System.getenv("AKKA_HOME") - if (systemHome == null || systemHome.length == 0 || systemHome == ".") { - val optionHome = System.getProperty("akka.home", "") - if (optionHome.length != 0) Some(optionHome) - else None - } else Some(systemHome) - } - - val config = { - if (HOME.isDefined) { - try { - val configFile = HOME.get + "/config/akka.conf" - Configgy.configure(configFile) - log.info("AKKA_HOME is defined to [%s], config loaded from [%s].", HOME.get, configFile) - } catch { - case e: ParseException => throw new IllegalStateException( - "'akka.conf' config file can not be found in [" + HOME + "/config/akka.conf] aborting." 
+ - "\n\tEither add it in the 'config' directory or add it to the classpath.") - } - } else if (System.getProperty("akka.config", "") != "") { - val configFile = System.getProperty("akka.config", "") - try { - Configgy.configure(configFile) - log.info("Config loaded from -Dakka.config=%s", configFile) - } catch { - case e: ParseException => throw new IllegalStateException( - "Config could not be loaded from -Dakka.config=" + configFile) - } - } else { - try { - Configgy.configureFromResource("akka.conf", getClass.getClassLoader) - log.info("Config loaded from the application classpath.") - } catch { - case e: ParseException => throw new IllegalStateException( - "\nCan't find 'akka.conf' configuration file." + - "\nOne of the three ways of locating the 'akka.conf' file needs to be defined:" + - "\n\t1. Define 'AKKA_HOME' environment variable to the root of the Akka distribution." + - "\n\t2. Define the '-Dakka.config=...' system property option." + - "\n\t3. Put the 'akka.conf' file on the classpath." + - "\nI have no way of finding the 'akka.conf' configuration file." + - "\nAborting.") - } - } - Configgy.config - } - - val CONFIG_VERSION = config.getString("akka.version", "0") - if (VERSION != CONFIG_VERSION) throw new IllegalStateException( - "Akka JAR version [" + VERSION + "] is different than the provided config ('akka.conf') version [" + CONFIG_VERSION + "]") - val startTime = System.currentTimeMillis - - def uptime = (System.currentTimeMillis - startTime) / 1000 -} diff --git a/akka-util/src/main/scala/Helpers.scala b/akka-util/src/main/scala/Helpers.scala index b7e5ff3b75..55abf6e7ac 100644 --- a/akka-util/src/main/scala/Helpers.scala +++ b/akka-util/src/main/scala/Helpers.scala @@ -40,7 +40,6 @@ object Helpers extends Logging { } // ================================================ - @serializable class ReadWriteLock { private val rwl = new ReentrantReadWriteLock private val readLock = rwl.readLock diff --git a/akka-util/src/main/scala/Logging.scala b/akka-util/src/main/scala/Logging.scala index a6b89b86b2..fc68e27178 100644 --- a/akka-util/src/main/scala/Logging.scala +++ b/akka-util/src/main/scala/Logging.scala @@ -6,10 +6,10 @@ package se.scalablesolutions.akka.util import net.lag.logging.Logger -import java.io.StringWriter; -import java.io.PrintWriter; -import java.net.InetAddress; -import java.net.UnknownHostException; +import java.io.StringWriter +import java.io.PrintWriter +import java.net.InetAddress +import java.net.UnknownHostException /** * Base trait for all classes that wants to be able use the logging infrastructure. 
/** @@ -30,6 +30,7 @@ trait Logging { * * @author Jonas Bonér */ + // FIXME make use of LoggableException class LoggableException extends Exception with Logging { private val uniqueId = getExceptionID private var originalException: Option[Exception] = None diff --git a/akka.iml b/akka.iml index 2f07a75716..74542e8e48 100644 --- a/akka.iml +++ b/akka.iml @@ -2,6 +2,23 @@ diff --git a/changes.xml b/changes.xml deleted file mode 100644 index 90a9e31c88..0000000000 --- a/changes.xml +++ /dev/null @@ -1,86 +0,0 @@ - - - - - - - Akka Release Notes - Jonas Bonér - - - - Clustered Comet using Akka remote actors and clustered membership API - Cluster membership API and implementation based on JGroups - Security module for HTTP-based authentication and authorization - Support for using Scala XML tags in RESTful Actors (scala-jersey) - Support for Comet Actors using Atmosphere - MongoDB as Akka storage backend - Redis as Akka storage backend - Transparent JSON serialization of Scala objects based on SJSON - Kerberos/SPNEGO support for Security module - Implicit sender for remote actors: Remote actors are able to use reply to answer a request - Support for using the Lift Web framework with Actors - Rewritten STM, now integrated with Multiverse STM - Added STM API for atomic {..} and run {..} orElse {..} - Added STM retry - Complete rewrite of the persistence transaction management, now based on Unit of Work and Multiverse STM - Monadic API to TransactionalRef (use it in for-comprehension) - Lightweight actor syntax using one of the Actor.actor(..) methods. F.e: 'val a = actor { case _ => .. }' - Rewritten event-based dispatcher which improved performance by 10x, now substantially faster than event-driven Scala Actors - New Scala JSON parser based on sjson - Added zlib compression to remote actors - Added implicit sender reference for fire-forget ('!') message sends - Monadic API to TransactionalRef (use it in for-comprehension) - Smoother web app integration; just add akka.conf to the classpath (WEB-INF/classes), no need for AKKA_HOME or -Dakka.conf=.. - Modularization of distribution into a thin core (actors, remoting and STM) and the rest in submodules - Added 'forward' to Actor, forwards message but keeps original sender address - JSON serialization for Java objects (using Jackson) - JSON serialization for Scala objects (using SJSON) - Added implementation for remote actor reconnect upon failure - Protobuf serialization for Java and Scala objects - SBinary serialization for Scala objects - Protobuf as remote protocol - AMQP integration; abstracted as actors in a supervisor hierarchy. Impl AMQP 0.9.1 - Updated Cassandra integration and CassandraSession API to v0.4 - Added CassandraSession API (with socket pooling) wrapping Cassandra's Thrift API in Scala and Java APIs - CassandraStorage now works with external Cassandra cluster - ActorRegistry for retrieving Actor instances by class name and by id - SchedulerActor for scheduling periodic tasks - Now start up kernel with 'java -jar dist/akka-0.6.jar' - Added mailing list: akka-user@googlegroups.com - Improved and restructured documentation - New URL: http://akkasource.org - New and much improved docs - Enhanced trapping of failures: 'trapExit = List(classOf[..], classOf[..])' - Upgraded to Netty 3.2, Protobuf 2.2, ScalaTest 1.0, Jersey 1.1.3, Atmosphere 0.4.1, Cassandra 0.4.1, Configgy 1.4 - Lowered actor memory footprint; now an actor consumes ~600 bytes, which means that you can create 6.5 million on 4 G RAM - Removed concurrent mode - Remote actors are now defined by their UUID (not class name) - Fixed dispatcher bugs - Cleaned up Maven scripts and distribution in general - Fixed many many bugs and minor issues - Fixed inconsistencies and ugliness in Actors API - Removed embedded Cassandra mode - Removed the !? method in Actor (synchronous message send) since it's evil. Use !! with time-out instead. - Removed startup scripts and lib dir - Removed the 'Transient' life-cycle scope since it is too close to 'Temporary' in semantics. - Removed 'Transient' Actors and restart timeout - - - - \ No newline at end of file
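One of the removed release notes above is worth making concrete: the lightweight actor syntax ('val a = actor { case _ => .. }'). A hedged sketch of that usage, assuming the Actor.actor(..) factory named in the note (import path and message handling are illustrative only, not verified against this tree):

    import se.scalablesolutions.akka.actor.Actor.actor

    val a = actor {                            // spawns an anonymous actor from a partial function
      case msg => println("received: " + msg)
    }
    a ! "hello"                                // fire-forget send ('!') with implicit sender, per the notes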
diff --git a/config/akka-reference.conf b/config/akka-reference.conf index 749b599e0b..5bdb644e75 100644 --- a/config/akka-reference.conf +++ b/config/akka-reference.conf @@ -8,19 +8,20 @@ filename = "./logs/akka.log" roll = "daily" # Options: never, hourly, daily, sunday/monday/... - level = "debug" # Options: fatal, critical, error, warning, info, debug, trace + level = "info" # Options: fatal, critical, error, warning, info, debug, trace console = on # syslog_host = "" # syslog_server_name = "" - version = "0.7-SNAPSHOT" + version = "0.7" # FQN to the class doing initial active object/actor # supervisor bootstrap, should be defined in default constructor - boot = ["sample.java.Boot", - "sample.scala.Boot", + boot = ["sample.camel.Boot", + "sample.java.Boot", + "sample.scala.Boot", "se.scalablesolutions.akka.security.samples.Boot"] @@ -30,8 +31,10 @@ service = on - max-nr-of-retries = 100 - distributed = off # not implemented yet + fair = on # should transactions be fair or non-fair (non-fair yields better performance) + max-nr-of-retries = 1000 # max nr of retries of a failing transaction before giving up + timeout = 10000 # transaction timeout; if the transaction has not committed within the timeout it is aborted + distributed = off # not implemented yet @@ -47,9 +50,10 @@ zlib-compression-level = 6 # Options: 0-9 (1 being fastest and 9 being the most compressed), default is 6 - name = "default" # The name of the cluster - #actor = "se.scalablesolutions.akka.remote.JGroupsClusterActor" # FQN of an implementation of ClusterActor - serializer = "se.scalablesolutions.akka.serialization.Serializer$Java" # FQN of the serializer class + service = on + name = "default" # The name of the cluster + actor = "se.scalablesolutions.akka.cluster.jgroups.JGroupsClusterActor" # FQN of an implementation of ClusterActor + serializer = "se.scalablesolutions.akka.serialization.Serializer$Java$" # FQN of the serializer class diff --git a/config/akka.conf b/config/akka.conf index 94f630089a..84b9bfbbcf 100644 --- a/config/akka.conf +++ b/config/akka.conf @@ -1,4 +1,4 @@ -# This config import the Akka reference configuration. +# This config imports the Akka reference configuration. include "akka-reference.conf" # In this file you can override any option defined in the 'akka-reference.conf' file.
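The (removed) Config object earlier in this diff shows how these files are consumed at runtime; a minimal sketch of the same Configgy calls, using the akka.version key that Config itself reads (file path illustrative):

    import net.lag.configgy.Configgy

    Configgy.configure("config/akka.conf")   // or: Configgy.configureFromResource("akka.conf", getClass.getClassLoader)
    val configVersion = Configgy.config.getString("akka.version", "0")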
- level = "debug" # Options: fatal, critical, error, warning, info, debug, trace + level = "info" # Options: fatal, critical, error, warning, info, debug, trace console = on # syslog_host = "" # syslog_server_name = "" - version = "0.7-SNAPSHOT" + version = "0.7" # FQN to the class doing initial active object/actor # supervisor bootstrap, should be defined in default constructor - boot = ["sample.java.Boot", - "sample.scala.Boot", + boot = ["sample.camel.Boot", + "sample.java.Boot", + "sample.scala.Boot", "se.scalablesolutions.akka.security.samples.Boot"] @@ -30,8 +31,10 @@ service = on - max-nr-of-retries = 100 - distributed = off # not implemented yet + fair = on # should transactions be fair or non-fair (non fair yield better performance) + max-nr-of-retries = 1000 # max nr of retries of a failing transaction before giving up + timeout = 10000 # transaction timeout; if transaction has not committed within the timeout then it is aborted + distributed = off # not implemented yet @@ -47,9 +50,10 @@ zlib-compression-level = 6 # Options: 0-9 (1 being fastest and 9 being the most compressed), default is 6 - name = "default" # The name of the cluster - #actor = "se.scalablesolutions.akka.remote.JGroupsClusterActor" # FQN of an implementation of ClusterActor - serializer = "se.scalablesolutions.akka.serialization.Serializer$Java" # FQN of the serializer class + service = on + name = "default" # The name of the cluster + actor = "se.scalablesolutions.akka.cluster.jgroups.JGroupsClusterActor" # FQN of an implementation of ClusterActor + serializer = "se.scalablesolutions.akka.serialization.Serializer$Java$" # FQN of the serializer class diff --git a/config/akka.conf b/config/akka.conf index 94f630089a..84b9bfbbcf 100644 --- a/config/akka.conf +++ b/config/akka.conf @@ -1,4 +1,4 @@ -# This config import the Akka reference configuration. +# This config imports the Akka reference configuration. include "akka-reference.conf" # In this file you can override any option defined in the 'akka-reference.conf' file. diff --git a/deploy/.keep b/deploy/.keep deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/embedded-repo/com/redis/redisclient/1.0.1/redisclient-1.0.1.jar b/embedded-repo/com/redis/redisclient/1.0.1/redisclient-1.0.1.jar deleted file mode 100644 index 31ff1b2c0d..0000000000 Binary files a/embedded-repo/com/redis/redisclient/1.0.1/redisclient-1.0.1.jar and /dev/null differ diff --git a/embedded-repo/com/redis/redisclient/1.0.1/redisclient-1.0.1.pom b/embedded-repo/com/redis/redisclient/1.0.1/redisclient-1.0.1.pom deleted file mode 100755 index f247482f72..0000000000 --- a/embedded-repo/com/redis/redisclient/1.0.1/redisclient-1.0.1.pom +++ /dev/null @@ -1,8 +0,0 @@ - - - 4.0.0 - com.redis - redisclient - 1.0.1 - jar - \ No newline at end of file diff --git a/embedded-repo/com/redis/redisclient/1.2-SNAPSHOT/redisclient-1.2-SNAPSHOT.jar b/embedded-repo/com/redis/redisclient/1.2-SNAPSHOT/redisclient-1.2-SNAPSHOT.jar new file mode 100644 index 0000000000..88815a75d9 Binary files /dev/null and b/embedded-repo/com/redis/redisclient/1.2-SNAPSHOT/redisclient-1.2-SNAPSHOT.jar differ diff --git a/pom.xml b/pom.xml deleted file mode 100644 index 6de267e1dc..0000000000 --- a/pom.xml +++ /dev/null @@ -1,570 +0,0 @@ - - - 4.0.0 - - Akka Project - akka - se.scalablesolutions.akka - 0.7-SNAPSHOT - 2009 - http://akkasource.org - pom - - - Akka implements a unique hybrid of: - * Actors , which gives you: - * Simple and high-level abstractions for concurrency and parallelism. 
- * Asynchronous, non-blocking and highly performant event-driven programming model. - * Very lightweight event-driven processes (create ~6.5 million actors on 4 G RAM). - * Supervision hierarchies with let-it-crash semantics. For writing highly fault-tolerant systems that never stop, systems that self-heal. - * Software Transactional Memory (STM). (Distributed transactions coming soon). - * Transactors: combine actors and STM into transactional actors. Allows you to compose atomic message flows with automatic rollback and retry. - * Remoting: highly performant distributed actors with remote supervision and error management. - * Cluster membership management. - - Akka also has a set of add-on modules: - * Persistence: A set of pluggable back-end storage modules that works in sync with the STM. - * Cassandra distributed and highly scalable database. - * MongoDB document database. - * Redis data structures database (upcoming) - * REST (JAX-RS): Expose actors as REST services. - * Comet: Expose actors as Comet services. - * Security: Digest and Kerberos based security. - * Microkernel: Run Akka as a stand-alone kernel. - - - - 2.8.0.Beta1 - UTF-8 - 1.5 - ${maven.compiler.source} - ${project.build.sourceEncoding} - ${project.build.sourceEncoding} - 0.5.2 - 1.1.5 - 1.9.18-i - 1.0.1-for-scala-2.8.0.Beta1-with-test-interfaces-0.3-SNAPSHOT - 0.7.0 - 2.0-scala280-SNAPSHOT - 2.8.0.Beta1-1.5-SNAPSHOT - 4.5 - - - - akka-util-java - akka-util - akka-cluster - akka-core - akka-persistence - akka-rest - akka-comet - akka-amqp - akka-security - akka-patterns - akka-kernel - akka-fun-test-java - akka-samples - - - - Scalable Solutions AB - http://scalablesolutions.se - - - - - The Apache License, ASL Version 2.0 - http://www.apache.org/licenses/LICENSE-2.0 - - - - - - jboner - Jonas Bonér - +1 - jonas [REMOVE] AT jonasboner DOT com - - Founder - Hacker - Despot - - - - viktorklang - Viktor Klang - +1 - viktor.klang [REMOVE] AT gmail DOT com - - Apostle - - - - - - scm:git:git://github.com/jboner/akka.git - scm:git:git@github.com:jboner/akka.git - http://github.com/jboner/akka - - - - assembla - http://assembla.com/spaces/akka/ - - - - hudson - http://hudson.scala-tools.org/job/akka/ - - - - - - - - - User and Developer Discussion List - http://groups.google.com/group/akka-user - akka-user@googlegroups.com - akka-user+subscribe@googlegroups.com - akka-user+unsubscribe@googlegroups.com - - - - - - project.embedded.module - Project Embedded Repository - file://${env.AKKA_HOME}/embedded-repo - - - repo1.maven - Maven Main Repository - http://repo1.maven.org/maven2 - - - scala-tools-snapshots - Scala-Tools Maven2 Snapshot Repository - http://scala-tools.org/repo-snapshots - - - scala-tools - Scala-Tools Maven2 Repository - http://scala-tools.org/repo-releases - - - lag - Configgy's' Repository - http://www.lag.net/repo - - - multiverse-releases - http://multiverse.googlecode.com/svn/maven-repository/releases - - false - - - - multiverse-snaphosts - http://multiverse.googlecode.com/svn/maven-repository/snapshots - - - maven2-repository.dev.java.net - Java.net Repository for Maven - http://download.java.net/maven/2 - - - java.net - Java.net Legacy Repository for Maven - http://download.java.net/maven/1 - legacy - - - guiceyfruit.release - GuiceyFruit Release Repository - http://guiceyfruit.googlecode.com/svn/repo/releases/ - - false - - - true - - - - guiceyfruit.snapshot - GuiceyFruit Snapshot Repository - http://guiceyfruit.googlecode.com/svn/repo/snapshots/ - - true - - - false - - - - guice-maven - guice 
maven - http://guice-maven.googlecode.com/svn/trunk - - - google-maven-repository - Google Maven Repository - http://google-maven-repository.googlecode.com/svn/repository/ - - - repository.codehaus.org - Codehaus Maven Repository - http://repository.codehaus.org - - true - - - - repository.jboss.org - JBoss Repository for Maven - http://repository.jboss.org/maven2 - - false - - - - nexus.griddynamics.net - Grid Dynamics Maven Repository - https://nexus.griddynamics.net/nexus/content/groups/public - - false - - - - databinder.net/repo/ - dbDispatch Repository for Maven - http://databinder.net/repo - - false - - - - - - - onejar-maven-plugin.googlecode.com - http://onejar-maven-plugin.googlecode.com/svn/mavenrepo - - - scala-tools.org - Scala-Tools Maven2 Repository - http://scala-tools.org/repo-releases - - - - - src/main/scala - src/test/scala - - - org.apache.maven.plugins - maven-enforcer-plugin - 1.0-beta-1 - - - enforce-akka-home - - enforce - - - - - env.AKKA_HOME - "You must have set AKKA_HOME!" - - - - ${env.AKKA_HOME}/embedded-repo - - - - true - - - - enforce-java - - enforce - - - - - 1.6.0 - - - - - - - - org.apache.maven.plugins - maven-surefire-plugin - 2.4.2 - - - **/*Test.java - - - - **/InMemNestedStateTest.java - - - - akka.home - ${basedir}/.. - - - org.multiverse.api.exceptions.WriteConflictException.reuse - true - - - - - - org.mortbay.jetty - maven-jetty-plugin - - / - 5 - - - - org.apache.maven.plugins - maven-compiler-plugin - 2.0.2 - - 1.5 - 1.5 - - - - org.scala-tools - maven-scala-plugin - 2.10.1 - - - - compile - testCompile - - - - - - -Xmx1024m - - - ${scala.version} - - - - true - maven-source-plugin - - - attach-sources - - jar - - - - - - org.apache.maven.plugins - maven-changes-plugin - 2.0 - - - org.apache.maven.plugins - maven-jar-plugin - 2.2 - - - - ${project.version} - - - - - - org.apache.maven.plugins - maven-antrun-plugin - - - - - - org.apache.maven.plugins - maven-enforcer-plugin - 1.0-beta-1 - - - org.apache.felix - maven-bundle-plugin - 2.0.0 - true - - - J2SE-1.5 - <_versionpolicy>[$(@),$(version;=+;$(@))) - - - - - create-bundle - package - - bundle - - - - bundle-install - install - - install - - - - - - - - - - - org.codehaus.mojo - taglist-maven-plugin - 2.3 - - - FIXME - TODO - XXX - @fixme - @todo - @deprecated - - - - - org.apache.maven.plugins - maven-project-info-reports-plugin - 2.1.2 - - - - cim - dependencies - dependency-convergence - - index - issue-tracking - license - mailing-list - - plugins - project-team - scm - summary - - - - - - org.scala-tools - maven-scala-plugin - 2.12.2 - - ${project.build.sourceEncoding} - - 1.2-SNAPSHOT - ${scala.version} - - -Xmx1024m - -DpackageLinkDefs=file://${project.build.directory}/packageLinkDefs.properties - - - - - - - org.apache.maven.plugins - maven-changes-plugin - 2.1 - - - - changes-report - - - - - ${basedir}/changes.xml - - - - maven-surefire-report-plugin - - - - - report-only - - - - - - - - - release - - - scala-tools.org - http://nexus.scala-tools.org/content/repositories/releases - - - scala-tools.org - file://${user.home}/.m2/mvnsites/akka - - - - - hudson - - - hudson.scala-tools.org - file:///home/scala-tools.org/www/repo-snapshots - - - hudson.scala-tools.org - file:///home/scala-tools.org/www/repo-snapshots - false - - - hudson.scala-tools.org - file:///home/scala-tools.org/www/mvnsites-snapshots/akka - - - - - diff --git a/project/build.properties b/project/build.properties new file mode 100644 index 0000000000..00c7497f06 --- /dev/null +++ b/project/build.properties @@ 
-0,0 +1,7 @@ +project.organization=se.scalablesolutions.akka +project.name=akka +project.version=0.7-2.8 +scala.version=2.8.0.Beta1 +sbt.version=0.7.1 +def.scala.version=2.7.7 +build.scala.versions=2.8.0.Beta1 diff --git a/project/build/AkkaProject.scala b/project/build/AkkaProject.scala new file mode 100644 index 0000000000..c75fa97caa --- /dev/null +++ b/project/build/AkkaProject.scala @@ -0,0 +1,424 @@ +/*------------------------------------------------------------------------------- + Copyright (C) 2009-2010 Scalable Solutions AB + + ---------------------------------------------------- + -------- sbt buildfile for the Akka project -------- + ---------------------------------------------------- + + Akka implements a unique hybrid of: + * Actors, which gives you: + * Simple and high-level abstractions for concurrency and parallelism. + * Asynchronous, non-blocking and highly performant event-driven programming model. + * Very lightweight event-driven processes (create ~6.5 million actors on 4 G RAM). + * Supervision hierarchies with let-it-crash semantics. For writing highly + fault-tolerant systems that never stop, systems that self-heal. + * Software Transactional Memory (STM). (Distributed transactions coming soon). + * Transactors: combine actors and STM into transactional actors. Allows you to + compose atomic message flows with automatic rollback and retry. + * Remoting: highly performant distributed actors with remote supervision and + error management. + * Cluster membership management. + + Akka also has a set of add-on modules: + * Persistence: A set of pluggable back-end storage modules that works in sync with the STM. + * Cassandra distributed and highly scalable database. + * MongoDB document database. + * Redis data structures database (upcoming) + * Camel: Expose Actors as Camel endpoints. + * REST (JAX-RS): Expose actors as REST services. + * Comet: Expose actors as Comet services. + * Security: Digest and Kerberos based security. + * Spring: Spring integration + * Guice: Guice integration + * Microkernel: Run Akka as a stand-alone kernel. + +-------------------------------------------------------------------------------*/ + +import sbt._ +import java.io.File +import java.util.jar.Attributes + +class AkkaParent(info: ProjectInfo) extends DefaultProject(info) { + + // ------------------------------------------------------------ + // project versions + val JERSEY_VERSION = "1.1.5" + val ATMO_VERSION = "0.5.4" + val CASSANDRA_VERSION = "0.5.0" + val LIFT_VERSION = "2.0-scala280-SNAPSHOT" + val SCALATEST_VERSION = "1.0.1-for-scala-2.8.0.Beta1-with-test-interfaces-0.3-SNAPSHOT" + + // ------------------------------------------------------------ + lazy val akkaHome = { + val home = System.getenv("AKKA_HOME") + if (home == null) throw new Error( + "You need to set the $AKKA_HOME environment variable to the root of the Akka distribution") + home + } + lazy val deployPath = Path.fromFile(new java.io.File(akkaHome + "/deploy")) + lazy val distPath = Path.fromFile(new java.io.File(akkaHome + "/dist")) + + lazy val dist = zipTask(allArtifacts, "dist", distName) dependsOn (`package`) describedAs("Zips up the distribution.") + + def distName = "%s_%s-%s.zip".format(name, defScalaVersion.value, version)
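With the values from project/build.properties above (project.name=akka, def.scala.version=2.7.7, project.version=0.7-2.8), distName evaluates to "akka_2.7.7-0.7-2.8.zip". A standalone check of that format string (values hard-coded here; in the real build sbt supplies name, defScalaVersion and version):

    object DistNameCheck extends App {
      val (name, defScalaVersion, version) = ("akka", "2.7.7", "0.7-2.8")
      println("%s_%s-%s.zip".format(name, defScalaVersion, version))  // prints akka_2.7.7-0.7-2.8.zip
    }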
+ + // ------------------------------------------------------------ + // repositories + val embeddedrepo = "embedded repo" at new File(akkaHome, "embedded-repo").toURI.toString + val sunjdmk = "sunjdmk" at "http://wp5.e-taxonomy.eu/cdmlib/mavenrepo" + val databinder = "DataBinder" at "http://databinder.net/repo" + val configgy = "Configgy" at "http://www.lag.net/repo" + val codehaus = "Codehaus" at "http://repository.codehaus.org" + val codehaus_snapshots = "Codehaus Snapshots" at "http://snapshots.repository.codehaus.org" + val jboss = "jBoss" at "http://repository.jboss.org/maven2" + val guiceyfruit = "GuiceyFruit" at "http://guiceyfruit.googlecode.com/svn/repo/releases/" + val google = "google" at "http://google-maven-repository.googlecode.com/svn/repository" + val m2 = "m2" at "http://download.java.net/maven/2" + + // ------------------------------------------------------------ + // project definitions + lazy val akka_java_util = project("akka-util-java", "akka-util-java", new AkkaJavaUtilProject(_)) + lazy val akka_util = project("akka-util", "akka-util", new AkkaUtilProject(_)) + lazy val akka_core = project("akka-core", "akka-core", new AkkaCoreProject(_), akka_util, akka_java_util) + lazy val akka_amqp = project("akka-amqp", "akka-amqp", new AkkaAMQPProject(_), akka_core) + lazy val akka_rest = project("akka-rest", "akka-rest", new AkkaRestProject(_), akka_core) + lazy val akka_comet = project("akka-comet", "akka-comet", new AkkaCometProject(_), akka_rest) + lazy val akka_camel = project("akka-camel", "akka-camel", new AkkaCamelProject(_), akka_core) + lazy val akka_patterns = project("akka-patterns", "akka-patterns", new AkkaPatternsProject(_), akka_core) + lazy val akka_security = project("akka-security", "akka-security", new AkkaSecurityProject(_), akka_core) + lazy val akka_persistence = project("akka-persistence", "akka-persistence", new AkkaPersistenceParentProject(_)) + lazy val akka_cluster = project("akka-cluster", "akka-cluster", new AkkaClusterParentProject(_)) + lazy val akka_spring = project("akka-spring", "akka-spring", new AkkaSpringProject(_), akka_core) + lazy val akka_kernel = project("akka-kernel", "akka-kernel", new AkkaKernelProject(_), + akka_core, akka_rest, akka_spring, akka_camel, akka_persistence, + akka_cluster, akka_amqp, akka_security, akka_comet, akka_patterns)
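Each definition above follows sbt 0.7's project(path, name, constructor, dependencies*) shape: trailing arguments become inter-module dependencies, which is how akka_kernel ends up aggregating nearly every module. A hedged sketch of how one more, purely hypothetical module would be wired in, mirroring the subproject classes defined below:

    lazy val akka_example = project("akka-example", "akka-example", new AkkaExampleProject(_), akka_core)

    class AkkaExampleProject(info: ProjectInfo) extends DefaultProject(info) {
      // library dependencies declared as vals are picked up reflectively by sbt
      val scalatest = "org.scalatest" % "scalatest" % SCALATEST_VERSION % "test"
      lazy val dist = deployTask(info, distPath) dependsOn(`package`) describedAs("Deploying")
    }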
+ + // functional tests in java + lazy val akka_fun_test = project("akka-fun-test-java", "akka-fun-test-java", new AkkaFunTestProject(_), akka_kernel) + + // examples + lazy val akka_samples = project("akka-samples", "akka-samples", new AkkaSamplesParentProject(_)) + + // ------------------------------------------------------------ + // create executable jar + override def mainClass = Some("se.scalablesolutions.akka.kernel.Main") + + override def packageOptions = + manifestClassPath.map(cp => ManifestAttributes((Attributes.Name.CLASS_PATH, cp))).toList ::: + getMainClass(false).map(MainClass(_)).toList + + // create a manifest with all akka jars and dependency jars on classpath + override def manifestClassPath = Some(allArtifacts.getFiles + .filter(_.getName.endsWith(".jar")) + .map("lib_managed/scala_%s/compile/".format(defScalaVersion.value) + _.getName) + .mkString(" ") + + " dist/akka-util_%s-%s.jar".format(defScalaVersion.value, version) + + " dist/akka-util-java_%s-%s.jar".format(defScalaVersion.value, version) + + " dist/akka-core_%s-%s.jar".format(defScalaVersion.value, version) + + " dist/akka-cluster-shoal_%s-%s.jar".format(defScalaVersion.value, version) + + " dist/akka-cluster-jgroups_%s-%s.jar".format(defScalaVersion.value, version) + + " dist/akka-rest_%s-%s.jar".format(defScalaVersion.value, version) + + " dist/akka-comet_%s-%s.jar".format(defScalaVersion.value, version) + + " dist/akka-camel_%s-%s.jar".format(defScalaVersion.value, version) + + " dist/akka-security_%s-%s.jar".format(defScalaVersion.value, version) + + " dist/akka-amqp_%s-%s.jar".format(defScalaVersion.value, version) + + " dist/akka-patterns_%s-%s.jar".format(defScalaVersion.value, version) + + " dist/akka-persistence-common_%s-%s.jar".format(defScalaVersion.value, version) + + " dist/akka-persistence-redis_%s-%s.jar".format(defScalaVersion.value, version) + + " dist/akka-persistence-mongo_%s-%s.jar".format(defScalaVersion.value, version) + + " dist/akka-persistence-cassandra_%s-%s.jar".format(defScalaVersion.value, version) + + " dist/akka-kernel_%s-%s.jar".format(defScalaVersion.value, version) + + " dist/akka-spring_%s-%s.jar".format(defScalaVersion.value, version) + ) + + // ------------------------------------------------------------ + // publishing + override def managedStyle = ManagedStyle.Maven + val publishTo = Resolver.file("maven-local", Path.userHome / ".m2" / "repository" asFile) + + // Credentials(Path.userHome / ".akka_publish_credentials", log) + val sourceArtifact = Artifact(artifactID, "src", "jar", Some("sources"), Nil, None) + //val docsArtifact = Artifact(artifactID, "docs", "jar", Some("javadoc"), Nil, None) + + override def packageDocsJar = defaultJarPath("-javadoc.jar") + override def packageSrcJar = defaultJarPath("-sources.jar") + override def packageToPublishActions = super.packageToPublishActions ++ Seq(packageDocs, packageSrc) + + override def pomExtra = + <inceptionYear>2009</inceptionYear> + <url>http://akkasource.org</url> + <organization> + <name>Scalable Solutions AB</name> + <url>http://scalablesolutions.se</url> + </organization> + <licenses> + <license> + <name>Apache 2</name> + <url>http://www.apache.org/licenses/LICENSE-2.0.txt</url> + <distribution>repo</distribution> + </license> + </licenses> + + // ------------------------------------------------------------ + // subprojects + class AkkaCoreProject(info: ProjectInfo) extends DefaultProject(info) { + val netty = "org.jboss.netty" % "netty" % "3.2.0.BETA1" % "compile" + val commons_io = "commons-io" % "commons-io" % "1.4" % "compile" + val dispatch_json = "net.databinder" % "dispatch-json_2.7.7" % "0.6.4" % "compile" + val dispatch_http = "net.databinder" % "dispatch-http_2.7.7" % "0.6.4" % "compile" + val sjson = "sjson.json" % "sjson" % "0.4" %
"compile" +// val sbinary = "sbinary" % "sbinary" % "0.3" % "compile" + val jackson = "org.codehaus.jackson" % "jackson-mapper-asl" % "1.2.1" % "compile" + val jackson_core = "org.codehaus.jackson" % "jackson-core-asl" % "1.2.1" % "compile" + val voldemort = "voldemort.store.compress" % "h2-lzf" % "1.0" % "compile" + // testing + val scalatest = "org.scalatest" % "scalatest" % SCALATEST_VERSION % "test" + val junit = "junit" % "junit" % "4.5" % "test" + lazy val dist = deployTask(info, distPath) dependsOn(`package`) describedAs("Deploying") + } + + class AkkaUtilProject(info: ProjectInfo) extends DefaultProject(info) { + val werkz = "org.codehaus.aspectwerkz" % "aspectwerkz-nodeps-jdk5" % "2.1" % "compile" + val werkz_core = "org.codehaus.aspectwerkz" % "aspectwerkz-jdk5" % "2.1" % "compile" + val configgy = "net.lag" % "configgy" % "2.8.0.Beta1-1.5-SNAPSHOT" % "compile" + lazy val dist = deployTask(info, distPath) dependsOn(`package`) describedAs("Deploying") + } + + class AkkaJavaUtilProject(info: ProjectInfo) extends DefaultProject(info) { + val guicey = "org.guiceyfruit" % "guice-core" % "2.0-beta-4" % "compile" + val protobuf = "com.google.protobuf" % "protobuf-java" % "2.2.0" % "compile" + val multiverse = "org.multiverse" % "multiverse-alpha" % "0.4" % "compile" + lazy val dist = deployTask(info, distPath) dependsOn(`package`) describedAs("Deploying") + } + + class AkkaAMQPProject(info: ProjectInfo) extends DefaultProject(info) { + val commons_io = "commons-io" % "commons-io" % "1.4" % "compile" + val rabbit = "com.rabbitmq" % "amqp-client" % "1.7.2" + lazy val dist = deployTask(info, distPath) dependsOn(`package`) describedAs("Deploying") + } + + class AkkaRestProject(info: ProjectInfo) extends DefaultProject(info) { + val jackson_core_asl = "org.codehaus.jackson" % "jackson-core-asl" % "1.2.1" % "compile" + val stax_api = "javax.xml.stream" % "stax-api" % "1.0-2" % "compile" + val servlet = "javax.servlet" % "servlet-api" % "2.5" % "compile" + val jersey = "com.sun.jersey" % "jersey-core" % JERSEY_VERSION % "compile" + val jersey_server = "com.sun.jersey" % "jersey-server" % JERSEY_VERSION % "compile" + val jersey_json = "com.sun.jersey" % "jersey-json" % JERSEY_VERSION % "compile" + val jersey_contrib = "com.sun.jersey.contribs" % "jersey-scala" % JERSEY_VERSION % "compile" + val jsr311 = "javax.ws.rs" % "jsr311-api" % "1.1" % "compile" + lazy val dist = deployTask(info, distPath) dependsOn(`package`) describedAs("Deploying") + } + + class AkkaCometProject(info: ProjectInfo) extends DefaultProject(info) { + val grizzly = "com.sun.grizzly" % "grizzly-comet-webserver" % "1.9.18-i" % "compile" + val servlet = "javax.servlet" % "servlet-api" % "2.5" % "compile" + val atmo = "org.atmosphere" % "atmosphere-annotations" % ATMO_VERSION % "compile" + val atmo_jersey = "org.atmosphere" % "atmosphere-jersey" % ATMO_VERSION % "compile" + val atmo_runtime = "org.atmosphere" % "atmosphere-runtime" % ATMO_VERSION % "compile" + lazy val dist = deployTask(info, distPath) dependsOn(`package`) describedAs("Deploying") + } + + class AkkaCamelProject(info: ProjectInfo) extends DefaultProject(info) { + val camel_core = "org.apache.camel" % "camel-core" % "2.2.0" % "compile" + lazy val dist = deployTask(info, distPath) dependsOn(`package`) describedAs("Deploying") + } + + class AkkaPatternsProject(info: ProjectInfo) extends DefaultProject(info) { + // testing + val scalatest = "org.scalatest" % "scalatest" % SCALATEST_VERSION % "test" + val junit = "junit" % "junit" % "4.5" % "test" + lazy val dist = 
deployTask(info, distPath) dependsOn(`package`) describedAs("Deploying") + } + + class AkkaSecurityProject(info: ProjectInfo) extends DefaultProject(info) { + val commons_logging = "commons-logging" % "commons-logging" % "1.1.1" % "compile" + val annotation = "javax.annotation" % "jsr250-api" % "1.0" + val jersey_server = "com.sun.jersey" % "jersey-server" % JERSEY_VERSION % "compile" + val jsr311 = "javax.ws.rs" % "jsr311-api" % "1.1" % "compile" + val lift_util = "net.liftweb" % "lift-util" % LIFT_VERSION % "compile" + // testing + val scalatest = "org.scalatest" % "scalatest" % SCALATEST_VERSION % "test" + val junit = "junit" % "junit" % "4.5" % "test" + val mockito = "org.mockito" % "mockito-all" % "1.8.1" % "test" + lazy val dist = deployTask(info, distPath) dependsOn(`package`) describedAs("Deploying") + } + + class AkkaPersistenceCommonProject(info: ProjectInfo) extends DefaultProject(info) { + val thrift = "com.facebook" % "thrift" % "1.0" % "compile" + val commons_pool = "commons-pool" % "commons-pool" % "1.5.1" % "compile" + lazy val dist = deployTask(info, distPath) dependsOn(`package`) describedAs("Deploying") + } + + class AkkaRedisProject(info: ProjectInfo) extends DefaultProject(info) { + val redis = "com.redis" % "redisclient" % "1.2-SNAPSHOT" % "compile" + override def testOptions = TestFilter((name: String) => name.endsWith("Test")) :: Nil + lazy val dist = deployTask(info, distPath) dependsOn(`package`) describedAs("Deploying") + } + + class AkkaMongoProject(info: ProjectInfo) extends DefaultProject(info) { + val mongo = "org.mongodb" % "mongo-java-driver" % "1.1" % "compile" + override def testOptions = TestFilter((name: String) => name.endsWith("Test")) :: Nil + lazy val dist = deployTask(info, distPath) dependsOn(`package`) describedAs("Deploying") + } + + class AkkaCassandraProject(info: ProjectInfo) extends DefaultProject(info) { + val cassandra = "org.apache.cassandra" % "cassandra" % CASSANDRA_VERSION % "compile" + val high_scale = "org.apache.cassandra" % "high-scale-lib" % CASSANDRA_VERSION % "test" + val cassandra_clhm = "org.apache.cassandra" % "clhm-production" % CASSANDRA_VERSION % "test" + val commons_coll = "commons-collections" % "commons-collections" % "3.2.1" % "test" + val google_coll = "com.google.collections" % "google-collections" % "1.0" % "test" + val slf4j = "org.slf4j" % "slf4j-api" % "1.5.8" % "test" + val slf4j_log4j = "org.slf4j" % "slf4j-log4j12" % "1.5.8" % "test" + val log4j = "log4j" % "log4j" % "1.2.15" % "test" + override def testOptions = TestFilter((name: String) => name.endsWith("Test")) :: Nil + lazy val dist = deployTask(info, distPath) dependsOn(`package`) describedAs("Deploying") + } + + class AkkaPersistenceParentProject(info: ProjectInfo) extends ParentProject(info) { + lazy val akka_persistence_common = project("akka-persistence-common", "akka-persistence-common", + new AkkaPersistenceCommonProject(_), akka_core) + lazy val akka_persistence_redis = project("akka-persistence-redis", "akka-persistence-redis", + new AkkaRedisProject(_), akka_persistence_common) + lazy val akka_persistence_mongo = project("akka-persistence-mongo", "akka-persistence-mongo", + new AkkaMongoProject(_), akka_persistence_common) + lazy val akka_persistence_cassandra = project("akka-persistence-cassandra", "akka-persistence-cassandra", + new AkkaCassandraProject(_), akka_persistence_common) + } + + class AkkaJgroupsProject(info: ProjectInfo) extends DefaultProject(info) { + val jgroups = "jgroups" % "jgroups" % "2.8.0.CR7" % "compile" + lazy val dist = 
deployTask(info, distPath) dependsOn(`package`) describedAs("Deploying") + } + + class AkkaShoalProject(info: ProjectInfo) extends DefaultProject(info) { + val shoal = "shoal-jxta" % "shoal" % "1.1-20090818" % "compile" + val shoal_extra = "shoal-jxta" % "jxta" % "1.1-20090818" % "compile" + lazy val dist = deployTask(info, distPath) dependsOn(`package`) describedAs("Deploying") + } + + class AkkaClusterParentProject(info: ProjectInfo) extends ParentProject(info) { + lazy val akka_cluster_jgroups = project("akka-cluster-jgroups", "akka-cluster-jgroups", + new AkkaJgroupsProject(_), akka_core) + lazy val akka_cluster_shoal = project("akka-cluster-shoal", "akka-cluster-shoal", + new AkkaShoalProject(_), akka_core) + } + + class AkkaKernelProject(info: ProjectInfo) extends DefaultProject(info) { + lazy val dist = deployTask(info, distPath) dependsOn(`package`) describedAs("Deploying") + } + + class AkkaSpringProject(info: ProjectInfo) extends DefaultProject(info) { + val spring_beans = "org.springframework" % "spring-beans" % "3.0.1.RELEASE" + val spring_context = "org.springframework" % "spring-context" % "3.0.1.RELEASE" + // testing + val scalatest = "org.scalatest" % "scalatest" % SCALATEST_VERSION % "test" + val junit = "junit" % "junit" % "4.5" % "test" + lazy val dist = deployTask(info, distPath) dependsOn(`package`) describedAs("Deploying") + } + + // examples + class AkkaFunTestProject(info: ProjectInfo) extends DefaultProject(info) { + val jackson_core_asl = "org.codehaus.jackson" % "jackson-core-asl" % "1.2.1" % "compile" + val stax_api = "javax.xml.stream" % "stax-api" % "1.0-2" % "compile" + val protobuf = "com.google.protobuf" % "protobuf-java" % "2.2.0" + val grizzly = "com.sun.grizzly" % "grizzly-comet-webserver" % "1.9.18-i" % "compile" + val jersey_server = "com.sun.jersey" % "jersey-server" % JERSEY_VERSION % "compile" + val jersey_json = "com.sun.jersey" % "jersey-json" % JERSEY_VERSION % "compile" + val jersey_atom = "com.sun.jersey" % "jersey-atom" % JERSEY_VERSION % "compile" + // testing + val junit = "junit" % "junit" % "4.5" % "test" + val jmock = "org.jmock" % "jmock" % "2.4.0" % "test" + } + + class AkkaSampleChatProject(info: ProjectInfo) extends DefaultProject(info) { + lazy val dist = deployTask(info, deployPath) dependsOn(`package`) describedAs("Deploying") + } + + class AkkaSampleLiftProject(info: ProjectInfo) extends DefaultProject(info) { + val commons_logging = "commons-logging" % "commons-logging" % "1.1.1" % "compile" + val lift = "net.liftweb" % "lift-webkit" % LIFT_VERSION % "compile" + val lift_util = "net.liftweb" % "lift-util" % LIFT_VERSION % "compile" + val servlet = "javax.servlet" % "servlet-api" % "2.5" % "compile" + // testing + val jetty = "org.mortbay.jetty" % "jetty" % "6.1.22" % "test" + val junit = "junit" % "junit" % "4.5" % "test" + lazy val dist = deployTask(info, deployPath) dependsOn(`package`) describedAs("Deploying") + } + + class AkkaSampleRestJavaProject(info: ProjectInfo) extends DefaultProject(info) { + lazy val dist = deployTask(info, deployPath) dependsOn(`package`) describedAs("Deploying") + } + + class AkkaSampleRestScalaProject(info: ProjectInfo) extends DefaultProject(info) { + val jsr311 = "javax.ws.rs" % "jsr311-api" % "1.1.1" % "compile" + lazy val dist = deployTask(info, deployPath) dependsOn(`package`) describedAs("Deploying") + } + + class AkkaSampleCamelProject(info: ProjectInfo) extends DefaultProject(info) { + val commons_codec = "commons-codec" % "commons-codec" % "1.3" % "compile" + val spring_jms = 
"org.springframework" % "spring-jms" % "3.0.1.RELEASE" + val camel_jetty = "org.apache.camel" % "camel-jetty" % "2.2.0" % "compile" + val camel_jms = "org.apache.camel" % "camel-jms" % "2.2.0" % "compile" + val activemq_core = "org.apache.activemq" % "activemq-core" % "5.3.0" % "compile" + lazy val dist = deployTask(info, deployPath) dependsOn(`package`) describedAs("Deploying") + } + + class AkkaSampleSecurityProject(info: ProjectInfo) extends DefaultProject(info) { + val jsr311 = "javax.ws.rs" % "jsr311-api" % "1.1.1" % "compile" + val jsr250 = "javax.annotation" % "jsr250-api" % "1.0" + lazy val dist = deployTask(info, deployPath) dependsOn(`package`) describedAs("Deploying") + } + + class AkkaSamplesParentProject(info: ProjectInfo) extends ParentProject(info) { + lazy val akka_sample_chat = project("akka-sample-chat", "akka-sample-chat", + new AkkaSampleChatProject(_), akka_kernel) + lazy val akka_sample_lift = project("akka-sample-lift", "akka-sample-lift", + new AkkaSampleLiftProject(_), akka_kernel) + lazy val akka_sample_rest_java = project("akka-sample-rest-java", "akka-sample-rest-java", + new AkkaSampleRestJavaProject(_), akka_kernel) + lazy val akka_sample_rest_scala = project("akka-sample-rest-scala", "akka-sample-rest-scala", + new AkkaSampleRestScalaProject(_), akka_kernel) + lazy val akka_sample_camel = project("akka-sample-camel", "akka-sample-camel", + new AkkaSampleCamelProject(_), akka_kernel) + lazy val akka_sample_security = project("akka-sample-security", "akka-sample-security", + new AkkaSampleSecurityProject(_), akka_kernel) + } + + // ------------------------------------------------------------ + // helper functions + def removeDupEntries(paths: PathFinder) = + Path.lazyPathFinder { + val mapped = paths.get map { p => (p.relativePath, p) } + (Map() ++ mapped).values.toList + } + + def allArtifacts = { + (removeDupEntries(runClasspath filter ClasspathUtilities.isArchive) +++ + ((outputPath ##) / defaultJarName) +++ + mainResources +++ + mainDependencies.scalaJars +++ + descendents(info.projectPath, "*.conf") +++ + descendents(info.projectPath / "dist", "*.jar") +++ + descendents(info.projectPath / "deploy", "*.jar") +++ + descendents(path("lib") ##, "*.jar") +++ + descendents(configurationPath(Configurations.Compile) ##, "*.jar")) + .filter(jar => // remove redundant libs + !jar.toString.endsWith("stax-api-1.0.1.jar") && + !jar.toString.endsWith("scala-library-2.7.5.jar") && + !jar.toString.endsWith("scala-library-2.7.6.jar")) + } + + def deployTask(info: ProjectInfo, toDir: Path) = task { + val projectPath = info.projectPath.toString + val moduleName = projectPath.substring( + projectPath.lastIndexOf(System.getProperty("file.separator")) + 1, projectPath.length) + // FIXME need to find out a way to grab these paths from the sbt system + val JAR_FILE_NAME = moduleName + "_%s-%s.jar".format(defScalaVersion.value, version) + val JAR_FILE_PATH = projectPath + "/target/scala_%s/".format(defScalaVersion.value) + JAR_FILE_NAME + + val from = Path.fromFile(new java.io.File(JAR_FILE_PATH)) + val to = Path.fromFile(new java.io.File(toDir + "/" + JAR_FILE_NAME)) + log.info("Deploying " + to) + FileUtilities.copyFile(from, to, log) + } +} diff --git a/scripts/line_count.sh b/scripts/line_count.sh new file mode 100755 index 0000000000..3e624758b9 --- /dev/null +++ b/scripts/line_count.sh @@ -0,0 +1 @@ +wc -l `find . 
-name \*.scala -print` \ No newline at end of file diff --git a/scripts/run_akka.sh b/scripts/run_akka.sh new file mode 100755 index 0000000000..8112894022 --- /dev/null +++ b/scripts/run_akka.sh @@ -0,0 +1,16 @@ +#!/bin/bash +cd $AKKA_HOME +VERSION=akka_2.7.7-0.7 +TARGET_DIR=dist/$1 +shift 1 +VMARGS=$@ + +if [ -d $TARGET_DIR ]; then + cd $TARGET_DIR +else + unzip dist/${VERSION}.zip -d $TARGET_DIR + cd $TARGET_DIR +fi + +export AKKA_HOME=`pwd` +java ${VMARGS} -jar ${VERSION}.jar \ No newline at end of file
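run_akka.sh unzips a distribution and starts the microkernel via the executable jar configured above (mainClass se.scalablesolutions.akka.kernel.Main), passing any remaining arguments as JVM options, which is why they must precede -jar. To close, a hedged sketch of the kind of component such a kernel hosts: an implementation of the Bootable trait whose onLoad/onUnload signatures were tightened earlier in this change (class name and bodies are illustrative only):

    import se.scalablesolutions.akka.util.Bootable

    class ExampleModule extends Bootable {
      override def onLoad   { println("example module loaded") }
      override def onUnload { println("example module unloaded") }
    }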