diff --git a/akka-actor-tests/src/test/scala/akka/actor/actor/ActorRefSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/actor/ActorRefSpec.scala
index 944e334712..02dfb6ac9b 100644
--- a/akka-actor-tests/src/test/scala/akka/actor/actor/ActorRefSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/actor/actor/ActorRefSpec.scala
@@ -11,9 +11,10 @@ import akka.testing._
import akka.util.duration._
import akka.testing.Testing.sleepFor
import akka.config.Supervision.{ OneForOneStrategy }
-import akka.actor._
import akka.dispatch.Future
import java.util.concurrent.{ TimeUnit, CountDownLatch }
+import java.lang.IllegalStateException
+import akka.util.ReflectiveAccess
object ActorRefSpec {
@@ -68,26 +69,189 @@ object ActorRefSpec {
}
}
}
+
+ class OuterActor(val inner: ActorRef) extends Actor {
+ def receive = {
+ case "self" ⇒ self reply self
+ case x ⇒ inner forward x
+ }
+ }
+
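+ // The 'val fail = new InnerActor' members below must trigger ActorInitializationException, since Actors may not be instantiated outside of actorOf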
+ class FailingOuterActor(val inner: ActorRef) extends Actor {
+ val fail = new InnerActor
+
+ def receive = {
+ case "self" ⇒ self reply self
+ case x ⇒ inner forward x
+ }
+ }
+
+ class FailingInheritingOuterActor(_inner: ActorRef) extends OuterActor(_inner) {
+ val fail = new InnerActor
+ }
+
+ class InnerActor extends Actor {
+ def receive = {
+ case "innerself" ⇒ self reply self
+ case other ⇒ self reply other
+ }
+ }
+
+ class FailingInnerActor extends Actor {
+ val fail = new InnerActor
+
+ def receive = {
+ case "innerself" ⇒ self reply self
+ case other ⇒ self reply other
+ }
+ }
+
+ class FailingInheritingInnerActor extends InnerActor {
+ val fail = new InnerActor
+ }
}
class ActorRefSpec extends WordSpec with MustMatchers {
- import ActorRefSpec._
+ import akka.actor.ActorRefSpec._
"An ActorRef" must {
"not allow Actors to be created outside of an actorOf" in {
intercept[akka.actor.ActorInitializationException] {
new Actor { def receive = { case _ ⇒ } }
- fail("shouldn't get here")
}
intercept[akka.actor.ActorInitializationException] {
- val a = Actor.actorOf(new Actor {
+ Actor.actorOf(new Actor {
val nested = new Actor { def receive = { case _ ⇒ } }
def receive = { case _ ⇒ }
}).start()
- fail("shouldn't get here")
}
+
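+ // Verifies that the thread-local actorRefInCreation stack is cleaned up after each failed creation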
+ def refStackMustBeEmpty = Actor.actorRefInCreation.get.headOption must be === None
+
+ refStackMustBeEmpty
+
+ intercept[akka.actor.ActorInitializationException] {
+ Actor.actorOf(new FailingOuterActor(Actor.actorOf(new InnerActor).start)).start()
+ }
+
+ refStackMustBeEmpty
+
+ intercept[akka.actor.ActorInitializationException] {
+ Actor.actorOf(new OuterActor(Actor.actorOf(new FailingInnerActor).start)).start()
+ }
+
+ refStackMustBeEmpty
+
+ intercept[akka.actor.ActorInitializationException] {
+ Actor.actorOf(new FailingInheritingOuterActor(Actor.actorOf(new InnerActor).start)).start()
+ }
+
+ refStackMustBeEmpty
+
+ intercept[akka.actor.ActorInitializationException] {
+ Actor.actorOf(new FailingOuterActor(Actor.actorOf(new FailingInheritingInnerActor).start)).start()
+ }
+
+ refStackMustBeEmpty
+
+ intercept[akka.actor.ActorInitializationException] {
+ Actor.actorOf(new FailingInheritingOuterActor(Actor.actorOf(new FailingInheritingInnerActor).start)).start()
+ }
+
+ refStackMustBeEmpty
+
+ intercept[akka.actor.ActorInitializationException] {
+ Actor.actorOf(new FailingInheritingOuterActor(Actor.actorOf(new FailingInnerActor).start)).start()
+ }
+
+ refStackMustBeEmpty
+
+ intercept[akka.actor.ActorInitializationException] {
+ Actor.actorOf(new OuterActor(Actor.actorOf(new InnerActor {
+ val a = new InnerActor
+ }).start)).start()
+ }
+
+ refStackMustBeEmpty
+
+ intercept[akka.actor.ActorInitializationException] {
+ Actor.actorOf(new FailingOuterActor(Actor.actorOf(new FailingInheritingInnerActor).start)).start()
+ }
+
+ refStackMustBeEmpty
+
+ intercept[akka.actor.ActorInitializationException] {
+ Actor.actorOf(new OuterActor(Actor.actorOf(new FailingInheritingInnerActor).start)).start()
+ }
+
+ refStackMustBeEmpty
+
+ intercept[akka.actor.ActorInitializationException] {
+ Actor.actorOf(new OuterActor(Actor.actorOf({ new InnerActor; new InnerActor }).start)).start()
+ }
+
+ refStackMustBeEmpty
+
+ (intercept[java.lang.IllegalStateException] {
+ Actor.actorOf(new OuterActor(Actor.actorOf({ throw new IllegalStateException("Ur state be b0rked"); new InnerActor }).start)).start()
+ }).getMessage must be === "Ur state be b0rked"
+
+ refStackMustBeEmpty
+ }
+
+ "be serializable using Java Serialization on local node" in {
+ val a = Actor.actorOf[InnerActor].start
+
+ import java.io._
+
+ val baos = new ByteArrayOutputStream(8192 * 32)
+ val out = new ObjectOutputStream(baos)
+
+ out.writeObject(a)
+
+ out.flush
+ out.close
+
+ val in = new ObjectInputStream(new ByteArrayInputStream(baos.toByteArray))
+ val readA = in.readObject
+
+ a.isInstanceOf[LocalActorRef] must be === true
+ readA.isInstanceOf[LocalActorRef] must be === true
+ (readA eq a) must be === true
+ }
+
+ "must throw exception on deserialize if not present in local registry and remoting is not enabled" in {
+ ReflectiveAccess.RemoteModule.isEnabled must be === false
+
+ val a = Actor.actorOf[InnerActor].start
+
+ val inetAddress = ReflectiveAccess.RemoteModule.configDefaultAddress
+
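+ // Build the serialized representation that writeReplace would produce, to match against the error message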
+ val expectedSerializedRepresentation = SerializedActorRef(
+ a.uuid,
+ a.address,
+ inetAddress.getAddress.getHostAddress,
+ inetAddress.getPort,
+ a.timeout)
+
+ Actor.registry.unregister(a)
+
+ import java.io._
+
+ val baos = new ByteArrayOutputStream(8192 * 32)
+ val out = new ObjectOutputStream(baos)
+
+ out.writeObject(a)
+
+ out.flush
+ out.close
+
+ val in = new ObjectInputStream(new ByteArrayInputStream(baos.toByteArray))
+ (intercept[java.lang.IllegalStateException] {
+ in.readObject
+ }).getMessage must be === "Trying to deserialize ActorRef (" + expectedSerializedRepresentation + ") but it's not found in the local registry and remoting is not enabled!"
}
"support nested actorOfs" in {
@@ -102,6 +266,17 @@ class ActorRefSpec extends WordSpec with MustMatchers {
(a ne nested) must be === true
}
+ "support advanced nested actorOfs" in {
+ val a = Actor.actorOf(new OuterActor(Actor.actorOf(new InnerActor).start)).start
+ val inner = (a !! "innerself").get
+
+ (a !! a).get must be(a)
+ (a !! "self").get must be(a)
+ inner must not be a
+
+ (a !! "msg").get must be === "msg"
+ }
+
"support reply via channel" in {
val serverRef = Actor.actorOf[ReplyActor].start()
val clientRef = Actor.actorOf(new SenderActor(serverRef)).start()
diff --git a/akka-actor-tests/src/test/scala/akka/actor/actor/DeployerSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/actor/DeployerSpec.scala
index bd56a79a8e..15316f727d 100644
--- a/akka-actor-tests/src/test/scala/akka/actor/actor/DeployerSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/actor/actor/DeployerSpec.scala
@@ -22,7 +22,9 @@ class DeployerSpec extends WordSpec with MustMatchers {
Clustered(
Node("node1"),
Replicate(3),
- Stateless))))
+ Replication(
+ TransactionLog,
+ WriteThrough)))))
}
}
}
diff --git a/akka-actor-tests/src/test/scala/akka/config/ConfigSpec.scala b/akka-actor-tests/src/test/scala/akka/config/ConfigSpec.scala
index 66d21435f4..594d0ad811 100644
--- a/akka-actor-tests/src/test/scala/akka/config/ConfigSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/config/ConfigSpec.scala
@@ -37,9 +37,8 @@ class ConfigSpec extends WordSpec with MustMatchers {
getInt("akka.actor.throughput") must equal(Some(5))
getInt("akka.actor.throughput-deadline-time") must equal(Some(-1))
- getString("akka.remote.layer") must equal(Some("akka.remote.netty.NettyRemoteSupport"))
- getString("akka.remote.server.hostname") must equal(Some("localhost"))
- getInt("akka.remote.server.port") must equal(Some(2552))
+ getString("akka.cluster.layer") must equal(Some("akka.remote.netty.NettyRemoteSupport"))
+ getInt("akka.cluster.server.port") must equal(Some(2552))
}
}
}
diff --git a/akka-actor/src/main/scala/akka/actor/Actor.scala b/akka-actor/src/main/scala/akka/actor/Actor.scala
index adbfa0b115..ba4d712f79 100644
--- a/akka-actor/src/main/scala/akka/actor/Actor.scala
+++ b/akka-actor/src/main/scala/akka/actor/Actor.scala
@@ -73,10 +73,10 @@ case object Kill extends AutoReceivedMessage with LifeCycleMessage
case object ReceiveTimeout extends LifeCycleMessage
case class MaximumNumberOfRestartsWithinTimeRangeReached(
- @BeanProperty val victim: ActorRef,
- @BeanProperty val maxNrOfRetries: Option[Int],
- @BeanProperty val withinTimeRange: Option[Int],
- @BeanProperty val lastExceptionCausingRestart: Throwable) extends LifeCycleMessage
+ @BeanProperty victim: ActorRef,
+ @BeanProperty maxNrOfRetries: Option[Int],
+ @BeanProperty withinTimeRange: Option[Int],
+ @BeanProperty lastExceptionCausingRestart: Throwable) extends LifeCycleMessage
// Exceptions for Actors
class ActorStartException private[akka] (message: String, cause: Throwable = null) extends AkkaException(message, cause)
@@ -132,7 +132,7 @@ object Actor extends ListenerManagement {
subclassAudits synchronized { subclassAudits.clear() }
}
}
- Runtime.getRuntime.addShutdownHook(new Thread(hook))
+ Runtime.getRuntime.addShutdownHook(new Thread(hook, "akka-shutdown-hook"))
hook
}
@@ -270,7 +270,7 @@ object Actor extends ListenerManagement {
*
*/
def actorOf[T <: Actor](creator: ⇒ T, address: String): ActorRef = {
- createActor(address, () ⇒ new LocalActorRef(() ⇒ creator, address))
+ createActor(address, () ⇒ new LocalActorRef(() ⇒ creator, address, Transient))
}
/**
@@ -293,7 +293,7 @@ object Actor extends ListenerManagement {
* JAVA API
*/
def actorOf[T <: Actor](creator: Creator[T], address: String): ActorRef = {
- createActor(address, () ⇒ new LocalActorRef(() ⇒ creator.create, address))
+ createActor(address, () ⇒ new LocalActorRef(() ⇒ creator.create, address, Transient))
}
/**
@@ -375,12 +375,18 @@ object Actor extends ListenerManagement {
"\nif so put it outside the class/trait, f.e. in a companion object," +
"\nOR try to change: 'actorOf[MyActor]' to 'actorOf(new MyActor)'.", cause)
}
- }, address)
+ }, address, Transient)
}
private def newClusterActorRef(factory: () ⇒ ActorRef, address: String, deploy: Deploy): ActorRef = {
deploy match {
- case Deploy(configAdress, router, serializerClassName, Clustered(home, replication: Replication, state: State)) ⇒
+ case Deploy(
+ configAdress, router, serializerClassName,
+ Clustered(
+ home,
+ replicas,
+ replication)) ⇒
+
ClusterModule.ensureEnabled()
if (configAdress != address) throw new IllegalStateException(
@@ -420,32 +426,38 @@ object Actor extends ListenerManagement {
* }
*/
- val isStateful = state match {
- case _: Stateless | Stateless ⇒ false
- case _: Stateful | Stateful ⇒ true
- }
-
- if (isStateful && isHomeNode) { // stateful actor's home node
- cluster
- .use(address, serializer)
- .getOrElse(throw new ConfigurationException(
- "Could not check out actor [" + address + "] from cluster registry as a \"local\" actor"))
-
- } else {
- if (!cluster.isClustered(address)) { // add actor to cluster registry (if not already added)
- cluster.store(factory().start(), replicas, false, serializer)
- }
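+ // Registers the actor in the cluster registry (if needed) and checks it out as a ClusterActorRef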
+ def storeActorAndGetClusterRef(replicationScheme: ReplicationScheme, serializer: Serializer): ActorRef = {
+ // add actor to cluster registry (if not already added)
+ if (!cluster.isClustered(address))
+ cluster.store(factory().start(), nrOfReplicas, replicationScheme, false, serializer)
// remote node (not home node), check out as ClusterActorRef
cluster.ref(address, DeploymentConfig.routerTypeFor(router))
}
+ val serializer = serializerFor(address, serializerClassName)
+
+ replication match {
+ case _: Transient | Transient ⇒
+ storeActorAndGetClusterRef(Transient, serializer)
+
+ case replication: Replication ⇒
+ if (isHomeNode) { // stateful actor's home node
+ cluster
+ .use(address, serializer)
+ .getOrElse(throw new ConfigurationException(
+ "Could not check out actor [" + address + "] from cluster registry as a \"local\" actor"))
+ } else {
+ // FIXME later manage different 'storage' (data grid) as well
+ storeActorAndGetClusterRef(replication, serializer)
+ }
+ }
+
case invalid ⇒ throw new IllegalActorStateException(
"Could not create actor with address [" + address +
"], not bound to a valid deployment scheme [" + invalid + "]")
}
}
-}
/**
* Actor base trait that should be extended by or mixed to create an Actor with the semantics of the 'Actor Model':
@@ -510,16 +522,21 @@ trait Actor {
implicit val someSelf: Some[ActorRef] = {
val refStack = Actor.actorRefInCreation.get
if (refStack.isEmpty) throw new ActorInitializationException(
- "ActorRef for instance of actor [" + getClass.getName + "] is not in scope." +
- "\n\tYou can not create an instance of an actor explicitly using 'new MyActor'." +
+ "\n\tYou can not create an instance of an " + getClass.getName + " explicitly using 'new MyActor'." +
"\n\tYou have to use one of the factory methods in the 'Actor' object to create a new actor." +
"\n\tEither use:" +
"\n\t\t'val actor = Actor.actorOf[MyActor]', or" +
"\n\t\t'val actor = Actor.actorOf(new MyActor(..))'")
val ref = refStack.head
- Actor.actorRefInCreation.set(refStack.pop)
- Some(ref)
+
+ if (ref eq null)
+ throw new ActorInitializationException("Trying to create an instance of " + getClass.getName + " outside of a wrapping 'actorOf'")
+ else {
+ // Push a null marker so any subsequent call to new Actor doesn't reuse this actor ref
+ Actor.actorRefInCreation.set(refStack.push(null))
+ Some(ref)
+ }
}
/*
@@ -650,17 +667,10 @@ trait Actor {
val behaviorStack = self.hotswap
msg match {
- case l: AutoReceivedMessage ⇒
- autoReceiveMessage(l)
-
- case msg if behaviorStack.nonEmpty && behaviorStack.head.isDefinedAt(msg) ⇒
- behaviorStack.head.apply(msg)
-
- case msg if behaviorStack.isEmpty && processingBehavior.isDefinedAt(msg) ⇒
- processingBehavior.apply(msg)
-
- case unknown ⇒
- unhandled(unknown) //This is the only line that differs from processingbehavior
+ case l: AutoReceivedMessage ⇒ autoReceiveMessage(l)
+ case msg if behaviorStack.nonEmpty && behaviorStack.head.isDefinedAt(msg) ⇒ behaviorStack.head.apply(msg)
+ case msg if behaviorStack.isEmpty && processingBehavior.isDefinedAt(msg) ⇒ processingBehavior.apply(msg)
+ case unknown ⇒ unhandled(unknown) //This is the only line that differs from processingBehavior
}
}
@@ -698,12 +708,3 @@ private[actor] class AnyOptionAsTypedOption(anyOption: Option[Any]) {
*/
def asSilently[T: Manifest]: Option[T] = narrowSilently[T](anyOption)
}
-
-/**
- * Marker interface for proxyable actors (such as typed actor).
- *
- * @author Jonas Bonér
- */
-trait Proxyable {
- private[actor] def swapProxiedActor(newInstance: Actor)
-}
diff --git a/akka-actor/src/main/scala/akka/actor/ActorRef.scala b/akka-actor/src/main/scala/akka/actor/ActorRef.scala
index 54336dd2d2..c410956cd2 100644
--- a/akka-actor/src/main/scala/akka/actor/ActorRef.scala
+++ b/akka-actor/src/main/scala/akka/actor/ActorRef.scala
@@ -6,10 +6,13 @@ package akka.actor
import akka.event.EventHandler
import akka.dispatch._
-import akka.config.Config
+import akka.config._
import akka.config.Supervision._
import akka.util._
+import akka.serialization.{ Format, Serializer }
import ReflectiveAccess._
+import ClusterModule._
+import DeploymentConfig.{ ReplicationScheme, Replication, Transient, WriteThrough, WriteBehind }
import java.net.InetSocketAddress
import java.util.concurrent.atomic.AtomicReference
@@ -19,6 +22,7 @@ import java.util.{ Map ⇒ JMap }
import scala.reflect.BeanProperty
import scala.collection.immutable.Stack
import scala.annotation.tailrec
+import java.lang.IllegalStateException
private[akka] object ActorRefInternals {
@@ -85,14 +89,14 @@ abstract class Channel[T] {
*
* @author Jonas Bonér
*/
-trait ActorRef extends ActorRefShared with java.lang.Comparable[ActorRef] { scalaRef: ScalaActorRef ⇒
+trait ActorRef extends ActorRefShared with java.lang.Comparable[ActorRef] with Serializable { scalaRef: ScalaActorRef ⇒
// Only mutable for RemoteServer in order to maintain identity across nodes
@volatile
protected[akka] var _uuid = newUuid
@volatile
protected[this] var _status: ActorRefInternals.StatusType = ActorRefInternals.UNSTARTED
- val address: String
+ def address: String
/**
* User overridable callback/setting.
@@ -409,17 +413,6 @@ trait ActorRef extends ActorRefShared with java.lang.Comparable[ActorRef] { scal
*/
def startLink(actorRef: ActorRef): ActorRef
- /**
- * Returns the mailbox size.
- */
- def mailboxSize = dispatcher.mailboxSize(this)
-
- /**
- * Akka Java API.
- * Returns the mailbox size.
- */
- def getMailboxSize: Int = mailboxSize
-
/**
* Returns the supervisor, if there is one.
*/
@@ -500,7 +493,7 @@ trait ActorRef extends ActorRefShared with java.lang.Comparable[ActorRef] { scal
that.asInstanceOf[ActorRef].uuid == uuid
}
- override def toString = "Actor[" + address + ":" + uuid + "]"
+ override def toString = "Actor[%s:%s]".format(address, uuid)
}
/**
@@ -508,27 +501,72 @@ trait ActorRef extends ActorRefShared with java.lang.Comparable[ActorRef] { scal
*
* @author Jonas Bonér
*/
-class LocalActorRef private[akka] (private[this] val actorFactory: () ⇒ Actor, val address: String)
+class LocalActorRef private[akka] (
+ private[this] val actorFactory: () ⇒ Actor,
+ val address: String,
+ replicationScheme: ReplicationScheme)
extends ActorRef with ScalaActorRef {
+
protected[akka] val guard = new ReentrantGuard
@volatile
protected[akka] var _futureTimeout: Option[ScheduledFuture[AnyRef]] = None
+
@volatile
private[akka] lazy val _linkedActors = new ConcurrentHashMap[Uuid, ActorRef]
+
@volatile
private[akka] var _supervisor: Option[ActorRef] = None
+
@volatile
private var maxNrOfRetriesCount: Int = 0
+
@volatile
private var restartTimeWindowStartNanos: Long = 0L
+
@volatile
private var _mailbox: AnyRef = _
+
@volatile
private[akka] var _dispatcher: MessageDispatcher = Dispatchers.defaultGlobalDispatcher
protected[akka] val actorInstance = guard.withGuard { new AtomicReference[Actor](newActor) }
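+ // Replication is enabled for any scheme other than Transient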
+ private val isReplicated: Boolean = replicationScheme match {
+ case _: Transient | Transient ⇒ false
+ case _ ⇒ true
+ }
+
+ // FIXME how to get the matching serializerClassName? Now default is used. Needed for transaction log snapshot
+ private val serializer = Actor.serializerFor(address, Format.defaultSerializerName)
+
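+ // Lazily created replication storage: Left holds a transaction log; data-grid storage is not yet supported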
+ private lazy val replicationStorage: Either[TransactionLog, AnyRef] = {
+ replicationScheme match {
+ case _: Transient | Transient ⇒
+ throw new IllegalStateException("Can not replicate 'transient' actor [" + toString + "]")
+
+ case Replication(storage, strategy) ⇒
+ val isWriteBehind = strategy match {
+ case _: WriteBehind | WriteBehind ⇒ true
+ case _: WriteThrough | WriteThrough ⇒ false
+ }
+
+ storage match {
+ case _: DeploymentConfig.TransactionLog | DeploymentConfig.TransactionLog ⇒
+ EventHandler.debug(this,
+ "Creating a transaction log for Actor [%s] with replication strategy [%s]"
+ .format(address, replicationScheme))
+ Left(transactionLog.newLogFor(_uuid.toString, isWriteBehind, replicationScheme, serializer))
+
+ case _: DeploymentConfig.DataGrid | DeploymentConfig.DataGrid ⇒
+ throw new ConfigurationException("Replication storage type \"data-grid\" is not yet supported")
+
+ case unknown ⇒
+ throw new ConfigurationException("Unknown replication storage type [" + unknown + "]")
+ }
+ }
+ }
+
//If it was started inside "newActor", initialize it
if (isRunning) initializeActorInstance
@@ -541,8 +579,11 @@ class LocalActorRef private[akka] (private[this] val actorFactory: () ⇒ Actor,
__lifeCycle: LifeCycle,
__supervisor: Option[ActorRef],
__hotswap: Stack[PartialFunction[Any, Unit]],
- __factory: () ⇒ Actor) = {
- this(__factory, __address)
+ __factory: () ⇒ Actor,
+ __replicationStrategy: ReplicationScheme) = {
+
+ this(__factory, __address, __replicationStrategy)
+
_uuid = __uuid
timeout = __timeout
receiveTimeout = __receiveTimeout
@@ -614,6 +655,10 @@ class LocalActorRef private[akka] (private[this] val actorFactory: () ⇒ Actor,
setActorSelfFields(actorInstance.get, null)
}
} //else if (isBeingRestarted) throw new ActorKilledException("Actor [" + toString + "] is being restarted.")
+
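+ // Clean up the transaction log when a replicated actor is stopped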
+ if (isReplicated) {
+ if (replicationStorage.isLeft) replicationStorage.left.get.delete()
+ }
}
}
@@ -650,7 +695,6 @@ class LocalActorRef private[akka] (private[this] val actorFactory: () ⇒ Actor,
guard.withGuard {
if (_linkedActors.remove(actorRef.uuid) eq null)
throw new IllegalActorStateException("Actor [" + actorRef + "] is not a linked actor, can't unlink")
-
actorRef.supervisor = None
}
}
@@ -681,7 +725,9 @@ class LocalActorRef private[akka] (private[this] val actorFactory: () ⇒ Actor,
// ========= AKKA PROTECTED FUNCTIONS =========
@throws(classOf[java.io.ObjectStreamException])
private def writeReplace(): AnyRef = {
- val inetaddr = Actor.remote.address
+ val inetaddr =
+ if (ReflectiveAccess.RemoteModule.isEnabled) Actor.remote.address
+ else ReflectiveAccess.RemoteModule.configDefaultAddress
SerializedActorRef(uuid, address, inetaddr.getAddress.getHostAddress, inetaddr.getPort, timeout)
}
@@ -690,7 +736,7 @@ class LocalActorRef private[akka] (private[this] val actorFactory: () ⇒ Actor,
}
protected[akka] def postMessageToMailbox(message: Any, senderOption: Option[ActorRef]) {
- dispatcher dispatchMessage new MessageInvocation(this, message, senderOption, None)
+ dispatcher dispatchMessage MessageInvocation(this, message, senderOption, None)
}
protected[akka] def postMessageToMailboxAndCreateFutureResultWithTimeout[T](
@@ -699,8 +745,7 @@ class LocalActorRef private[akka] (private[this] val actorFactory: () ⇒ Actor,
senderOption: Option[ActorRef],
senderFuture: Option[Promise[T]]): Promise[T] = {
val future = if (senderFuture.isDefined) senderFuture else Some(new DefaultPromise[T](timeout))
- dispatcher dispatchMessage new MessageInvocation(
- this, message, senderOption, future.asInstanceOf[Some[Promise[Any]]])
+ dispatcher dispatchMessage MessageInvocation(this, message, senderOption, future.asInstanceOf[Some[Promise[Any]]])
future.get
}
@@ -732,7 +777,12 @@ class LocalActorRef private[akka] (private[this] val actorFactory: () ⇒ Actor,
throw e
}
}
- } finally { guard.lock.unlock() }
+ } finally {
+ guard.lock.unlock()
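+ // Record the processed message in the replication transaction log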
+ if (isReplicated) {
+ if (replicationStorage.isLeft) replicationStorage.left.get.recordEntry(messageHandle, this)
+ }
+ }
}
protected[akka] def handleTrapExit(dead: ActorRef, reason: Throwable) {
@@ -784,19 +834,12 @@ class LocalActorRef private[akka] (private[this] val actorFactory: () ⇒ Actor,
protected[akka] def restart(reason: Throwable, maxNrOfRetries: Option[Int], withinTimeRange: Option[Int]) {
def performRestart() {
val failedActor = actorInstance.get
-
- failedActor match {
- case p: Proxyable ⇒
- failedActor.preRestart(reason)
- failedActor.postRestart(reason)
- case _ ⇒
- failedActor.preRestart(reason)
- val freshActor = newActor
- setActorSelfFields(failedActor, null) // Only null out the references if we could instantiate the new actor
- actorInstance.set(freshActor) // Assign it here so if preStart fails, we can null out the sef-refs next call
- freshActor.preStart()
- freshActor.postRestart(reason)
- }
+ failedActor.preRestart(reason)
+ val freshActor = newActor
+ setActorSelfFields(failedActor, null) // Only null out the references if we could instantiate the new actor
+ actorInstance.set(freshActor) // Assign it here so if preStart fails, we can null out the self-refs next call
+ freshActor.preStart()
+ freshActor.postRestart(reason)
}
def tooManyRestarts() {
@@ -865,20 +908,18 @@ class LocalActorRef private[akka] (private[this] val actorFactory: () ⇒ Actor,
private[this] def newActor: Actor = {
import Actor.{ actorRefInCreation ⇒ refStack }
- (try {
- refStack.set(refStack.get.push(this))
+ val stackBefore = refStack.get
+ refStack.set(stackBefore.push(this))
+ try {
actorFactory()
- } catch {
- case e ⇒
- val stack = refStack.get
- //Clean up if failed
- if ((stack.nonEmpty) && (stack.head eq this)) refStack.set(stack.pop)
- //Then rethrow
- throw e
- }) match {
- case null ⇒ throw new ActorInitializationException("Actor instance passed to ActorRef can not be 'null'")
- case valid ⇒ valid
+ } finally {
+ val stackAfter = refStack.get
+ if (stackAfter.nonEmpty)
+ refStack.set(if (stackAfter.head eq null) stackAfter.pop.pop else stackAfter.pop) //pop null marker plus self
}
+ } match {
+ case null ⇒ throw new ActorInitializationException("Actor instance passed to ActorRef can not be 'null'")
+ case valid ⇒ valid
}
private def shutDownTemporaryActor(temporaryActor: ActorRef) {
@@ -959,7 +1000,7 @@ class LocalActorRef private[akka] (private[this] val actorFactory: () ⇒ Actor,
protected[akka] def checkReceiveTimeout() {
cancelReceiveTimeout()
- if (receiveTimeout.isDefined && dispatcher.mailboxSize(this) <= 0) { //Only reschedule if desired and there are currently no more messages to be processed
+ if (receiveTimeout.isDefined && dispatcher.mailboxIsEmpty(this)) { //Only reschedule if desired and there are currently no more messages to be processed
_futureTimeout = Some(Scheduler.scheduleOnce(this, ReceiveTimeout, receiveTimeout.get, TimeUnit.MILLISECONDS))
}
}
@@ -998,8 +1039,6 @@ private[akka] case class RemoteActorRef private[akka] (
timeout = _timeout
- // FIXME BAD, we should not have different ActorRefs
-
start()
def postMessageToMailbox(message: Any, senderOption: Option[ActorRef]) {
@@ -1032,15 +1071,12 @@ private[akka] case class RemoteActorRef private[akka] (
}
}
- // ==== NOT SUPPORTED ====
-
@throws(classOf[java.io.ObjectStreamException])
private def writeReplace(): AnyRef = {
SerializedActorRef(uuid, address, remoteAddress.getAddress.getHostAddress, remoteAddress.getPort, timeout)
}
- @deprecated("Will be removed without replacement, doesn't make any sense to have in the face of `become` and `unbecome`", "1.1")
- def actorClass: Class[_ <: Actor] = unsupported
+ // ==== NOT SUPPORTED ====
def dispatcher_=(md: MessageDispatcher) {
unsupported
}
@@ -1268,6 +1304,12 @@ case class SerializedActorRef(val uuid: Uuid,
@throws(classOf[java.io.ObjectStreamException])
def readResolve(): AnyRef = Actor.registry.local.actorFor(uuid) match {
case Some(actor) ⇒ actor
- case None ⇒ RemoteActorRef(new InetSocketAddress(hostname, port), address, timeout, None)
+ case None ⇒
+ if (ReflectiveAccess.RemoteModule.isEnabled)
+ RemoteActorRef(new InetSocketAddress(hostname, port), address, timeout, None)
+ else
+ throw new IllegalStateException(
+ "Trying to deserialize ActorRef (" + this +
+ ") but it's not found in the local registry and remoting is not enabled!")
}
}
diff --git a/akka-actor/src/main/scala/akka/actor/ActorRegistry.scala b/akka-actor/src/main/scala/akka/actor/ActorRegistry.scala
index 6c9c7a50cf..53a48969af 100644
--- a/akka-actor/src/main/scala/akka/actor/ActorRegistry.scala
+++ b/akka-actor/src/main/scala/akka/actor/ActorRegistry.scala
@@ -21,8 +21,8 @@ import akka.serialization._
* @author Jonas Bonér
*/
sealed trait ActorRegistryEvent
-case class ActorRegistered(address: String, actor: ActorRef) extends ActorRegistryEvent
-case class ActorUnregistered(address: String, actor: ActorRef) extends ActorRegistryEvent
+case class ActorRegistered(address: String, actor: ActorRef, typedActor: Option[AnyRef]) extends ActorRegistryEvent
+case class ActorUnregistered(address: String, actor: ActorRef, typedActor: Option[AnyRef]) extends ActorRegistryEvent
/**
* Registry holding all Actor instances in the whole system.
@@ -65,11 +65,12 @@ private[actor] final class ActorRegistry private[actor] () extends ListenerManag
actorsByAddress.put(address, actor)
actorsByUuid.put(actor.uuid, actor)
- notifyListeners(ActorRegistered(address, actor))
+ notifyListeners(ActorRegistered(address, actor, Option(typedActorsByUuid get actor.uuid)))
}
private[akka] def registerTypedActor(actorRef: ActorRef, interface: AnyRef) {
typedActorsByUuid.put(actorRef.uuid, interface)
+ actorRef.start // register actorRef
}
/**
@@ -78,7 +79,7 @@ private[actor] final class ActorRegistry private[actor] () extends ListenerManag
private[akka] def unregister(address: String) {
val actor = actorsByAddress remove address
actorsByUuid remove actor.uuid
- notifyListeners(ActorUnregistered(address, actor))
+ notifyListeners(ActorUnregistered(address, actor, None))
}
/**
@@ -88,8 +89,7 @@ private[actor] final class ActorRegistry private[actor] () extends ListenerManag
val address = actor.address
actorsByAddress remove address
actorsByUuid remove actor.uuid
- typedActorsByUuid remove actor.uuid
- notifyListeners(ActorUnregistered(address, actor))
+ notifyListeners(ActorUnregistered(address, actor, Option(typedActorsByUuid remove actor.uuid)))
}
/**
@@ -115,7 +115,7 @@ private[actor] final class ActorRegistry private[actor] () extends ListenerManag
}
/**
- * View over the local actor registry.
+ * Projection over the local actor registry.
*/
class LocalActorRegistry(
private val actorsByAddress: ConcurrentHashMap[String, ActorRef],
@@ -339,9 +339,7 @@ class Index[K <: AnyRef, V <: AnyRef: Manifest] {
*/
def foreach(fun: (K, V) ⇒ Unit) {
import scala.collection.JavaConversions._
- container.entrySet foreach { (e) ⇒
- e.getValue.foreach(fun(e.getKey, _))
- }
+ container.entrySet foreach { e ⇒ e.getValue.foreach(fun(e.getKey, _)) }
}
/**
diff --git a/akka-actor/src/main/scala/akka/actor/Deployer.scala b/akka-actor/src/main/scala/akka/actor/Deployer.scala
index 8550a54f90..685197820b 100644
--- a/akka-actor/src/main/scala/akka/actor/Deployer.scala
+++ b/akka-actor/src/main/scala/akka/actor/Deployer.scala
@@ -62,8 +62,8 @@ object DeploymentConfig {
sealed trait Scope
case class Clustered(
home: Home = Host("localhost"),
- replication: Replication = NoReplicas,
- state: State = Stateful) extends Scope
+ replicas: Replicas = NoReplicas,
+ replication: ReplicationScheme = Transient) extends Scope
// For Java API
case class Local() extends Scope
@@ -80,33 +80,60 @@ object DeploymentConfig {
case class IP(ipAddress: String) extends Home
// --------------------------------
- // --- Replication
+ // --- Replicas
// --------------------------------
- sealed trait Replication
- case class Replicate(factor: Int) extends Replication {
- if (factor < 1) throw new IllegalArgumentException("Replication factor can not be negative or zero")
+ sealed trait Replicas
+ case class Replicate(factor: Int) extends Replicas {
+ if (factor < 1) throw new IllegalArgumentException("Replicas factor can not be negative or zero")
}
// For Java API
- case class AutoReplicate() extends Replication
- case class NoReplicas() extends Replication
+ case class AutoReplicate() extends Replicas
+ case class NoReplicas() extends Replicas
// For Scala API
- case object AutoReplicate extends Replication
- case object NoReplicas extends Replication
+ case object AutoReplicate extends Replicas
+ case object NoReplicas extends Replicas
// --------------------------------
- // --- State
+ // --- Replication
// --------------------------------
- sealed trait State
+ sealed trait ReplicationScheme
// For Java API
- case class Stateless() extends State
- case class Stateful() extends State
+ case class Transient() extends ReplicationScheme
// For Scala API
- case object Stateless extends State
- case object Stateful extends State
+ case object Transient extends ReplicationScheme
+ case class Replication(
+ storage: ReplicationStorage,
+ strategy: ReplicationStrategy) extends ReplicationScheme
+
+ // --------------------------------
+ // --- ReplicationStorage
+ // --------------------------------
+ sealed trait ReplicationStorage
+
+ // For Java API
+ case class TransactionLog() extends ReplicationStorage
+ case class DataGrid() extends ReplicationStorage
+
+ // For Scala API
+ case object TransactionLog extends ReplicationStorage
+ case object DataGrid extends ReplicationStorage
+
+ // --------------------------------
+ // --- ReplicationStrategy
+ // --------------------------------
+ sealed trait ReplicationStrategy
+
+ // For Java API
+ case class WriteBehind() extends ReplicationStrategy
+ case class WriteThrough() extends ReplicationStrategy
+
+ // For Scala API
+ case object WriteBehind extends ReplicationStrategy
+ case object WriteThrough extends ReplicationStrategy
// --------------------------------
// --- Helper methods for parsing
@@ -114,11 +141,11 @@ object DeploymentConfig {
def isHomeNode(home: Home): Boolean = home match {
case Host(hostname) ⇒ hostname == Config.hostname
- case IP(address) ⇒ address == "0.0.0.0" // FIXME checking if IP address is on home node is missing
+ case IP(address) ⇒ address == "0.0.0.0" || address == "127.0.0.1" // FIXME look up IP address from the system
case Node(nodename) ⇒ nodename == Config.nodename
}
- def replicaValueFor(replication: Replication): Int = replication match {
+ def replicaValueFor(replicas: Replicas): Int = replicas match {
case Replicate(replicas) ⇒ replicas
case AutoReplicate ⇒ -1
case AutoReplicate() ⇒ -1
@@ -141,6 +168,11 @@ object DeploymentConfig {
case LeastMessages() ⇒ RouterType.LeastMessages
case c: CustomRouter ⇒ throw new UnsupportedOperationException("routerTypeFor: " + c)
}
+
+ def isReplicationAsync(strategy: ReplicationStrategy): Boolean = strategy match {
+ case _: WriteBehind | WriteBehind ⇒ true
+ case _: WriteThrough | WriteThrough ⇒ false
+ }
}
/**
@@ -346,13 +378,31 @@ object Deployer {
}
// --------------------------------
- // akka.actor.deployment..clustered.stateless
+ // akka.actor.deployment..clustered.replication
// --------------------------------
- val state =
- if (clusteredConfig.getBool("stateless", false)) Stateless
- else Stateful
+ clusteredConfig.getSection("replication") match {
+ case None ⇒
+ Some(Deploy(address, router, format, Clustered(home, replicas, Transient)))
- Some(Deploy(address, router, format, Clustered(home, replicas, state)))
+ case Some(replicationConfig) ⇒
+ val storage = replicationConfig.getString("storage", "transaction-log") match {
+ case "transaction-log" ⇒ TransactionLog
+ case "data-grid" ⇒ DataGrid
+ case unknown ⇒
+ throw new ConfigurationException("Config option [" + addressPath +
+ ".clustered.replication.storage] needs to be either [\"transaction-log\"] or [\"data-grid\"] - was [" +
+ unknown + "]")
+ }
+ val strategy = replicationConfig.getString("strategy", "write-through") match {
+ case "write-through" ⇒ WriteThrough
+ case "write-behind" ⇒ WriteBehind
+ case unknown ⇒
+ throw new ConfigurationException("Config option [" + addressPath +
+ ".clustered.replication.strategy] needs to be either [\"write-through\"] or [\"write-behind\"] - was [" +
+ unknown + "]")
+ }
+ Some(Deploy(address, router, format, Clustered(home, replicas, Replication(storage, strategy))))
+ }
}
}
}
@@ -415,7 +465,8 @@ object Address {
def validate(address: String) {
if (validAddressPattern.matcher(address).matches) true
else {
- val e = new IllegalArgumentException("Address [" + address + "] is not valid, need to follow pattern [0-9a-zA-Z\\-\\_\\$]+")
+ val e = new IllegalArgumentException(
+ "Address [" + address + "] is not valid, need to follow pattern [0-9a-zA-Z\\-\\_\\$]+")
EventHandler.error(e, this, e.getMessage)
throw e
}
diff --git a/akka-actor/src/main/scala/akka/actor/TypedActor.scala b/akka-actor/src/main/scala/akka/actor/TypedActor.scala
index 78f234c91a..85a388e24e 100644
--- a/akka-actor/src/main/scala/akka/actor/TypedActor.scala
+++ b/akka-actor/src/main/scala/akka/actor/TypedActor.scala
@@ -10,8 +10,8 @@ import akka.dispatch.{ MessageDispatcher, Dispatchers, Future }
import java.lang.reflect.{ InvocationTargetException, Method, InvocationHandler, Proxy }
import akka.util.{ Duration }
import java.util.concurrent.atomic.{ AtomicReference ⇒ AtomVar }
-import collection.immutable
+//TODO Document this class, not only in Scaladoc, but also in a dedicated typed-actor.rst, for both java and scala
object TypedActor {
private val selfReference = new ThreadLocal[AnyRef]
@@ -20,7 +20,7 @@ object TypedActor {
case some ⇒ some
}
- private class TypedActor[R <: AnyRef, T <: R](val proxyRef: AtomVar[R], createInstance: ⇒ T) extends Actor {
+ private[akka] class TypedActor[R <: AnyRef, T <: R](val proxyRef: AtomVar[R], createInstance: ⇒ T) extends Actor {
val me = createInstance
def receive = {
case m: MethodCall ⇒
@@ -58,7 +58,7 @@ object TypedActor {
}
}
- object Configuration {
+ object Configuration { //TODO: Replace this with the new ActorConfiguration when it exists
val defaultTimeout = Duration(Actor.TIMEOUT, "millis")
val defaultConfiguration = new Configuration(defaultTimeout, Dispatchers.defaultGlobalDispatcher)
def apply(): Configuration = defaultConfiguration
@@ -83,6 +83,8 @@ object TypedActor {
}
case class SerializedMethodCall(ownerType: Class[_], methodName: String, parameterTypes: Array[Class[_]], parameterValues: Array[AnyRef]) {
+ //TODO implement writeObject and readObject to serialize
+ //TODO Possible optimization is to special encode the parameter-types to conserve space
private def readResolve(): AnyRef = MethodCall(ownerType.getDeclaredMethod(methodName, parameterTypes: _*), parameterValues)
}
@@ -157,7 +159,7 @@ object TypedActor {
val proxy: T = Proxy.newProxyInstance(loader, interfaces, new TypedActorInvocationHandler(ref)).asInstanceOf[T]
proxyRef.set(proxy) // Chicken and egg situation we needed to solve, set the proxy so that we can set the self-reference inside each receive
- Actor.registry.registerTypedActor(ref.start, proxy) //We only have access to the proxy from the outside, so register it with the ActorRegistry, will be removed on actor.stop
+ Actor.registry.registerTypedActor(ref, proxy) //We only have access to the proxy from the outside, so register it with the ActorRegistry, will be removed on actor.stop
proxy
}
diff --git a/akka-actor/src/main/scala/akka/cluster/ClusterInterface.scala b/akka-actor/src/main/scala/akka/cluster/ClusterInterface.scala
index 7f7da6d29c..82155ebc90 100644
--- a/akka-actor/src/main/scala/akka/cluster/ClusterInterface.scala
+++ b/akka-actor/src/main/scala/akka/cluster/ClusterInterface.scala
@@ -7,6 +7,7 @@ package akka.cluster
import akka.remoteinterface.RemoteSupport
import akka.serialization.Serializer
import akka.actor._
+import DeploymentConfig._
import akka.dispatch.Future
import akka.config.Config
import akka.util._
@@ -129,13 +130,13 @@ object NodeAddress {
trait ClusterNode {
import ChangeListener._
- val nodeAddress: NodeAddress
- val zkServerAddresses: String
+ def nodeAddress: NodeAddress
+ def zkServerAddresses: String
- val remoteClientLifeCycleListener: ActorRef
- val remoteDaemon: ActorRef
- val remoteService: RemoteSupport
- val remoteServerAddress: InetSocketAddress
+ def remoteClientLifeCycleListener: ActorRef
+ def remoteDaemon: ActorRef
+ def remoteService: RemoteSupport
+ def remoteServerAddress: InetSocketAddress
val isConnected = new Switch(false)
val isLeader = new AtomicBoolean(false)
@@ -179,6 +180,13 @@ trait ClusterNode {
*/
def store[T <: Actor](address: String, actorClass: Class[T], format: Serializer): ClusterNode
+ /**
+ * Clusters an actor of a specific type. If the actor is already clustered then the clustered version will be updated
+ * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly
+ * available durable store.
+ */
+ def store[T <: Actor](address: String, actorClass: Class[T], replicationScheme: ReplicationScheme, format: Serializer): ClusterNode
+
/**
* Clusters an actor of a specific type. If the actor is already clustered then the clustered version will be updated
* with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly
@@ -186,6 +194,13 @@ trait ClusterNode {
*/
def store[T <: Actor](address: String, actorClass: Class[T], replicationFactor: Int, format: Serializer): ClusterNode
+ /**
+ * Clusters an actor of a specific type. If the actor is already clustered then the clustered version will be updated
+ * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly
+ * available durable store.
+ */
+ def store[T <: Actor](address: String, actorClass: Class[T], replicationFactor: Int, replicationScheme: ReplicationScheme, format: Serializer): ClusterNode
+
/**
* Clusters an actor of a specific type. If the actor is already clustered then the clustered version will be updated
* with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly
@@ -193,6 +208,13 @@ trait ClusterNode {
*/
def store[T <: Actor](address: String, actorClass: Class[T], serializeMailbox: Boolean, format: Serializer): ClusterNode
+ /**
+ * Clusters an actor of a specific type. If the actor is already clustered then the clustered version will be updated
+ * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly
+ * available durable store.
+ */
+ def store[T <: Actor](address: String, actorClass: Class[T], replicationScheme: ReplicationScheme, serializeMailbox: Boolean, format: Serializer): ClusterNode
+
/**
* Clusters an actor of a specific type. If the actor is already clustered then the clustered version will be updated
* with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly
@@ -200,6 +222,13 @@ trait ClusterNode {
*/
def store[T <: Actor](address: String, actorClass: Class[T], replicationFactor: Int, serializeMailbox: Boolean, format: Serializer): ClusterNode
+ /**
+ * Clusters an actor of a specific type. If the actor is already clustered then the clustered version will be updated
+ * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly
+ * available durable store.
+ */
+ def store[T <: Actor](address: String, actorClass: Class[T], replicationFactor: Int, replicationScheme: ReplicationScheme, serializeMailbox: Boolean, format: Serializer): ClusterNode
+
/**
* Clusters an actor with UUID. If the actor is already clustered then the clustered version will be updated
* with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly
@@ -207,6 +236,13 @@ trait ClusterNode {
*/
def store(actorRef: ActorRef, format: Serializer): ClusterNode
+ /**
+ * Clusters an actor with UUID. If the actor is already clustered then the clustered version will be updated
+ * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly
+ * available durable store.
+ */
+ def store(actorRef: ActorRef, replicationScheme: ReplicationScheme, format: Serializer): ClusterNode
+
/**
* Clusters an actor with UUID. If the actor is already clustered then the clustered version will be updated
* with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly
@@ -214,6 +250,13 @@ trait ClusterNode {
*/
def store(actorRef: ActorRef, replicationFactor: Int, format: Serializer): ClusterNode
+ /**
+ * Clusters an actor with UUID. If the actor is already clustered then the clustered version will be updated
+ * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly
+ * available durable store.
+ */
+ def store(actorRef: ActorRef, replicationFactor: Int, replicationScheme: ReplicationScheme, format: Serializer): ClusterNode
+
/**
* Clusters an actor with UUID. If the actor is already clustered then the clustered version will be updated
* with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly
@@ -221,11 +264,23 @@ trait ClusterNode {
*/
def store(actorRef: ActorRef, serializeMailbox: Boolean, format: Serializer): ClusterNode
+ /**
+ * Clusters an actor with UUID. If the actor is already clustered then the clustered version will be updated
+ * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly
+ * available durable store.
+ */
+ def store(actorRef: ActorRef, replicationScheme: ReplicationScheme, serializeMailbox: Boolean, format: Serializer): ClusterNode
+
/**
* Needed to have reflection through structural typing work.
*/
def store(actorRef: ActorRef, replicationFactor: Int, serializeMailbox: Boolean, format: AnyRef): ClusterNode
+ /**
+ * Needed to have reflection through structural typing work.
+ */
+ def store(actorRef: ActorRef, replicationFactor: Int, replicationScheme: ReplicationScheme, serializeMailbox: Boolean, format: AnyRef): ClusterNode
+
/**
* Clusters an actor with UUID. If the actor is already clustered then the clustered version will be updated
* with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly
@@ -233,6 +288,13 @@ trait ClusterNode {
*/
def store(actorRef: ActorRef, replicationFactor: Int, serializeMailbox: Boolean, format: Serializer): ClusterNode
+ /**
+ * Clusters an actor with UUID. If the actor is already clustered then the clustered version will be updated
+ * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly
+ * available durable store.
+ */
+ def store(actorRef: ActorRef, replicationFactor: Int, replicationScheme: ReplicationScheme, serializeMailbox: Boolean, format: Serializer): ClusterNode
+
/**
* Removes actor with uuid from the cluster.
*/
@@ -262,13 +324,13 @@ trait ClusterNode {
* Checks out an actor for use on this node, e.g. checked out as a 'LocalActorRef' but it makes it available
* for remote access through lookup by its UUID.
*/
- def use[T <: Actor](actorAddress: String): Option[LocalActorRef]
+ def use[T <: Actor](actorAddress: String): Option[ActorRef]
/**
* Checks out an actor for use on this node, e.g. checked out as a 'LocalActorRef' but it makes it available
* for remote access through lookup by its UUID.
*/
- def use[T <: Actor](actorAddress: String, format: Serializer): Option[LocalActorRef]
+ def use[T <: Actor](actorAddress: String, format: Serializer): Option[ActorRef]
/**
* Using (checking out) all actors with a specific UUID on all nodes in the cluster.
diff --git a/akka-actor/src/main/scala/akka/dispatch/Dispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/Dispatcher.scala
index bf02af5997..b78b99cabf 100644
--- a/akka-actor/src/main/scala/akka/dispatch/Dispatcher.scala
+++ b/akka-actor/src/main/scala/akka/dispatch/Dispatcher.scala
@@ -110,6 +110,8 @@ class Dispatcher(
*/
protected def getMailbox(receiver: ActorRef) = receiver.mailbox.asInstanceOf[MessageQueue with ExecutableMailbox]
+ def mailboxIsEmpty(actorRef: ActorRef): Boolean = getMailbox(actorRef).isEmpty
+
override def mailboxSize(actorRef: ActorRef) = getMailbox(actorRef).size
def createMailbox(actorRef: ActorRef): AnyRef = mailboxType match {
diff --git a/akka-actor/src/main/scala/akka/dispatch/MessageHandling.scala b/akka-actor/src/main/scala/akka/dispatch/MessageHandling.scala
index 22c3b46c52..40aae3f691 100644
--- a/akka-actor/src/main/scala/akka/dispatch/MessageHandling.scala
+++ b/akka-actor/src/main/scala/akka/dispatch/MessageHandling.scala
@@ -22,13 +22,8 @@ final case class MessageInvocation(receiver: ActorRef,
senderFuture: Option[Promise[Any]]) {
if (receiver eq null) throw new IllegalArgumentException("Receiver can't be null")
- def invoke() {
- try {
- receiver.invoke(this)
- } catch {
- case e: NullPointerException ⇒ throw new ActorInitializationException(
- "Don't call 'self ! message' in the Actor's constructor (in Scala this means in the body of the class).")
- }
+ final def invoke() {
+ receiver invoke this
}
}
@@ -177,7 +172,7 @@ trait MessageDispatcher {
val uuid = i.next()
Actor.registry.local.actorFor(uuid) match {
case Some(actor) ⇒ actor.stop()
- case None ⇒ {}
+ case None ⇒
}
}
}
@@ -240,6 +235,11 @@ trait MessageDispatcher {
*/
def mailboxSize(actorRef: ActorRef): Int
+ /**
+ * Returns the "current" emptiness status of the mailbox for the specified actor
+ */
+ def mailboxIsEmpty(actorRef: ActorRef): Boolean
+
/**
* Returns the amount of futures queued for execution
*/
diff --git a/akka-actor/src/main/scala/akka/dispatch/ThreadPoolBuilder.scala b/akka-actor/src/main/scala/akka/dispatch/ThreadPoolBuilder.scala
index f260cf39b4..b1c0f6e747 100644
--- a/akka-actor/src/main/scala/akka/dispatch/ThreadPoolBuilder.scala
+++ b/akka-actor/src/main/scala/akka/dispatch/ThreadPoolBuilder.scala
@@ -154,7 +154,7 @@ class MonitorableThreadFactory(val name: String) extends ThreadFactory {
* @author Jonas Bonér
*/
object MonitorableThread {
- val DEFAULT_NAME = "MonitorableThread"
+ val DEFAULT_NAME = "MonitorableThread".intern
// FIXME use MonitorableThread.created and MonitorableThread.alive in monitoring
val created = new AtomicInteger
diff --git a/akka-actor/src/main/scala/akka/routing/Pool.scala b/akka-actor/src/main/scala/akka/routing/Pool.scala
index c036616521..0c2c9d6378 100644
--- a/akka-actor/src/main/scala/akka/routing/Pool.scala
+++ b/akka-actor/src/main/scala/akka/routing/Pool.scala
@@ -116,7 +116,7 @@ trait SmallestMailboxSelector {
var take = if (partialFill) math.min(selectionCount, delegates.length) else selectionCount
while (take > 0) {
- set = delegates.sortWith(_.mailboxSize < _.mailboxSize).take(take) ++ set //Question, doesn't this risk selecting the same actor multiple times?
+ set = delegates.sortWith((a, b) ⇒ a.dispatcher.mailboxSize(a) < b.dispatcher.mailboxSize(b)).take(take) ++ set //Question, doesn't this risk selecting the same actor multiple times?
take -= set.size
}
@@ -187,7 +187,7 @@ trait BoundedCapacitor {
trait MailboxPressureCapacitor {
def pressureThreshold: Int
def pressure(delegates: Seq[ActorRef]): Int =
- delegates count { _.mailboxSize > pressureThreshold }
+ delegates count { a ⇒ a.dispatcher.mailboxSize(a) > pressureThreshold }
}
/**
diff --git a/akka-actor/src/main/scala/akka/routing/Routing.scala b/akka-actor/src/main/scala/akka/routing/Routing.scala
index 454760594a..e707e23f23 100644
--- a/akka-actor/src/main/scala/akka/routing/Routing.scala
+++ b/akka-actor/src/main/scala/akka/routing/Routing.scala
@@ -206,7 +206,7 @@ case class SmallestMailboxFirstIterator(val items: Seq[ActorRef]) extends Infini
def this(items: java.util.List[ActorRef]) = this(items.toList)
def hasNext = items != Nil
- def next = items.reduceLeft((a1, a2) ⇒ if (a1.mailboxSize < a2.mailboxSize) a1 else a2)
+ def next = items.reduceLeft((a1, a2) ⇒ if (a1.dispatcher.mailboxSize(a1) < a2.dispatcher.mailboxSize(a2)) a1 else a2)
override def exists(f: ActorRef ⇒ Boolean): Boolean = items.exists(f)
}
diff --git a/akka-actor/src/main/scala/akka/util/ReflectiveAccess.scala b/akka-actor/src/main/scala/akka/util/ReflectiveAccess.scala
index af530c3068..42fd88a78f 100644
--- a/akka-actor/src/main/scala/akka/util/ReflectiveAccess.scala
+++ b/akka-actor/src/main/scala/akka/util/ReflectiveAccess.scala
@@ -8,7 +8,7 @@ import akka.dispatch.{ Future, Promise, MessageInvocation }
import akka.config.{ Config, ModuleNotAvailableException }
import akka.remoteinterface.RemoteSupport
import akka.actor._
-import DeploymentConfig.Deploy
+import DeploymentConfig.{ Deploy, ReplicationScheme, ReplicationStrategy }
import akka.event.EventHandler
import akka.serialization.Format
import akka.cluster.ClusterNode
@@ -62,6 +62,13 @@ object ReflectiveAccess {
None
}
+ lazy val transactionLogInstance: Option[TransactionLogObject] = getObjectFor("akka.cluster.TransactionLog$") match {
+ case Right(value) ⇒ Some(value)
+ case Left(exception) ⇒
+ EventHandler.debug(this, exception.toString)
+ None
+ }
+
lazy val node: ClusterNode = {
ensureEnabled()
clusterInstance.get.node
@@ -72,6 +79,11 @@ object ReflectiveAccess {
clusterDeployerInstance.get
}
+ lazy val transactionLog: TransactionLogObject = {
+ ensureEnabled()
+ transactionLogInstance.get
+ }
+
type ClusterDeployer = {
def init(deployments: List[Deploy])
def shutdown()
@@ -94,6 +106,35 @@ object ReflectiveAccess {
def toBinary(obj: AnyRef): Array[Byte]
def fromBinary(bytes: Array[Byte], clazz: Option[Class[_]]): AnyRef
}
+
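+ // Structural type of the akka.cluster.TransactionLog companion object, accessed reflectively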
+ type TransactionLogObject = {
+ def newLogFor(
+ id: String,
+ isAsync: Boolean,
+ replicationScheme: ReplicationScheme,
+ format: Serializer): TransactionLog
+
+ def logFor(
+ id: String,
+ isAsync: Boolean,
+ replicationScheme: ReplicationScheme,
+ format: Serializer): TransactionLog
+
+ def shutdown()
+ }
+
+ type TransactionLog = {
+ def recordEntry(messageHandle: MessageInvocation, actorRef: ActorRef)
+ def recordEntry(entry: Array[Byte])
+ def recordSnapshot(snapshot: Array[Byte])
+ def entries: Vector[Array[Byte]]
+ def entriesFromLatestSnapshot: Tuple2[Array[Byte], Vector[Array[Byte]]]
+ def entriesInRange(from: Long, to: Long): Vector[Array[Byte]]
+ def latestEntryId: Long
+ def latestSnapshotId: Long
+ def delete()
+ def close()
+ }
}
/**
@@ -104,7 +145,7 @@ object ReflectiveAccess {
object RemoteModule {
val TRANSPORT = Config.config.getString("akka.remote.layer", "akka.remote.netty.NettyRemoteSupport")
- private[akka] val configDefaultAddress = new InetSocketAddress(Config.hostname, Config.remoteServerPort)
+ val configDefaultAddress = new InetSocketAddress(Config.hostname, Config.remoteServerPort)
lazy val isEnabled = remoteSupportClass.isDefined
diff --git a/akka-camel-typed/src/main/scala/akka/camel/TypedConsumer.scala b/akka-camel-typed/src/main/scala/akka/camel/TypedConsumer.scala
index c315742f3a..8df76dadbe 100644
--- a/akka-camel-typed/src/main/scala/akka/camel/TypedConsumer.scala
+++ b/akka-camel-typed/src/main/scala/akka/camel/TypedConsumer.scala
@@ -5,13 +5,16 @@
package akka.camel
import java.lang.reflect.Method
+import java.lang.reflect.Proxy._
import akka.actor.{ TypedActor, ActorRef }
+import akka.actor.TypedActor._
/**
* @author Martin Krasser
*/
private[camel] object TypedConsumer {
+
/**
* Applies a function f to actorRef if actorRef
* references a typed consumer actor. A valid reference to a typed consumer actor is a
@@ -21,18 +24,35 @@ private[camel] object TypedConsumer {
* is called with the corresponding method instance and the return value is
* added to a list which is then returned by this method.
*/
- def withTypedConsumer[T](actorRef: ActorRef)(f: Method ⇒ T): List[T] = {
- if (!actorRef.actor.isInstanceOf[TypedActor]) Nil
- else if (actorRef.homeAddress.isDefined) Nil
- else {
- val typedActor = actorRef.actor.asInstanceOf[TypedActor]
- // TODO: support consumer annotation inheritance
- // - visit overridden methods in superclasses
- // - visit implemented method declarations in interfaces
- val intfClass = typedActor.proxy.getClass
- val implClass = typedActor.getClass
- (for (m ← intfClass.getMethods.toList; if (m.isAnnotationPresent(classOf[consume]))) yield f(m)) ++
- (for (m ← implClass.getMethods.toList; if (m.isAnnotationPresent(classOf[consume]))) yield f(m))
+ def withTypedConsumer[T](actorRef: ActorRef, typedActor: Option[AnyRef])(f: (AnyRef, Method) ⇒ T): List[T] = {
+ typedActor match {
+ case None ⇒ Nil
+ case Some(tc) ⇒ {
+ withConsumeAnnotatedMethodsOnInterfaces(tc, f) ++
+ withConsumeAnnotatedMethodsonImplClass(tc, actorRef, f)
+ }
+ }
+ }
+
+ private implicit def class2ProxyClass(c: Class[_]) = new ProxyClass(c)
+
+ private def withConsumeAnnotatedMethodsOnInterfaces[T](tc: AnyRef, f: (AnyRef, Method) ⇒ T): List[T] = for {
+ i ← tc.getClass.allInterfaces
+ m ← i.getDeclaredMethods.toList
+ if (m.isAnnotationPresent(classOf[consume]))
+ } yield f(tc, m)
+
+ private def withConsumeAnnotatedMethodsonImplClass[T](tc: AnyRef, actorRef: ActorRef, f: (AnyRef, Method) ⇒ T): List[T] = {
+ val implClass = actorRef.actor.asInstanceOf[TypedActor.TypedActor[AnyRef, AnyRef]].me.getClass
+ for (m ← implClass.getDeclaredMethods.toList; if (m.isAnnotationPresent(classOf[consume]))) yield f(tc, m)
+
+ }
+
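+ // Recursively collects all interfaces implemented by a class, including interfaces of its interfaces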
+ private class ProxyClass(c: Class[_]) {
+ def allInterfaces: List[Class[_]] = allInterfaces(c.getInterfaces.toList)
+ def allInterfaces(is: List[Class[_]]): List[Class[_]] = is match {
+ case Nil ⇒ Nil
+ case x :: xs ⇒ x :: allInterfaces(x.getInterfaces.toList) ::: allInterfaces(xs)
}
}
}
diff --git a/akka-camel-typed/src/main/scala/akka/camel/TypedConsumerPublisher.scala b/akka-camel-typed/src/main/scala/akka/camel/TypedConsumerPublisher.scala
index 883b98afce..fae8426cbe 100644
--- a/akka-camel-typed/src/main/scala/akka/camel/TypedConsumerPublisher.scala
+++ b/akka-camel-typed/src/main/scala/akka/camel/TypedConsumerPublisher.scala
@@ -7,8 +7,8 @@ package akka.camel
import java.lang.reflect.Method
import akka.actor._
-import akka.event.EventHandler
import akka.camel.component.TypedActorComponent
+import akka.event.EventHandler
/**
* Concrete publish requestor that requests publication of typed consumer actor methods on
@@ -19,8 +19,8 @@ import akka.camel.component.TypedActorComponent
*/
private[camel] class TypedConsumerPublishRequestor extends PublishRequestor {
def receiveActorRegistryEvent = {
- case ActorRegistered(actor) ⇒ for (event ← ConsumerMethodRegistered.eventsFor(actor)) deliverCurrentEvent(event)
- case ActorUnregistered(actor) ⇒ for (event ← ConsumerMethodUnregistered.eventsFor(actor)) deliverCurrentEvent(event)
+ case ActorRegistered(_, actor, typedActor) ⇒ for (event ← ConsumerMethodRegistered.eventsFor(actor, typedActor)) deliverCurrentEvent(event)
+ case ActorUnregistered(_, actor, typedActor) ⇒ for (event ← ConsumerMethodUnregistered.eventsFor(actor, typedActor)) deliverCurrentEvent(event)
}
}
@@ -84,12 +84,12 @@ private[camel] class ConsumerMethodRouteBuilder(event: ConsumerMethodRegistered)
*/
private[camel] trait ConsumerMethodEvent extends ConsumerEvent {
val actorRef: ActorRef
+ val typedActor: AnyRef
val method: Method
val uuid = actorRef.uuid.toString
val methodName = method.getName
val methodUuid = "%s_%s" format (uuid, methodName)
- val typedActor = actorRef.actor.asInstanceOf[TypedActor].proxy
lazy val routeDefinitionHandler = consumeAnnotation.routeDefinitionHandler.newInstance
lazy val consumeAnnotation = method.getAnnotation(classOf[consume])
@@ -100,13 +100,13 @@ private[camel] trait ConsumerMethodEvent extends ConsumerEvent {
* Event indicating that a typed consumer actor has been registered at the actor registry. For
* each @consume annotated typed actor method a separate event is created.
*/
-private[camel] case class ConsumerMethodRegistered(actorRef: ActorRef, method: Method) extends ConsumerMethodEvent
+private[camel] case class ConsumerMethodRegistered(actorRef: ActorRef, typedActor: AnyRef, method: Method) extends ConsumerMethodEvent
/**
* Event indicating that a typed consumer actor has been unregistered from the actor registry. For
* each @consume annotated typed actor method a separate event is created.
*/
-private[camel] case class ConsumerMethodUnregistered(actorRef: ActorRef, method: Method) extends ConsumerMethodEvent
+private[camel] case class ConsumerMethodUnregistered(actorRef: ActorRef, typedActor: AnyRef, method: Method) extends ConsumerMethodEvent
/**
* @author Martin Krasser
@@ -116,9 +116,9 @@ private[camel] object ConsumerMethodRegistered {
* Creates a list of ConsumerMethodRegistered event messages for a typed consumer actor or an empty
* list if actorRef doesn't reference a typed consumer actor.
*/
- def eventsFor(actorRef: ActorRef): List[ConsumerMethodRegistered] = {
- TypedConsumer.withTypedConsumer(actorRef: ActorRef) { m ⇒
- ConsumerMethodRegistered(actorRef, m)
+ def eventsFor(actorRef: ActorRef, typedActor: Option[AnyRef]): List[ConsumerMethodRegistered] = {
+ TypedConsumer.withTypedConsumer(actorRef, typedActor) { (tc, m) ⇒
+ ConsumerMethodRegistered(actorRef, tc, m)
}
}
}
@@ -131,9 +131,9 @@ private[camel] object ConsumerMethodUnregistered {
* Creates a list of ConsumerMethodUnregistered event messages for a typed consumer actor or an empty
* list if actorRef doesn't reference a typed consumer actor.
*/
- def eventsFor(actorRef: ActorRef): List[ConsumerMethodUnregistered] = {
- TypedConsumer.withTypedConsumer(actorRef) { m ⇒
- ConsumerMethodUnregistered(actorRef, m)
+ def eventsFor(actorRef: ActorRef, typedActor: Option[AnyRef]): List[ConsumerMethodUnregistered] = {
+ TypedConsumer.withTypedConsumer(actorRef, typedActor) { (tc, m) ⇒
+ ConsumerMethodUnregistered(actorRef, tc, m)
}
}
}
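
Editor's note: a rough, hypothetical sketch of the fan-out the requestor and the eventsFor factories above perform. One registration of a typed consumer yields one event per @consume-annotated method, keyed by "<actorUuid>_<methodName>" as in ConsumerMethodEvent. MethodEventSketch, MethodEvent and fanOut are illustrative names, not part of this patch.

import java.lang.reflect.Method

object MethodEventSketch {
  // Illustrative stand-in for ConsumerMethodRegistered/Unregistered.
  case class MethodEvent(methodUuid: String, endpointUri: String)

  def fanOut(actorUuid: String, annotated: List[Method]): List[MethodEvent] =
    annotated map { m ⇒
      val uri = m.getAnnotation(classOf[akka.camel.consume]).value // endpoint URI from @consume
      MethodEvent("%s_%s" format (actorUuid, m.getName), uri)
    }
}
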
diff --git a/akka-camel-typed/src/main/scala/akka/camel/component/TypedActorComponent.scala b/akka-camel-typed/src/main/scala/akka/camel/component/TypedActorComponent.scala
index 5110867fa7..36d6c50516 100644
--- a/akka-camel-typed/src/main/scala/akka/camel/component/TypedActorComponent.scala
+++ b/akka-camel-typed/src/main/scala/akka/camel/component/TypedActorComponent.scala
@@ -65,10 +65,10 @@ class TypedActorHolder(uri: String, context: CamelContext, name: String)
extends RegistryBean(context, name) {
/**
- * Returns an akka.camel.component.TypedActorInfo instance.
+ * Returns an org.apache.camel.component.bean.BeanInfo instance.
*/
override def getBeanInfo: BeanInfo =
- new TypedActorInfo(getContext, getBean.getClass, getParameterMappingStrategy)
+ new BeanInfo(getContext, getBean.getClass, getParameterMappingStrategy)
/**
* Obtains a typed actor from Actor.registry if the schema is
@@ -80,39 +80,6 @@ class TypedActorHolder(uri: String, context: CamelContext, name: String)
*/
override def getBean: AnyRef = {
val internal = uri.startsWith(TypedActorComponent.InternalSchema)
- if (internal) Actor.registry.typedActorFor(uuidFrom(getName)) getOrElse null else super.getBean
- }
-}
-
-/**
- * Typed actor meta information.
- *
- * @author Martin Krasser
- */
-class TypedActorInfo(context: CamelContext, clazz: Class[_], strategy: ParameterMappingStrategy)
- extends BeanInfo(context, clazz, strategy) {
-
- /**
- * Introspects AspectWerkz proxy classes.
- *
- * @param clazz AspectWerkz proxy class.
- */
- protected override def introspect(clazz: Class[_]): Unit = {
-
- // TODO: fix target class detection in BeanInfo.introspect(Class)
- // Camel assumes that classes containing a '$$' in the class name
- // are classes generated with CGLIB. This conflicts with proxies
- // created from interfaces with AspectWerkz. Once the fix is in
- // place this method can be removed.
-
- for (method ← clazz.getDeclaredMethods) {
- if (isValidMethod(clazz, method)) {
- introspect(clazz, method)
- }
- }
- val superclass = clazz.getSuperclass
- if ((superclass ne null) && !superclass.equals(classOf[AnyRef])) {
- introspect(superclass)
- }
+ if (internal) Actor.registry.local.typedActorFor(uuidFrom(getName)) getOrElse null else super.getBean
}
}
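
Editor's note: conceptually, the getBean override above is a scheme-dependent lookup; internal URIs resolve the typed actor proxy by uuid via Actor.registry.local.typedActorFor, anything else falls back to the regular Camel registry. A minimal sketch with a hypothetical TypedActorLookup stand-in, assumptions noted in the comments:

object BeanLookupSketch {
  // Stand-in for the uuid-keyed typed actor lookup on the local registry.
  trait TypedActorLookup {
    def typedActorFor(uuid: String): Option[AnyRef]
  }

  def resolveBean(uri: String, beanName: String, registry: TypedActorLookup, fallback: ⇒ AnyRef): AnyRef =
    if (uri startsWith "typed-actor-internal:") // assumed internal scheme prefix
      registry.typedActorFor(beanName) getOrElse null
    else
      fallback // external scheme: regular Camel registry lookup
}
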
diff --git a/akka-camel-typed/src/test/java/akka/camel/SampleErrorHandlingTypedConsumerImpl.java b/akka-camel-typed/src/test/java/akka/camel/SampleErrorHandlingTypedConsumerImpl.java
index cfa42a7521..89b3948b00 100644
--- a/akka-camel-typed/src/test/java/akka/camel/SampleErrorHandlingTypedConsumerImpl.java
+++ b/akka-camel-typed/src/test/java/akka/camel/SampleErrorHandlingTypedConsumerImpl.java
@@ -1,11 +1,9 @@
package akka.camel;
-import akka.actor.TypedActor;
-
/**
* @author Martin Krasser
*/
-public class SampleErrorHandlingTypedConsumerImpl extends TypedActor implements SampleErrorHandlingTypedConsumer {
+public class SampleErrorHandlingTypedConsumerImpl implements SampleErrorHandlingTypedConsumer {
public String willFail(String s) {
throw new RuntimeException(String.format("error: %s", s));
diff --git a/akka-camel-typed/src/test/java/akka/camel/SampleRemoteTypedConsumerImpl.java b/akka-camel-typed/src/test/java/akka/camel/SampleRemoteTypedConsumerImpl.java
index d7fb463b44..067fb4eda6 100644
--- a/akka-camel-typed/src/test/java/akka/camel/SampleRemoteTypedConsumerImpl.java
+++ b/akka-camel-typed/src/test/java/akka/camel/SampleRemoteTypedConsumerImpl.java
@@ -1,11 +1,9 @@
package akka.camel;
-import akka.actor.TypedActor;
-
/**
* @author Martin Krasser
*/
-public class SampleRemoteTypedConsumerImpl extends TypedActor implements SampleRemoteTypedConsumer {
+public class SampleRemoteTypedConsumerImpl implements SampleRemoteTypedConsumer {
public String foo(String s) {
return String.format("remote typed actor: %s", s);
diff --git a/akka-camel-typed/src/test/java/akka/camel/SampleTypedActorImpl.java b/akka-camel-typed/src/test/java/akka/camel/SampleTypedActorImpl.java
index 773e3ec3ec..93d6cd9395 100644
--- a/akka-camel-typed/src/test/java/akka/camel/SampleTypedActorImpl.java
+++ b/akka-camel-typed/src/test/java/akka/camel/SampleTypedActorImpl.java
@@ -5,7 +5,7 @@ import akka.actor.TypedActor;
/**
* @author Martin Krasser
*/
-public class SampleTypedActorImpl extends TypedActor implements SampleTypedActor {
+public class SampleTypedActorImpl implements SampleTypedActor {
public String foo(String s) {
return String.format("foo: %s", s);
diff --git a/akka-camel-typed/src/test/java/akka/camel/SampleTypedConsumerImpl.java b/akka-camel-typed/src/test/java/akka/camel/SampleTypedConsumerImpl.java
index 3bbe7a9442..8a402133f6 100644
--- a/akka-camel-typed/src/test/java/akka/camel/SampleTypedConsumerImpl.java
+++ b/akka-camel-typed/src/test/java/akka/camel/SampleTypedConsumerImpl.java
@@ -1,11 +1,9 @@
package akka.camel;
-import akka.actor.TypedActor;
-
/**
* @author Martin Krasser
*/
-public class SampleTypedConsumerImpl extends TypedActor implements SampleTypedConsumer {
+public class SampleTypedConsumerImpl implements SampleTypedConsumer {
public String m1(String b, String h) {
return "m1: " + b + " " + h;
diff --git a/akka-camel-typed/src/test/java/akka/camel/SampleTypedSingleConsumerImpl.java b/akka-camel-typed/src/test/java/akka/camel/SampleTypedSingleConsumerImpl.java
index 27fbfdaa0d..fa4807eec4 100644
--- a/akka-camel-typed/src/test/java/akka/camel/SampleTypedSingleConsumerImpl.java
+++ b/akka-camel-typed/src/test/java/akka/camel/SampleTypedSingleConsumerImpl.java
@@ -1,11 +1,9 @@
package akka.camel;
-import akka.actor.TypedActor;
-
/**
* @author Martin Krasser
*/
-public class SampleTypedSingleConsumerImpl extends TypedActor implements SampleTypedSingleConsumer {
+public class SampleTypedSingleConsumerImpl implements SampleTypedSingleConsumer {
public void foo(String b) {
}
diff --git a/akka-camel-typed/src/test/java/akka/camel/TypedConsumerJavaTestBase.java b/akka-camel-typed/src/test/java/akka/camel/TypedConsumerJavaTestBase.java
index 64e8197de8..64aa29ed54 100644
--- a/akka-camel-typed/src/test/java/akka/camel/TypedConsumerJavaTestBase.java
+++ b/akka-camel-typed/src/test/java/akka/camel/TypedConsumerJavaTestBase.java
@@ -1,7 +1,11 @@
package akka.camel;
+import akka.actor.Actor;
import akka.actor.TypedActor;
+import akka.actor.TypedActor.Configuration;
+import akka.dispatch.Dispatchers;
import akka.japi.SideEffect;
+import akka.util.FiniteDuration;
import org.junit.AfterClass;
import org.junit.BeforeClass;
@@ -28,16 +32,18 @@ public class TypedConsumerJavaTestBase {
@AfterClass
public static void tearDownAfterClass() {
stopCamelService();
- registry().shutdownAll();
+ registry().local().shutdownAll();
}
@Test
public void shouldHandleExceptionThrownByTypedActorAndGenerateCustomResponse() {
getMandatoryService().awaitEndpointActivation(1, new SideEffect() {
public void apply() {
- consumer = TypedActor.newInstance(
+ consumer = TypedActor.typedActorOf(
SampleErrorHandlingTypedConsumer.class,
- SampleErrorHandlingTypedConsumerImpl.class);
+ SampleErrorHandlingTypedConsumerImpl.class,
+ new Configuration(new FiniteDuration(5000, "millis"), Dispatchers.defaultGlobalDispatcher()));
}
});
String result = getMandatoryTemplate().requestBody("direct:error-handler-test-java-typed", "hello", String.class);
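
Editor's note: for reference, the equivalent construction in Scala, assuming the Configuration signature used in the Java test above (timeout plus dispatcher). This is a sketch, not part of the patch.

import akka.actor.TypedActor
import akka.actor.TypedActor.Configuration
import akka.dispatch.Dispatchers
import akka.util.FiniteDuration

object TypedConsumerCreationSketch {
  // Interface and implementation classes are the samples from this module.
  val consumer =
    TypedActor.typedActorOf(
      classOf[SampleErrorHandlingTypedConsumer],
      classOf[SampleErrorHandlingTypedConsumerImpl],
      new Configuration(new FiniteDuration(5000, "millis"), Dispatchers.defaultGlobalDispatcher))
}
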
diff --git a/akka-camel-typed/src/test/scala/akka/camel/TypedConsumerPublishRequestorTest.scala b/akka-camel-typed/src/test/scala/akka/camel/TypedConsumerPublishRequestorTest.scala
index 406c5656c1..cc4ff0f0cf 100644
--- a/akka-camel-typed/src/test/scala/akka/camel/TypedConsumerPublishRequestorTest.scala
+++ b/akka-camel-typed/src/test/scala/akka/camel/TypedConsumerPublishRequestorTest.scala
@@ -7,6 +7,7 @@ import org.scalatest.junit.JUnitSuite
import akka.actor._
import akka.actor.Actor._
+import akka.actor.TypedActor.Configuration._
import akka.camel.TypedCamelTestSupport.{ SetExpectedMessageCount ⇒ SetExpectedTestMessageCount, _ }
class TypedConsumerPublishRequestorTest extends JUnitSuite {
@@ -33,14 +34,14 @@ class TypedConsumerPublishRequestorTest extends JUnitSuite {
@After
def tearDown = {
Actor.registry.removeListener(requestor);
- Actor.registry.shutdownAll
+ Actor.registry.local.shutdownAll
}
@Test
def shouldReceiveOneConsumerMethodRegisteredEvent = {
Actor.registry.addListener(requestor)
val latch = (publisher !! SetExpectedTestMessageCount(1)).as[CountDownLatch].get
- val obj = TypedActor.newInstance(classOf[SampleTypedSingleConsumer], classOf[SampleTypedSingleConsumerImpl])
+ val obj = TypedActor.typedActorOf(classOf[SampleTypedSingleConsumer], classOf[SampleTypedSingleConsumerImpl], defaultConfiguration)
assert(latch.await(5000, TimeUnit.MILLISECONDS))
val event = (publisher !! GetRetainedMessage).as[ConsumerMethodRegistered].get
assert(event.endpointUri === "direct:foo")
@@ -50,7 +51,7 @@ class TypedConsumerPublishRequestorTest extends JUnitSuite {
@Test
def shouldReceiveOneConsumerMethodUnregisteredEvent = {
- val obj = TypedActor.newInstance(classOf[SampleTypedSingleConsumer], classOf[SampleTypedSingleConsumerImpl])
+ val obj = TypedActor.typedActorOf(classOf[SampleTypedSingleConsumer], classOf[SampleTypedSingleConsumerImpl], defaultConfiguration)
val latch = (publisher !! SetExpectedTestMessageCount(1)).as[CountDownLatch].get
Actor.registry.addListener(requestor)
TypedActor.stop(obj)
@@ -65,7 +66,7 @@ class TypedConsumerPublishRequestorTest extends JUnitSuite {
def shouldReceiveThreeConsumerMethodRegisteredEvents = {
Actor.registry.addListener(requestor)
val latch = (publisher !! SetExpectedTestMessageCount(3)).as[CountDownLatch].get
- val obj = TypedActor.newInstance(classOf[SampleTypedConsumer], classOf[SampleTypedConsumerImpl])
+ val obj = TypedActor.typedActorOf(classOf[SampleTypedConsumer], classOf[SampleTypedConsumerImpl], defaultConfiguration)
assert(latch.await(5000, TimeUnit.MILLISECONDS))
val request = GetRetainedMessages(_.isInstanceOf[ConsumerMethodRegistered])
val events = (publisher !! request).as[List[ConsumerMethodRegistered]].get
@@ -74,7 +75,7 @@ class TypedConsumerPublishRequestorTest extends JUnitSuite {
@Test
def shouldReceiveThreeConsumerMethodUnregisteredEvents = {
- val obj = TypedActor.newInstance(classOf[SampleTypedConsumer], classOf[SampleTypedConsumerImpl])
+ val obj = TypedActor.typedActorOf(classOf[SampleTypedConsumer], classOf[SampleTypedConsumerImpl], defaultConfiguration)
val latch = (publisher !! SetExpectedTestMessageCount(3)).as[CountDownLatch].get
Actor.registry.addListener(requestor)
TypedActor.stop(obj)
diff --git a/akka-camel-typed/src/test/scala/akka/camel/TypedConsumerScalaTest.scala b/akka-camel-typed/src/test/scala/akka/camel/TypedConsumerScalaTest.scala
index 0cc0073e2d..1692c8e6fc 100644
--- a/akka-camel-typed/src/test/scala/akka/camel/TypedConsumerScalaTest.scala
+++ b/akka-camel-typed/src/test/scala/akka/camel/TypedConsumerScalaTest.scala
@@ -7,6 +7,7 @@ import org.scalatest.matchers.MustMatchers
import akka.actor.Actor._
import akka.actor._
+import akka.actor.TypedActor.Configuration._
/**
* @author Martin Krasser
@@ -18,13 +19,13 @@ class TypedConsumerScalaTest extends WordSpec with BeforeAndAfterAll with MustMa
var service: CamelService = _
override protected def beforeAll = {
- registry.shutdownAll
+ registry.local.shutdownAll
service = CamelServiceManager.startCamelService
}
override protected def afterAll = {
service.stop
- registry.shutdownAll
+ registry.local.shutdownAll
}
"A responding, typed consumer" when {
@@ -32,7 +33,7 @@ class TypedConsumerScalaTest extends WordSpec with BeforeAndAfterAll with MustMa
"started" must {
"support in-out message exchanges via its endpoints" in {
service.awaitEndpointActivation(3) {
- actor = TypedActor.newInstance(classOf[SampleTypedConsumer], classOf[SampleTypedConsumerImpl])
+ actor = TypedActor.typedActorOf(classOf[SampleTypedConsumer], classOf[SampleTypedConsumerImpl], defaultConfiguration)
} must be(true)
mandatoryTemplate.requestBodyAndHeader("direct:m2", "x", "test", "y") must equal("m2: x y")
mandatoryTemplate.requestBodyAndHeader("direct:m3", "x", "test", "y") must equal("m3: x y")
@@ -62,7 +63,7 @@ class TypedConsumerScalaTest extends WordSpec with BeforeAndAfterAll with MustMa
"started" must {
"support in-out message exchanges via its endpoints" in {
service.awaitEndpointActivation(2) {
- actor = TypedActor.newInstance(classOf[TestTypedConsumer], classOf[TestTypedConsumerImpl])
+ actor = TypedActor.typedActorOf(classOf[TestTypedConsumer], classOf[TestTypedConsumerImpl], defaultConfiguration)
} must be(true)
mandatoryTemplate.requestBody("direct:publish-test-3", "x") must equal("foo: x")
mandatoryTemplate.requestBody("direct:publish-test-4", "x") must equal("bar: x")
@@ -91,7 +92,7 @@ object TypedConsumerScalaTest {
def bar(s: String): String
}
- class TestTypedConsumerImpl extends TypedActor with TestTypedConsumer {
+ class TestTypedConsumerImpl extends TestTypedConsumer {
def foo(s: String) = "foo: %s" format s
@consume("direct:publish-test-4")
def bar(s: String) = "bar: %s" format s
diff --git a/akka-camel-typed/src/test/scala/akka/camel/component/TypedActorComponentFeatureTest.scala b/akka-camel-typed/src/test/scala/akka/camel/component/TypedActorComponentFeatureTest.scala
index 04d08023a3..91058e3109 100644
--- a/akka-camel-typed/src/test/scala/akka/camel/component/TypedActorComponentFeatureTest.scala
+++ b/akka-camel-typed/src/test/scala/akka/camel/component/TypedActorComponentFeatureTest.scala
@@ -6,8 +6,8 @@ import org.apache.camel.impl.{ DefaultCamelContext, SimpleRegistry }
import org.scalatest.{ BeforeAndAfterEach, BeforeAndAfterAll, FeatureSpec }
import akka.actor.{ Actor, TypedActor }
+import akka.actor.TypedActor.Configuration._
import akka.camel._
-import akka.util.ReflectiveAccess.TypedActorModule
/**
* @author Martin Krasser
@@ -19,10 +19,14 @@ class TypedActorComponentFeatureTest extends FeatureSpec with BeforeAndAfterAll
var typedConsumerUuid: String = _
override protected def beforeAll = {
- val typedActor = TypedActor.newInstance(classOf[SampleTypedActor], classOf[SampleTypedActorImpl]) // not a consumer
- val typedConsumer = TypedActor.newInstance(classOf[SampleTypedConsumer], classOf[SampleTypedConsumerImpl])
+ val typedActor = TypedActor.typedActorOf(
+ classOf[SampleTypedActor],
+ classOf[SampleTypedActorImpl], defaultConfiguration) // not a consumer
+ val typedConsumer = TypedActor.typedActorOf(
+ classOf[SampleTypedConsumer],
+ classOf[SampleTypedConsumerImpl], defaultConfiguration)
- typedConsumerUuid = TypedActorModule.typedActorObjectInstance.get.actorFor(typedConsumer).get.uuid.toString
+ typedConsumerUuid = TypedActor.getActorRefFor(typedConsumer).uuid.toString
val registry = new SimpleRegistry
// external registration
@@ -35,7 +39,7 @@ class TypedActorComponentFeatureTest extends FeatureSpec with BeforeAndAfterAll
override protected def afterAll = {
CamelContextManager.stop
- Actor.registry.shutdownAll
+ Actor.registry.local.shutdownAll
}
feature("Communicate with an internally-registered typed actor using typed-actor-internal endpoint URIs") {
diff --git a/akka-camel/src/main/scala/akka/camel/ConsumerPublisher.scala b/akka-camel/src/main/scala/akka/camel/ConsumerPublisher.scala
index fb15c9d1fc..507124ba2f 100644
--- a/akka-camel/src/main/scala/akka/camel/ConsumerPublisher.scala
+++ b/akka-camel/src/main/scala/akka/camel/ConsumerPublisher.scala
@@ -20,8 +20,8 @@ import akka.event.EventHandler
*/
private[camel] class ConsumerPublishRequestor extends PublishRequestor {
def receiveActorRegistryEvent = {
- case ActorRegistered(_, actor) ⇒ for (event ← ConsumerActorRegistered.eventFor(actor)) deliverCurrentEvent(event)
- case ActorUnregistered(_, actor) ⇒ for (event ← ConsumerActorUnregistered.eventFor(actor)) deliverCurrentEvent(event)
+ case ActorRegistered(_, actor, None) ⇒ for (event ← ConsumerActorRegistered.eventFor(actor)) deliverCurrentEvent(event)
+ case ActorUnregistered(_, actor, None) ⇒ for (event ← ConsumerActorUnregistered.eventFor(actor)) deliverCurrentEvent(event)
}
}
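
Editor's note: the matches above rely on the registry events now carrying a third, optional element for the typed actor proxy. The real definitions live in akka-actor and are not part of this diff; the following is only an approximate sketch of their shape.

// Approximate shape only; the real case classes are defined in akka.actor.
case class ActorRegistered(address: String, actorRef: akka.actor.ActorRef, typedActor: Option[AnyRef])
case class ActorUnregistered(address: String, actorRef: akka.actor.ActorRef, typedActor: Option[AnyRef])

// Untyped consumers register with typedActor == None (matched by ConsumerPublishRequestor),
// typed consumers with Some(proxy) (matched by TypedConsumerPublishRequestor).
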
diff --git a/akka-camel/src/main/scala/akka/camel/PublisherRequestor.scala b/akka-camel/src/main/scala/akka/camel/PublisherRequestor.scala
index 7083cdbe6e..7c1ace2b77 100644
--- a/akka-camel/src/main/scala/akka/camel/PublisherRequestor.scala
+++ b/akka-camel/src/main/scala/akka/camel/PublisherRequestor.scala
@@ -54,7 +54,7 @@ private[camel] abstract class PublishRequestor extends Actor {
* @author Martin Krasser
*/
private[camel] object PublishRequestor {
- def pastActorRegisteredEvents = for (actor ← Actor.registry.local.actors) yield ActorRegistered(actor.address, actor)
+ def pastActorRegisteredEvents = for (actor ← Actor.registry.local.actors) yield ActorRegistered(actor.address, actor, None)
}
/**
diff --git a/akka-camel/src/test/scala/akka/camel/ConsumerPublishRequestorTest.scala b/akka-camel/src/test/scala/akka/camel/ConsumerPublishRequestorTest.scala
index 4de98f335f..2cf4e3400f 100644
--- a/akka-camel/src/test/scala/akka/camel/ConsumerPublishRequestorTest.scala
+++ b/akka-camel/src/test/scala/akka/camel/ConsumerPublishRequestorTest.scala
@@ -36,7 +36,7 @@ class ConsumerPublishRequestorTest extends JUnitSuite {
@Test
def shouldReceiveOneConsumerRegisteredEvent = {
val latch = (publisher !! SetExpectedTestMessageCount(1)).as[CountDownLatch].get
- requestor ! ActorRegistered(consumer.address, consumer)
+ requestor ! ActorRegistered(consumer.address, consumer, None)
assert(latch.await(5000, TimeUnit.MILLISECONDS))
assert((publisher !! GetRetainedMessage) ===
Some(ConsumerActorRegistered(consumer, consumer.actor.asInstanceOf[Consumer])))
@@ -45,7 +45,7 @@ class ConsumerPublishRequestorTest extends JUnitSuite {
@Test
def shouldReceiveOneConsumerUnregisteredEvent = {
val latch = (publisher !! SetExpectedTestMessageCount(1)).as[CountDownLatch].get
- requestor ! ActorUnregistered(consumer.address, consumer)
+ requestor ! ActorUnregistered(consumer.address, consumer, None)
assert(latch.await(5000, TimeUnit.MILLISECONDS))
assert((publisher !! GetRetainedMessage) ===
Some(ConsumerActorUnregistered(consumer, consumer.actor.asInstanceOf[Consumer])))
diff --git a/akka-cluster/src/main/java/akka/cluster/ClusterProtocol.java b/akka-cluster/src/main/java/akka/cluster/ClusterProtocol.java
index ebf240b200..840e89c015 100644
--- a/akka-cluster/src/main/java/akka/cluster/ClusterProtocol.java
+++ b/akka-cluster/src/main/java/akka/cluster/ClusterProtocol.java
@@ -26,6 +26,21 @@ public final class ClusterProtocol {
FUNCTION_FUN1_ARG_ANY(13, 14),
;
+ public static final int START_VALUE = 1;
+ public static final int STOP_VALUE = 2;
+ public static final int USE_VALUE = 3;
+ public static final int RELEASE_VALUE = 4;
+ public static final int MAKE_AVAILABLE_VALUE = 5;
+ public static final int MAKE_UNAVAILABLE_VALUE = 6;
+ public static final int DISCONNECT_VALUE = 7;
+ public static final int RECONNECT_VALUE = 8;
+ public static final int RESIGN_VALUE = 9;
+ public static final int FAIL_OVER_CONNECTIONS_VALUE = 10;
+ public static final int FUNCTION_FUN0_UNIT_VALUE = 11;
+ public static final int FUNCTION_FUN0_ANY_VALUE = 12;
+ public static final int FUNCTION_FUN1_ARG_UNIT_VALUE = 13;
+ public static final int FUNCTION_FUN1_ARG_ANY_VALUE = 14;
+
public final int getNumber() { return value; }
@@ -57,8 +72,8 @@ public final class ClusterProtocol {
internalValueMap =
new com.google.protobuf.Internal.EnumLiteMap() {
public RemoteDaemonMessageType findValueByNumber(int number) {
- return RemoteDaemonMessageType.valueOf(number)
- ; }
+ return RemoteDaemonMessageType.valueOf(number);
+ }
};
public final com.google.protobuf.Descriptors.EnumValueDescriptor
@@ -77,6 +92,7 @@ public final class ClusterProtocol {
private static final RemoteDaemonMessageType[] VALUES = {
START, STOP, USE, RELEASE, MAKE_AVAILABLE, MAKE_UNAVAILABLE, DISCONNECT, RECONNECT, RESIGN, FAIL_OVER_CONNECTIONS, FUNCTION_FUN0_UNIT, FUNCTION_FUN0_ANY, FUNCTION_FUN1_ARG_UNIT, FUNCTION_FUN1_ARG_ANY,
};
+
public static RemoteDaemonMessageType valueOf(
com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
if (desc.getType() != getDescriptor()) {
@@ -85,25 +101,44 @@ public final class ClusterProtocol {
}
return VALUES[desc.getIndex()];
}
+
private final int index;
private final int value;
+
private RemoteDaemonMessageType(int index, int value) {
this.index = index;
this.value = value;
}
- static {
- akka.cluster.ClusterProtocol.getDescriptor();
- }
-
// @@protoc_insertion_point(enum_scope:RemoteDaemonMessageType)
}
+ public interface RemoteDaemonMessageProtocolOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required .RemoteDaemonMessageType messageType = 1;
+ boolean hasMessageType();
+ akka.cluster.ClusterProtocol.RemoteDaemonMessageType getMessageType();
+
+ // optional .UuidProtocol actorUuid = 2;
+ boolean hasActorUuid();
+ akka.cluster.ClusterProtocol.UuidProtocol getActorUuid();
+ akka.cluster.ClusterProtocol.UuidProtocolOrBuilder getActorUuidOrBuilder();
+
+ // optional string actorAddress = 3;
+ boolean hasActorAddress();
+ String getActorAddress();
+
+ // optional bytes payload = 5;
+ boolean hasPayload();
+ com.google.protobuf.ByteString getPayload();
+ }
public static final class RemoteDaemonMessageProtocol extends
- com.google.protobuf.GeneratedMessage {
+ com.google.protobuf.GeneratedMessage
+ implements RemoteDaemonMessageProtocolOrBuilder {
// Use RemoteDaemonMessageProtocol.newBuilder() to construct.
- private RemoteDaemonMessageProtocol() {
- initFields();
+ private RemoteDaemonMessageProtocol(Builder builder) {
+ super(builder);
}
private RemoteDaemonMessageProtocol(boolean noInit) {}
@@ -126,60 +161,111 @@ public final class ClusterProtocol {
return akka.cluster.ClusterProtocol.internal_static_RemoteDaemonMessageProtocol_fieldAccessorTable;
}
+ private int bitField0_;
// required .RemoteDaemonMessageType messageType = 1;
public static final int MESSAGETYPE_FIELD_NUMBER = 1;
- private boolean hasMessageType;
private akka.cluster.ClusterProtocol.RemoteDaemonMessageType messageType_;
- public boolean hasMessageType() { return hasMessageType; }
- public akka.cluster.ClusterProtocol.RemoteDaemonMessageType getMessageType() { return messageType_; }
+ public boolean hasMessageType() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ public akka.cluster.ClusterProtocol.RemoteDaemonMessageType getMessageType() {
+ return messageType_;
+ }
// optional .UuidProtocol actorUuid = 2;
public static final int ACTORUUID_FIELD_NUMBER = 2;
- private boolean hasActorUuid;
private akka.cluster.ClusterProtocol.UuidProtocol actorUuid_;
- public boolean hasActorUuid() { return hasActorUuid; }
- public akka.cluster.ClusterProtocol.UuidProtocol getActorUuid() { return actorUuid_; }
+ public boolean hasActorUuid() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ public akka.cluster.ClusterProtocol.UuidProtocol getActorUuid() {
+ return actorUuid_;
+ }
+ public akka.cluster.ClusterProtocol.UuidProtocolOrBuilder getActorUuidOrBuilder() {
+ return actorUuid_;
+ }
// optional string actorAddress = 3;
public static final int ACTORADDRESS_FIELD_NUMBER = 3;
- private boolean hasActorAddress;
- private java.lang.String actorAddress_ = "";
- public boolean hasActorAddress() { return hasActorAddress; }
- public java.lang.String getActorAddress() { return actorAddress_; }
+ private java.lang.Object actorAddress_;
+ public boolean hasActorAddress() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ public String getActorAddress() {
+ java.lang.Object ref = actorAddress_;
+ if (ref instanceof String) {
+ return (String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ String s = bs.toStringUtf8();
+ if (com.google.protobuf.Internal.isValidUtf8(bs)) {
+ actorAddress_ = s;
+ }
+ return s;
+ }
+ }
+ private com.google.protobuf.ByteString getActorAddressBytes() {
+ java.lang.Object ref = actorAddress_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8((String) ref);
+ actorAddress_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
// optional bytes payload = 5;
public static final int PAYLOAD_FIELD_NUMBER = 5;
- private boolean hasPayload;
- private com.google.protobuf.ByteString payload_ = com.google.protobuf.ByteString.EMPTY;
- public boolean hasPayload() { return hasPayload; }
- public com.google.protobuf.ByteString getPayload() { return payload_; }
+ private com.google.protobuf.ByteString payload_;
+ public boolean hasPayload() {
+ return ((bitField0_ & 0x00000008) == 0x00000008);
+ }
+ public com.google.protobuf.ByteString getPayload() {
+ return payload_;
+ }
private void initFields() {
messageType_ = akka.cluster.ClusterProtocol.RemoteDaemonMessageType.START;
actorUuid_ = akka.cluster.ClusterProtocol.UuidProtocol.getDefaultInstance();
+ actorAddress_ = "";
+ payload_ = com.google.protobuf.ByteString.EMPTY;
}
+ private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
- if (!hasMessageType) return false;
- if (hasActorUuid()) {
- if (!getActorUuid().isInitialized()) return false;
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasMessageType()) {
+ memoizedIsInitialized = 0;
+ return false;
}
+ if (hasActorUuid()) {
+ if (!getActorUuid().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ }
+ memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
- if (hasMessageType()) {
- output.writeEnum(1, getMessageType().getNumber());
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeEnum(1, messageType_.getNumber());
}
- if (hasActorUuid()) {
- output.writeMessage(2, getActorUuid());
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeMessage(2, actorUuid_);
}
- if (hasActorAddress()) {
- output.writeString(3, getActorAddress());
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ output.writeBytes(3, getActorAddressBytes());
}
- if (hasPayload()) {
- output.writeBytes(5, getPayload());
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ output.writeBytes(5, payload_);
}
getUnknownFields().writeTo(output);
}
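
Editor's note: the regenerated protobuf classes above replace the per-field hasXxx booleans with a single packed bit field. Stripped of the generated boilerplate, the bookkeeping is plain bit-mask arithmetic; a small Scala sketch using the same bit positions (PresenceBits is an illustrative name):

object PresenceBits {
  // One Int holds a "has" flag per field, in field order.
  val MessageTypeBit  = 0x00000001
  val ActorUuidBit    = 0x00000002
  val ActorAddressBit = 0x00000004
  val PayloadBit      = 0x00000008

  def set(bits: Int, flag: Int): Int     = bits | flag
  def clear(bits: Int, flag: Int): Int   = bits & ~flag
  def has(bits: Int, flag: Int): Boolean = (bits & flag) == flag
}

// e.g. setting actorAddress turns on bit 0x4, so hasActorAddress() becomes true:
// val bits = PresenceBits.set(0, PresenceBits.ActorAddressBit)
// PresenceBits.has(bits, PresenceBits.ActorAddressBit) // true
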
@@ -190,27 +276,34 @@ public final class ClusterProtocol {
if (size != -1) return size;
size = 0;
- if (hasMessageType()) {
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
- .computeEnumSize(1, getMessageType().getNumber());
+ .computeEnumSize(1, messageType_.getNumber());
}
- if (hasActorUuid()) {
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
- .computeMessageSize(2, getActorUuid());
+ .computeMessageSize(2, actorUuid_);
}
- if (hasActorAddress()) {
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
- .computeStringSize(3, getActorAddress());
+ .computeBytesSize(3, getActorAddressBytes());
}
- if (hasPayload()) {
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += com.google.protobuf.CodedOutputStream
- .computeBytesSize(5, getPayload());
+ .computeBytesSize(5, payload_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
public static akka.cluster.ClusterProtocol.RemoteDaemonMessageProtocol parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
@@ -285,34 +378,62 @@ public final class ClusterProtocol {
}
public Builder toBuilder() { return newBuilder(this); }
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
public static final class Builder extends
- com.google.protobuf.GeneratedMessage.Builder {
- private akka.cluster.ClusterProtocol.RemoteDaemonMessageProtocol result;
-
- // Construct using akka.cluster.ClusterProtocol.RemoteDaemonMessageProtocol.newBuilder()
- private Builder() {}
-
- private static Builder create() {
- Builder builder = new Builder();
- builder.result = new akka.cluster.ClusterProtocol.RemoteDaemonMessageProtocol();
- return builder;
+ com.google.protobuf.GeneratedMessage.Builder
+ implements akka.cluster.ClusterProtocol.RemoteDaemonMessageProtocolOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return akka.cluster.ClusterProtocol.internal_static_RemoteDaemonMessageProtocol_descriptor;
}
- protected akka.cluster.ClusterProtocol.RemoteDaemonMessageProtocol internalGetResult() {
- return result;
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return akka.cluster.ClusterProtocol.internal_static_RemoteDaemonMessageProtocol_fieldAccessorTable;
+ }
+
+ // Construct using akka.cluster.ClusterProtocol.RemoteDaemonMessageProtocol.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getActorUuidFieldBuilder();
+ }
+ }
+ private static Builder create() {
+ return new Builder();
}
public Builder clear() {
- if (result == null) {
- throw new IllegalStateException(
- "Cannot call clear() after build().");
+ super.clear();
+ messageType_ = akka.cluster.ClusterProtocol.RemoteDaemonMessageType.START;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ if (actorUuidBuilder_ == null) {
+ actorUuid_ = akka.cluster.ClusterProtocol.UuidProtocol.getDefaultInstance();
+ } else {
+ actorUuidBuilder_.clear();
}
- result = new akka.cluster.ClusterProtocol.RemoteDaemonMessageProtocol();
+ bitField0_ = (bitField0_ & ~0x00000002);
+ actorAddress_ = "";
+ bitField0_ = (bitField0_ & ~0x00000004);
+ payload_ = com.google.protobuf.ByteString.EMPTY;
+ bitField0_ = (bitField0_ & ~0x00000008);
return this;
}
public Builder clone() {
- return create().mergeFrom(result);
+ return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
@@ -324,33 +445,51 @@ public final class ClusterProtocol {
return akka.cluster.ClusterProtocol.RemoteDaemonMessageProtocol.getDefaultInstance();
}
- public boolean isInitialized() {
- return result.isInitialized();
- }
public akka.cluster.ClusterProtocol.RemoteDaemonMessageProtocol build() {
- if (result != null && !isInitialized()) {
+ akka.cluster.ClusterProtocol.RemoteDaemonMessageProtocol result = buildPartial();
+ if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
- return buildPartial();
+ return result;
}
private akka.cluster.ClusterProtocol.RemoteDaemonMessageProtocol buildParsed()
throws com.google.protobuf.InvalidProtocolBufferException {
- if (!isInitialized()) {
+ akka.cluster.ClusterProtocol.RemoteDaemonMessageProtocol result = buildPartial();
+ if (!result.isInitialized()) {
throw newUninitializedMessageException(
result).asInvalidProtocolBufferException();
}
- return buildPartial();
+ return result;
}
public akka.cluster.ClusterProtocol.RemoteDaemonMessageProtocol buildPartial() {
- if (result == null) {
- throw new IllegalStateException(
- "build() has already been called on this Builder.");
+ akka.cluster.ClusterProtocol.RemoteDaemonMessageProtocol result = new akka.cluster.ClusterProtocol.RemoteDaemonMessageProtocol(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
}
- akka.cluster.ClusterProtocol.RemoteDaemonMessageProtocol returnMe = result;
- result = null;
- return returnMe;
+ result.messageType_ = messageType_;
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ if (actorUuidBuilder_ == null) {
+ result.actorUuid_ = actorUuid_;
+ } else {
+ result.actorUuid_ = actorUuidBuilder_.build();
+ }
+ if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+ to_bitField0_ |= 0x00000004;
+ }
+ result.actorAddress_ = actorAddress_;
+ if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
+ to_bitField0_ |= 0x00000008;
+ }
+ result.payload_ = payload_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
@@ -380,6 +519,20 @@ public final class ClusterProtocol {
return this;
}
+ public final boolean isInitialized() {
+ if (!hasMessageType()) {
+
+ return false;
+ }
+ if (hasActorUuid()) {
+ if (!getActorUuid().isInitialized()) {
+
+ return false;
+ }
+ }
+ return true;
+ }
+
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
@@ -392,11 +545,13 @@ public final class ClusterProtocol {
switch (tag) {
case 0:
this.setUnknownFields(unknownFields.build());
+ onChanged();
return this;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
this.setUnknownFields(unknownFields.build());
+ onChanged();
return this;
}
break;
@@ -407,7 +562,8 @@ public final class ClusterProtocol {
if (value == null) {
unknownFields.mergeVarintField(1, rawValue);
} else {
- setMessageType(value);
+ bitField0_ |= 0x00000001;
+ messageType_ = value;
}
break;
}
@@ -421,115 +577,192 @@ public final class ClusterProtocol {
break;
}
case 26: {
- setActorAddress(input.readString());
+ bitField0_ |= 0x00000004;
+ actorAddress_ = input.readBytes();
break;
}
case 42: {
- setPayload(input.readBytes());
+ bitField0_ |= 0x00000008;
+ payload_ = input.readBytes();
break;
}
}
}
}
+ private int bitField0_;
// required .RemoteDaemonMessageType messageType = 1;
+ private akka.cluster.ClusterProtocol.RemoteDaemonMessageType messageType_ = akka.cluster.ClusterProtocol.RemoteDaemonMessageType.START;
public boolean hasMessageType() {
- return result.hasMessageType();
+ return ((bitField0_ & 0x00000001) == 0x00000001);
}
public akka.cluster.ClusterProtocol.RemoteDaemonMessageType getMessageType() {
- return result.getMessageType();
+ return messageType_;
}
public Builder setMessageType(akka.cluster.ClusterProtocol.RemoteDaemonMessageType value) {
if (value == null) {
throw new NullPointerException();
}
- result.hasMessageType = true;
- result.messageType_ = value;
+ bitField0_ |= 0x00000001;
+ messageType_ = value;
+ onChanged();
return this;
}
public Builder clearMessageType() {
- result.hasMessageType = false;
- result.messageType_ = akka.cluster.ClusterProtocol.RemoteDaemonMessageType.START;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ messageType_ = akka.cluster.ClusterProtocol.RemoteDaemonMessageType.START;
+ onChanged();
return this;
}
// optional .UuidProtocol actorUuid = 2;
+ private akka.cluster.ClusterProtocol.UuidProtocol actorUuid_ = akka.cluster.ClusterProtocol.UuidProtocol.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ akka.cluster.ClusterProtocol.UuidProtocol, akka.cluster.ClusterProtocol.UuidProtocol.Builder, akka.cluster.ClusterProtocol.UuidProtocolOrBuilder> actorUuidBuilder_;
public boolean hasActorUuid() {
- return result.hasActorUuid();
+ return ((bitField0_ & 0x00000002) == 0x00000002);
}
public akka.cluster.ClusterProtocol.UuidProtocol getActorUuid() {
- return result.getActorUuid();
+ if (actorUuidBuilder_ == null) {
+ return actorUuid_;
+ } else {
+ return actorUuidBuilder_.getMessage();
+ }
}
public Builder setActorUuid(akka.cluster.ClusterProtocol.UuidProtocol value) {
- if (value == null) {
- throw new NullPointerException();
+ if (actorUuidBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ actorUuid_ = value;
+ onChanged();
+ } else {
+ actorUuidBuilder_.setMessage(value);
}
- result.hasActorUuid = true;
- result.actorUuid_ = value;
+ bitField0_ |= 0x00000002;
return this;
}
- public Builder setActorUuid(akka.cluster.ClusterProtocol.UuidProtocol.Builder builderForValue) {
- result.hasActorUuid = true;
- result.actorUuid_ = builderForValue.build();
+ public Builder setActorUuid(
+ akka.cluster.ClusterProtocol.UuidProtocol.Builder builderForValue) {
+ if (actorUuidBuilder_ == null) {
+ actorUuid_ = builderForValue.build();
+ onChanged();
+ } else {
+ actorUuidBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000002;
return this;
}
public Builder mergeActorUuid(akka.cluster.ClusterProtocol.UuidProtocol value) {
- if (result.hasActorUuid() &&
- result.actorUuid_ != akka.cluster.ClusterProtocol.UuidProtocol.getDefaultInstance()) {
- result.actorUuid_ =
- akka.cluster.ClusterProtocol.UuidProtocol.newBuilder(result.actorUuid_).mergeFrom(value).buildPartial();
+ if (actorUuidBuilder_ == null) {
+ if (((bitField0_ & 0x00000002) == 0x00000002) &&
+ actorUuid_ != akka.cluster.ClusterProtocol.UuidProtocol.getDefaultInstance()) {
+ actorUuid_ =
+ akka.cluster.ClusterProtocol.UuidProtocol.newBuilder(actorUuid_).mergeFrom(value).buildPartial();
+ } else {
+ actorUuid_ = value;
+ }
+ onChanged();
} else {
- result.actorUuid_ = value;
+ actorUuidBuilder_.mergeFrom(value);
}
- result.hasActorUuid = true;
+ bitField0_ |= 0x00000002;
return this;
}
public Builder clearActorUuid() {
- result.hasActorUuid = false;
- result.actorUuid_ = akka.cluster.ClusterProtocol.UuidProtocol.getDefaultInstance();
+ if (actorUuidBuilder_ == null) {
+ actorUuid_ = akka.cluster.ClusterProtocol.UuidProtocol.getDefaultInstance();
+ onChanged();
+ } else {
+ actorUuidBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
+ public akka.cluster.ClusterProtocol.UuidProtocol.Builder getActorUuidBuilder() {
+ bitField0_ |= 0x00000002;
+ onChanged();
+ return getActorUuidFieldBuilder().getBuilder();
+ }
+ public akka.cluster.ClusterProtocol.UuidProtocolOrBuilder getActorUuidOrBuilder() {
+ if (actorUuidBuilder_ != null) {
+ return actorUuidBuilder_.getMessageOrBuilder();
+ } else {
+ return actorUuid_;
+ }
+ }
+ private com.google.protobuf.SingleFieldBuilder<
+ akka.cluster.ClusterProtocol.UuidProtocol, akka.cluster.ClusterProtocol.UuidProtocol.Builder, akka.cluster.ClusterProtocol.UuidProtocolOrBuilder>
+ getActorUuidFieldBuilder() {
+ if (actorUuidBuilder_ == null) {
+ actorUuidBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ akka.cluster.ClusterProtocol.UuidProtocol, akka.cluster.ClusterProtocol.UuidProtocol.Builder, akka.cluster.ClusterProtocol.UuidProtocolOrBuilder>(
+ actorUuid_,
+ getParentForChildren(),
+ isClean());
+ actorUuid_ = null;
+ }
+ return actorUuidBuilder_;
+ }
// optional string actorAddress = 3;
+ private java.lang.Object actorAddress_ = "";
public boolean hasActorAddress() {
- return result.hasActorAddress();
+ return ((bitField0_ & 0x00000004) == 0x00000004);
}
- public java.lang.String getActorAddress() {
- return result.getActorAddress();
+ public String getActorAddress() {
+ java.lang.Object ref = actorAddress_;
+ if (!(ref instanceof String)) {
+ String s = ((com.google.protobuf.ByteString) ref).toStringUtf8();
+ actorAddress_ = s;
+ return s;
+ } else {
+ return (String) ref;
+ }
}
- public Builder setActorAddress(java.lang.String value) {
+ public Builder setActorAddress(String value) {
if (value == null) {
throw new NullPointerException();
}
- result.hasActorAddress = true;
- result.actorAddress_ = value;
+ bitField0_ |= 0x00000004;
+ actorAddress_ = value;
+ onChanged();
return this;
}
public Builder clearActorAddress() {
- result.hasActorAddress = false;
- result.actorAddress_ = getDefaultInstance().getActorAddress();
+ bitField0_ = (bitField0_ & ~0x00000004);
+ actorAddress_ = getDefaultInstance().getActorAddress();
+ onChanged();
return this;
}
+ void setActorAddress(com.google.protobuf.ByteString value) {
+ bitField0_ |= 0x00000004;
+ actorAddress_ = value;
+ onChanged();
+ }
// optional bytes payload = 5;
+ private com.google.protobuf.ByteString payload_ = com.google.protobuf.ByteString.EMPTY;
public boolean hasPayload() {
- return result.hasPayload();
+ return ((bitField0_ & 0x00000008) == 0x00000008);
}
public com.google.protobuf.ByteString getPayload() {
- return result.getPayload();
+ return payload_;
}
public Builder setPayload(com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
- result.hasPayload = true;
- result.payload_ = value;
+ bitField0_ |= 0x00000008;
+ payload_ = value;
+ onChanged();
return this;
}
public Builder clearPayload() {
- result.hasPayload = false;
- result.payload_ = getDefaultInstance().getPayload();
+ bitField0_ = (bitField0_ & ~0x00000008);
+ payload_ = getDefaultInstance().getPayload();
+ onChanged();
return this;
}
@@ -538,18 +771,38 @@ public final class ClusterProtocol {
static {
defaultInstance = new RemoteDaemonMessageProtocol(true);
- akka.cluster.ClusterProtocol.internalForceInit();
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:RemoteDaemonMessageProtocol)
}
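
Editor's note: string fields in the regenerated code are held as either a String or a ByteString and converted (and cached) lazily on first access, as getActorAddress/getActorAddressBytes above show. A compact Scala sketch of that pattern with illustrative names, not generated API:

import com.google.protobuf.ByteString

final class LazyStringField(initial: AnyRef) {
  private[this] var ref: AnyRef = initial // either String or ByteString

  def asString: String = ref match {
    case s: String ⇒ s
    case bs: ByteString ⇒
      val s = bs.toStringUtf8
      if (com.google.protobuf.Internal.isValidUtf8(bs)) ref = s // cache only if valid UTF-8
      s
  }

  def asBytes: ByteString = ref match {
    case bs: ByteString ⇒ bs
    case s: String ⇒
      val b = ByteString.copyFromUtf8(s)
      ref = b
      b
  }
}
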
+ public interface DurableMailboxMessageProtocolOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required string ownerActorAddress = 1;
+ boolean hasOwnerActorAddress();
+ String getOwnerActorAddress();
+
+ // optional string senderActorAddress = 2;
+ boolean hasSenderActorAddress();
+ String getSenderActorAddress();
+
+ // optional .UuidProtocol futureUuid = 3;
+ boolean hasFutureUuid();
+ akka.cluster.ClusterProtocol.UuidProtocol getFutureUuid();
+ akka.cluster.ClusterProtocol.UuidProtocolOrBuilder getFutureUuidOrBuilder();
+
+ // required bytes message = 4;
+ boolean hasMessage();
+ com.google.protobuf.ByteString getMessage();
+ }
public static final class DurableMailboxMessageProtocol extends
- com.google.protobuf.GeneratedMessage {
+ com.google.protobuf.GeneratedMessage
+ implements DurableMailboxMessageProtocolOrBuilder {
// Use DurableMailboxMessageProtocol.newBuilder() to construct.
- private DurableMailboxMessageProtocol() {
- initFields();
+ private DurableMailboxMessageProtocol(Builder builder) {
+ super(builder);
}
private DurableMailboxMessageProtocol(boolean noInit) {}
@@ -572,60 +825,137 @@ public final class ClusterProtocol {
return akka.cluster.ClusterProtocol.internal_static_DurableMailboxMessageProtocol_fieldAccessorTable;
}
+ private int bitField0_;
// required string ownerActorAddress = 1;
public static final int OWNERACTORADDRESS_FIELD_NUMBER = 1;
- private boolean hasOwnerActorAddress;
- private java.lang.String ownerActorAddress_ = "";
- public boolean hasOwnerActorAddress() { return hasOwnerActorAddress; }
- public java.lang.String getOwnerActorAddress() { return ownerActorAddress_; }
+ private java.lang.Object ownerActorAddress_;
+ public boolean hasOwnerActorAddress() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ public String getOwnerActorAddress() {
+ java.lang.Object ref = ownerActorAddress_;
+ if (ref instanceof String) {
+ return (String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ String s = bs.toStringUtf8();
+ if (com.google.protobuf.Internal.isValidUtf8(bs)) {
+ ownerActorAddress_ = s;
+ }
+ return s;
+ }
+ }
+ private com.google.protobuf.ByteString getOwnerActorAddressBytes() {
+ java.lang.Object ref = ownerActorAddress_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8((String) ref);
+ ownerActorAddress_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
// optional string senderActorAddress = 2;
public static final int SENDERACTORADDRESS_FIELD_NUMBER = 2;
- private boolean hasSenderActorAddress;
- private java.lang.String senderActorAddress_ = "";
- public boolean hasSenderActorAddress() { return hasSenderActorAddress; }
- public java.lang.String getSenderActorAddress() { return senderActorAddress_; }
+ private java.lang.Object senderActorAddress_;
+ public boolean hasSenderActorAddress() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ public String getSenderActorAddress() {
+ java.lang.Object ref = senderActorAddress_;
+ if (ref instanceof String) {
+ return (String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ String s = bs.toStringUtf8();
+ if (com.google.protobuf.Internal.isValidUtf8(bs)) {
+ senderActorAddress_ = s;
+ }
+ return s;
+ }
+ }
+ private com.google.protobuf.ByteString getSenderActorAddressBytes() {
+ java.lang.Object ref = senderActorAddress_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8((String) ref);
+ senderActorAddress_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
// optional .UuidProtocol futureUuid = 3;
public static final int FUTUREUUID_FIELD_NUMBER = 3;
- private boolean hasFutureUuid;
private akka.cluster.ClusterProtocol.UuidProtocol futureUuid_;
- public boolean hasFutureUuid() { return hasFutureUuid; }
- public akka.cluster.ClusterProtocol.UuidProtocol getFutureUuid() { return futureUuid_; }
+ public boolean hasFutureUuid() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ public akka.cluster.ClusterProtocol.UuidProtocol getFutureUuid() {
+ return futureUuid_;
+ }
+ public akka.cluster.ClusterProtocol.UuidProtocolOrBuilder getFutureUuidOrBuilder() {
+ return futureUuid_;
+ }
// required bytes message = 4;
public static final int MESSAGE_FIELD_NUMBER = 4;
- private boolean hasMessage;
- private com.google.protobuf.ByteString message_ = com.google.protobuf.ByteString.EMPTY;
- public boolean hasMessage() { return hasMessage; }
- public com.google.protobuf.ByteString getMessage() { return message_; }
+ private com.google.protobuf.ByteString message_;
+ public boolean hasMessage() {
+ return ((bitField0_ & 0x00000008) == 0x00000008);
+ }
+ public com.google.protobuf.ByteString getMessage() {
+ return message_;
+ }
private void initFields() {
+ ownerActorAddress_ = "";
+ senderActorAddress_ = "";
futureUuid_ = akka.cluster.ClusterProtocol.UuidProtocol.getDefaultInstance();
+ message_ = com.google.protobuf.ByteString.EMPTY;
}
+ private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
- if (!hasOwnerActorAddress) return false;
- if (!hasMessage) return false;
- if (hasFutureUuid()) {
- if (!getFutureUuid().isInitialized()) return false;
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasOwnerActorAddress()) {
+ memoizedIsInitialized = 0;
+ return false;
}
+ if (!hasMessage()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (hasFutureUuid()) {
+ if (!getFutureUuid().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ }
+ memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
- if (hasOwnerActorAddress()) {
- output.writeString(1, getOwnerActorAddress());
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeBytes(1, getOwnerActorAddressBytes());
}
- if (hasSenderActorAddress()) {
- output.writeString(2, getSenderActorAddress());
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeBytes(2, getSenderActorAddressBytes());
}
- if (hasFutureUuid()) {
- output.writeMessage(3, getFutureUuid());
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ output.writeMessage(3, futureUuid_);
}
- if (hasMessage()) {
- output.writeBytes(4, getMessage());
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ output.writeBytes(4, message_);
}
getUnknownFields().writeTo(output);
}
@@ -636,27 +966,34 @@ public final class ClusterProtocol {
if (size != -1) return size;
size = 0;
- if (hasOwnerActorAddress()) {
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
- .computeStringSize(1, getOwnerActorAddress());
+ .computeBytesSize(1, getOwnerActorAddressBytes());
}
- if (hasSenderActorAddress()) {
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
- .computeStringSize(2, getSenderActorAddress());
+ .computeBytesSize(2, getSenderActorAddressBytes());
}
- if (hasFutureUuid()) {
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
- .computeMessageSize(3, getFutureUuid());
+ .computeMessageSize(3, futureUuid_);
}
- if (hasMessage()) {
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += com.google.protobuf.CodedOutputStream
- .computeBytesSize(4, getMessage());
+ .computeBytesSize(4, message_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
public static akka.cluster.ClusterProtocol.DurableMailboxMessageProtocol parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
@@ -731,34 +1068,62 @@ public final class ClusterProtocol {
}
public Builder toBuilder() { return newBuilder(this); }
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
public static final class Builder extends
- com.google.protobuf.GeneratedMessage.Builder {
- private akka.cluster.ClusterProtocol.DurableMailboxMessageProtocol result;
-
- // Construct using akka.cluster.ClusterProtocol.DurableMailboxMessageProtocol.newBuilder()
- private Builder() {}
-
- private static Builder create() {
- Builder builder = new Builder();
- builder.result = new akka.cluster.ClusterProtocol.DurableMailboxMessageProtocol();
- return builder;
+ com.google.protobuf.GeneratedMessage.Builder
+ implements akka.cluster.ClusterProtocol.DurableMailboxMessageProtocolOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return akka.cluster.ClusterProtocol.internal_static_DurableMailboxMessageProtocol_descriptor;
}
- protected akka.cluster.ClusterProtocol.DurableMailboxMessageProtocol internalGetResult() {
- return result;
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return akka.cluster.ClusterProtocol.internal_static_DurableMailboxMessageProtocol_fieldAccessorTable;
+ }
+
+ // Construct using akka.cluster.ClusterProtocol.DurableMailboxMessageProtocol.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getFutureUuidFieldBuilder();
+ }
+ }
+ private static Builder create() {
+ return new Builder();
}
public Builder clear() {
- if (result == null) {
- throw new IllegalStateException(
- "Cannot call clear() after build().");
+ super.clear();
+ ownerActorAddress_ = "";
+ bitField0_ = (bitField0_ & ~0x00000001);
+ senderActorAddress_ = "";
+ bitField0_ = (bitField0_ & ~0x00000002);
+ if (futureUuidBuilder_ == null) {
+ futureUuid_ = akka.cluster.ClusterProtocol.UuidProtocol.getDefaultInstance();
+ } else {
+ futureUuidBuilder_.clear();
}
- result = new akka.cluster.ClusterProtocol.DurableMailboxMessageProtocol();
+ bitField0_ = (bitField0_ & ~0x00000004);
+ message_ = com.google.protobuf.ByteString.EMPTY;
+ bitField0_ = (bitField0_ & ~0x00000008);
return this;
}
public Builder clone() {
- return create().mergeFrom(result);
+ return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
@@ -770,33 +1135,51 @@ public final class ClusterProtocol {
return akka.cluster.ClusterProtocol.DurableMailboxMessageProtocol.getDefaultInstance();
}
- public boolean isInitialized() {
- return result.isInitialized();
- }
public akka.cluster.ClusterProtocol.DurableMailboxMessageProtocol build() {
- if (result != null && !isInitialized()) {
+ akka.cluster.ClusterProtocol.DurableMailboxMessageProtocol result = buildPartial();
+ if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
- return buildPartial();
+ return result;
}
private akka.cluster.ClusterProtocol.DurableMailboxMessageProtocol buildParsed()
throws com.google.protobuf.InvalidProtocolBufferException {
- if (!isInitialized()) {
+ akka.cluster.ClusterProtocol.DurableMailboxMessageProtocol result = buildPartial();
+ if (!result.isInitialized()) {
throw newUninitializedMessageException(
result).asInvalidProtocolBufferException();
}
- return buildPartial();
+ return result;
}
public akka.cluster.ClusterProtocol.DurableMailboxMessageProtocol buildPartial() {
- if (result == null) {
- throw new IllegalStateException(
- "build() has already been called on this Builder.");
+ akka.cluster.ClusterProtocol.DurableMailboxMessageProtocol result = new akka.cluster.ClusterProtocol.DurableMailboxMessageProtocol(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
}
- akka.cluster.ClusterProtocol.DurableMailboxMessageProtocol returnMe = result;
- result = null;
- return returnMe;
+ result.ownerActorAddress_ = ownerActorAddress_;
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ result.senderActorAddress_ = senderActorAddress_;
+ if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+ to_bitField0_ |= 0x00000004;
+ }
+ if (futureUuidBuilder_ == null) {
+ result.futureUuid_ = futureUuid_;
+ } else {
+ result.futureUuid_ = futureUuidBuilder_.build();
+ }
+ if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
+ to_bitField0_ |= 0x00000008;
+ }
+ result.message_ = message_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
@@ -826,6 +1209,24 @@ public final class ClusterProtocol {
return this;
}
+ public final boolean isInitialized() {
+ if (!hasOwnerActorAddress()) {
+
+ return false;
+ }
+ if (!hasMessage()) {
+
+ return false;
+ }
+ if (hasFutureUuid()) {
+ if (!getFutureUuid().isInitialized()) {
+
+ return false;
+ }
+ }
+ return true;
+ }
+
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
@@ -838,21 +1239,25 @@ public final class ClusterProtocol {
switch (tag) {
case 0:
this.setUnknownFields(unknownFields.build());
+ onChanged();
return this;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
this.setUnknownFields(unknownFields.build());
+ onChanged();
return this;
}
break;
}
case 10: {
- setOwnerActorAddress(input.readString());
+ bitField0_ |= 0x00000001;
+ ownerActorAddress_ = input.readBytes();
break;
}
case 18: {
- setSenderActorAddress(input.readString());
+ bitField0_ |= 0x00000002;
+ senderActorAddress_ = input.readBytes();
break;
}
case 26: {
@@ -865,111 +1270,199 @@ public final class ClusterProtocol {
break;
}
case 34: {
- setMessage(input.readBytes());
+ bitField0_ |= 0x00000008;
+ message_ = input.readBytes();
break;
}
}
}
}
+ private int bitField0_;
// required string ownerActorAddress = 1;
+ private java.lang.Object ownerActorAddress_ = "";
public boolean hasOwnerActorAddress() {
- return result.hasOwnerActorAddress();
+ return ((bitField0_ & 0x00000001) == 0x00000001);
}
- public java.lang.String getOwnerActorAddress() {
- return result.getOwnerActorAddress();
+ public String getOwnerActorAddress() {
+ java.lang.Object ref = ownerActorAddress_;
+ if (!(ref instanceof String)) {
+ String s = ((com.google.protobuf.ByteString) ref).toStringUtf8();
+ ownerActorAddress_ = s;
+ return s;
+ } else {
+ return (String) ref;
+ }
}
- public Builder setOwnerActorAddress(java.lang.String value) {
+ public Builder setOwnerActorAddress(String value) {
if (value == null) {
throw new NullPointerException();
}
- result.hasOwnerActorAddress = true;
- result.ownerActorAddress_ = value;
+ bitField0_ |= 0x00000001;
+ ownerActorAddress_ = value;
+ onChanged();
return this;
}
public Builder clearOwnerActorAddress() {
- result.hasOwnerActorAddress = false;
- result.ownerActorAddress_ = getDefaultInstance().getOwnerActorAddress();
+ bitField0_ = (bitField0_ & ~0x00000001);
+ ownerActorAddress_ = getDefaultInstance().getOwnerActorAddress();
+ onChanged();
return this;
}
+ void setOwnerActorAddress(com.google.protobuf.ByteString value) {
+ bitField0_ |= 0x00000001;
+ ownerActorAddress_ = value;
+ onChanged();
+ }
// optional string senderActorAddress = 2;
+ private java.lang.Object senderActorAddress_ = "";
public boolean hasSenderActorAddress() {
- return result.hasSenderActorAddress();
+ return ((bitField0_ & 0x00000002) == 0x00000002);
}
- public java.lang.String getSenderActorAddress() {
- return result.getSenderActorAddress();
+ public String getSenderActorAddress() {
+ java.lang.Object ref = senderActorAddress_;
+ if (!(ref instanceof String)) {
+ String s = ((com.google.protobuf.ByteString) ref).toStringUtf8();
+ senderActorAddress_ = s;
+ return s;
+ } else {
+ return (String) ref;
+ }
}
- public Builder setSenderActorAddress(java.lang.String value) {
+ public Builder setSenderActorAddress(String value) {
if (value == null) {
throw new NullPointerException();
}
- result.hasSenderActorAddress = true;
- result.senderActorAddress_ = value;
+ bitField0_ |= 0x00000002;
+ senderActorAddress_ = value;
+ onChanged();
return this;
}
public Builder clearSenderActorAddress() {
- result.hasSenderActorAddress = false;
- result.senderActorAddress_ = getDefaultInstance().getSenderActorAddress();
+ bitField0_ = (bitField0_ & ~0x00000002);
+ senderActorAddress_ = getDefaultInstance().getSenderActorAddress();
+ onChanged();
return this;
}
+ void setSenderActorAddress(com.google.protobuf.ByteString value) {
+ bitField0_ |= 0x00000002;
+ senderActorAddress_ = value;
+ onChanged();
+ }
// optional .UuidProtocol futureUuid = 3;
+ private akka.cluster.ClusterProtocol.UuidProtocol futureUuid_ = akka.cluster.ClusterProtocol.UuidProtocol.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ akka.cluster.ClusterProtocol.UuidProtocol, akka.cluster.ClusterProtocol.UuidProtocol.Builder, akka.cluster.ClusterProtocol.UuidProtocolOrBuilder> futureUuidBuilder_;
public boolean hasFutureUuid() {
- return result.hasFutureUuid();
+ return ((bitField0_ & 0x00000004) == 0x00000004);
}
public akka.cluster.ClusterProtocol.UuidProtocol getFutureUuid() {
- return result.getFutureUuid();
+ if (futureUuidBuilder_ == null) {
+ return futureUuid_;
+ } else {
+ return futureUuidBuilder_.getMessage();
+ }
}
public Builder setFutureUuid(akka.cluster.ClusterProtocol.UuidProtocol value) {
- if (value == null) {
- throw new NullPointerException();
+ if (futureUuidBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ futureUuid_ = value;
+ onChanged();
+ } else {
+ futureUuidBuilder_.setMessage(value);
}
- result.hasFutureUuid = true;
- result.futureUuid_ = value;
+ bitField0_ |= 0x00000004;
return this;
}
- public Builder setFutureUuid(akka.cluster.ClusterProtocol.UuidProtocol.Builder builderForValue) {
- result.hasFutureUuid = true;
- result.futureUuid_ = builderForValue.build();
+ public Builder setFutureUuid(
+ akka.cluster.ClusterProtocol.UuidProtocol.Builder builderForValue) {
+ if (futureUuidBuilder_ == null) {
+ futureUuid_ = builderForValue.build();
+ onChanged();
+ } else {
+ futureUuidBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000004;
return this;
}
public Builder mergeFutureUuid(akka.cluster.ClusterProtocol.UuidProtocol value) {
- if (result.hasFutureUuid() &&
- result.futureUuid_ != akka.cluster.ClusterProtocol.UuidProtocol.getDefaultInstance()) {
- result.futureUuid_ =
- akka.cluster.ClusterProtocol.UuidProtocol.newBuilder(result.futureUuid_).mergeFrom(value).buildPartial();
+ if (futureUuidBuilder_ == null) {
+ if (((bitField0_ & 0x00000004) == 0x00000004) &&
+ futureUuid_ != akka.cluster.ClusterProtocol.UuidProtocol.getDefaultInstance()) {
+ futureUuid_ =
+ akka.cluster.ClusterProtocol.UuidProtocol.newBuilder(futureUuid_).mergeFrom(value).buildPartial();
+ } else {
+ futureUuid_ = value;
+ }
+ onChanged();
} else {
- result.futureUuid_ = value;
+ futureUuidBuilder_.mergeFrom(value);
}
- result.hasFutureUuid = true;
+ bitField0_ |= 0x00000004;
return this;
}
public Builder clearFutureUuid() {
- result.hasFutureUuid = false;
- result.futureUuid_ = akka.cluster.ClusterProtocol.UuidProtocol.getDefaultInstance();
+ if (futureUuidBuilder_ == null) {
+ futureUuid_ = akka.cluster.ClusterProtocol.UuidProtocol.getDefaultInstance();
+ onChanged();
+ } else {
+ futureUuidBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000004);
return this;
}
+ public akka.cluster.ClusterProtocol.UuidProtocol.Builder getFutureUuidBuilder() {
+ bitField0_ |= 0x00000004;
+ onChanged();
+ return getFutureUuidFieldBuilder().getBuilder();
+ }
+ public akka.cluster.ClusterProtocol.UuidProtocolOrBuilder getFutureUuidOrBuilder() {
+ if (futureUuidBuilder_ != null) {
+ return futureUuidBuilder_.getMessageOrBuilder();
+ } else {
+ return futureUuid_;
+ }
+ }
+ private com.google.protobuf.SingleFieldBuilder<
+ akka.cluster.ClusterProtocol.UuidProtocol, akka.cluster.ClusterProtocol.UuidProtocol.Builder, akka.cluster.ClusterProtocol.UuidProtocolOrBuilder>
+ getFutureUuidFieldBuilder() {
+ if (futureUuidBuilder_ == null) {
+ futureUuidBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ akka.cluster.ClusterProtocol.UuidProtocol, akka.cluster.ClusterProtocol.UuidProtocol.Builder, akka.cluster.ClusterProtocol.UuidProtocolOrBuilder>(
+ futureUuid_,
+ getParentForChildren(),
+ isClean());
+ futureUuid_ = null;
+ }
+ return futureUuidBuilder_;
+ }
// required bytes message = 4;
+ private com.google.protobuf.ByteString message_ = com.google.protobuf.ByteString.EMPTY;
public boolean hasMessage() {
- return result.hasMessage();
+ return ((bitField0_ & 0x00000008) == 0x00000008);
}
public com.google.protobuf.ByteString getMessage() {
- return result.getMessage();
+ return message_;
}
public Builder setMessage(com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
- result.hasMessage = true;
- result.message_ = value;
+ bitField0_ |= 0x00000008;
+ message_ = value;
+ onChanged();
return this;
}
public Builder clearMessage() {
- result.hasMessage = false;
- result.message_ = getDefaultInstance().getMessage();
+ bitField0_ = (bitField0_ & ~0x00000008);
+ message_ = getDefaultInstance().getMessage();
+ onChanged();
return this;
}
@@ -978,18 +1471,29 @@ public final class ClusterProtocol {
static {
defaultInstance = new DurableMailboxMessageProtocol(true);
- akka.cluster.ClusterProtocol.internalForceInit();
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:DurableMailboxMessageProtocol)
}
+ public interface UuidProtocolOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required uint64 high = 1;
+ boolean hasHigh();
+ long getHigh();
+
+ // required uint64 low = 2;
+ boolean hasLow();
+ long getLow();
+ }
public static final class UuidProtocol extends
- com.google.protobuf.GeneratedMessage {
+ com.google.protobuf.GeneratedMessage
+ implements UuidProtocolOrBuilder {
// Use UuidProtocol.newBuilder() to construct.
- private UuidProtocol() {
- initFields();
+ private UuidProtocol(Builder builder) {
+ super(builder);
}
private UuidProtocol(boolean noInit) {}
@@ -1012,36 +1516,56 @@ public final class ClusterProtocol {
return akka.cluster.ClusterProtocol.internal_static_UuidProtocol_fieldAccessorTable;
}
+ private int bitField0_;
// required uint64 high = 1;
public static final int HIGH_FIELD_NUMBER = 1;
- private boolean hasHigh;
- private long high_ = 0L;
- public boolean hasHigh() { return hasHigh; }
- public long getHigh() { return high_; }
+ private long high_;
+ public boolean hasHigh() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ public long getHigh() {
+ return high_;
+ }
// required uint64 low = 2;
public static final int LOW_FIELD_NUMBER = 2;
- private boolean hasLow;
- private long low_ = 0L;
- public boolean hasLow() { return hasLow; }
- public long getLow() { return low_; }
+ private long low_;
+ public boolean hasLow() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ public long getLow() {
+ return low_;
+ }
private void initFields() {
+ high_ = 0L;
+ low_ = 0L;
}
+ private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
- if (!hasHigh) return false;
- if (!hasLow) return false;
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasHigh()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasLow()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
- if (hasHigh()) {
- output.writeUInt64(1, getHigh());
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeUInt64(1, high_);
}
- if (hasLow()) {
- output.writeUInt64(2, getLow());
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeUInt64(2, low_);
}
getUnknownFields().writeTo(output);
}
@@ -1052,19 +1576,26 @@ public final class ClusterProtocol {
if (size != -1) return size;
size = 0;
- if (hasHigh()) {
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
- .computeUInt64Size(1, getHigh());
+ .computeUInt64Size(1, high_);
}
- if (hasLow()) {
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
- .computeUInt64Size(2, getLow());
+ .computeUInt64Size(2, low_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
public static akka.cluster.ClusterProtocol.UuidProtocol parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
@@ -1139,34 +1670,53 @@ public final class ClusterProtocol {
}
public Builder toBuilder() { return newBuilder(this); }
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
public static final class Builder extends
- com.google.protobuf.GeneratedMessage.Builder {
- private akka.cluster.ClusterProtocol.UuidProtocol result;
-
- // Construct using akka.cluster.ClusterProtocol.UuidProtocol.newBuilder()
- private Builder() {}
-
- private static Builder create() {
- Builder builder = new Builder();
- builder.result = new akka.cluster.ClusterProtocol.UuidProtocol();
- return builder;
+ com.google.protobuf.GeneratedMessage.Builder
+ implements akka.cluster.ClusterProtocol.UuidProtocolOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return akka.cluster.ClusterProtocol.internal_static_UuidProtocol_descriptor;
}
- protected akka.cluster.ClusterProtocol.UuidProtocol internalGetResult() {
- return result;
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return akka.cluster.ClusterProtocol.internal_static_UuidProtocol_fieldAccessorTable;
+ }
+
+ // Construct using akka.cluster.ClusterProtocol.UuidProtocol.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
}
public Builder clear() {
- if (result == null) {
- throw new IllegalStateException(
- "Cannot call clear() after build().");
- }
- result = new akka.cluster.ClusterProtocol.UuidProtocol();
+ super.clear();
+ high_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ low_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
public Builder clone() {
- return create().mergeFrom(result);
+ return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
@@ -1178,33 +1728,39 @@ public final class ClusterProtocol {
return akka.cluster.ClusterProtocol.UuidProtocol.getDefaultInstance();
}
- public boolean isInitialized() {
- return result.isInitialized();
- }
public akka.cluster.ClusterProtocol.UuidProtocol build() {
- if (result != null && !isInitialized()) {
+ akka.cluster.ClusterProtocol.UuidProtocol result = buildPartial();
+ if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
- return buildPartial();
+ return result;
}
private akka.cluster.ClusterProtocol.UuidProtocol buildParsed()
throws com.google.protobuf.InvalidProtocolBufferException {
- if (!isInitialized()) {
+ akka.cluster.ClusterProtocol.UuidProtocol result = buildPartial();
+ if (!result.isInitialized()) {
throw newUninitializedMessageException(
result).asInvalidProtocolBufferException();
}
- return buildPartial();
+ return result;
}
public akka.cluster.ClusterProtocol.UuidProtocol buildPartial() {
- if (result == null) {
- throw new IllegalStateException(
- "build() has already been called on this Builder.");
+ akka.cluster.ClusterProtocol.UuidProtocol result = new akka.cluster.ClusterProtocol.UuidProtocol(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
}
- akka.cluster.ClusterProtocol.UuidProtocol returnMe = result;
- result = null;
- return returnMe;
+ result.high_ = high_;
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ result.low_ = low_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
@@ -1228,6 +1784,18 @@ public final class ClusterProtocol {
return this;
}
+ public final boolean isInitialized() {
+ if (!hasHigh()) {
+
+ return false;
+ }
+ if (!hasLow()) {
+
+ return false;
+ }
+ return true;
+ }
+
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
@@ -1240,61 +1808,72 @@ public final class ClusterProtocol {
switch (tag) {
case 0:
this.setUnknownFields(unknownFields.build());
+ onChanged();
return this;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
this.setUnknownFields(unknownFields.build());
+ onChanged();
return this;
}
break;
}
case 8: {
- setHigh(input.readUInt64());
+ bitField0_ |= 0x00000001;
+ high_ = input.readUInt64();
break;
}
case 16: {
- setLow(input.readUInt64());
+ bitField0_ |= 0x00000002;
+ low_ = input.readUInt64();
break;
}
}
}
}
+ private int bitField0_;
// required uint64 high = 1;
+ private long high_ ;
public boolean hasHigh() {
- return result.hasHigh();
+ return ((bitField0_ & 0x00000001) == 0x00000001);
}
public long getHigh() {
- return result.getHigh();
+ return high_;
}
public Builder setHigh(long value) {
- result.hasHigh = true;
- result.high_ = value;
+ bitField0_ |= 0x00000001;
+ high_ = value;
+ onChanged();
return this;
}
public Builder clearHigh() {
- result.hasHigh = false;
- result.high_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ high_ = 0L;
+ onChanged();
return this;
}
// required uint64 low = 2;
+ private long low_ ;
public boolean hasLow() {
- return result.hasLow();
+ return ((bitField0_ & 0x00000002) == 0x00000002);
}
public long getLow() {
- return result.getLow();
+ return low_;
}
public Builder setLow(long value) {
- result.hasLow = true;
- result.low_ = value;
+ bitField0_ |= 0x00000002;
+ low_ = value;
+ onChanged();
return this;
}
public Builder clearLow() {
- result.hasLow = false;
- result.low_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000002);
+ low_ = 0L;
+ onChanged();
return this;
}
@@ -1303,7 +1882,6 @@ public final class ClusterProtocol {
static {
defaultInstance = new UuidProtocol(true);
- akka.cluster.ClusterProtocol.internalForceInit();
defaultInstance.initFields();
}
@@ -1390,7 +1968,5 @@ public final class ClusterProtocol {
}, assigner);
}
- public static void internalForceInit() {}
-
// @@protoc_insertion_point(outer_class_scope)
}
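
For orientation, the regenerated protobuf 2.4-style builders above no longer track a shared `result` field; build() validates a freshly assembled message, so a Builder stays usable after build(). A minimal usage sketch in Scala (values are illustrative):

    import akka.cluster.ClusterProtocol.{ DurableMailboxMessageProtocol, UuidProtocol }
    import com.google.protobuf.ByteString

    val uuid = UuidProtocol.newBuilder.setHigh(1L).setLow(2L).build
    val mailboxMessage = DurableMailboxMessageProtocol.newBuilder
      .setOwnerActorAddress("actor-owner")              // required field
      .setMessage(ByteString.copyFromUtf8("payload"))   // required field
      .setFutureUuid(uuid)                              // optional message field
      .build                                            // throws if a required field is missing
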
diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala
index 3fbc35492d..1b7c7d0939 100644
--- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala
+++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala
@@ -29,6 +29,7 @@ import Helpers._
import akka.actor._
import Actor._
import Status._
+import DeploymentConfig.{ ReplicationScheme, ReplicationStrategy, Transient, WriteThrough, WriteBehind }
import akka.event.EventHandler
import akka.dispatch.{ Dispatchers, Future }
import akka.remoteinterface._
@@ -463,7 +464,15 @@ class DefaultClusterNode private[akka] (
* available durable store.
*/
def store[T <: Actor](address: String, actorClass: Class[T], format: Serializer): ClusterNode =
- store(Actor.actorOf(actorClass, address).start, 0, false, format)
+ store(Actor.actorOf(actorClass, address).start, 0, Transient, false, format)
+
+ /**
+ * Clusters an actor of a specific type. If the actor is already clustered then the clustered version will be updated
+ * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly
+ * available durable store.
+ */
+ def store[T <: Actor](address: String, actorClass: Class[T], replicationScheme: ReplicationScheme, format: Serializer): ClusterNode =
+ store(Actor.actorOf(actorClass, address).start, 0, replicationScheme, false, format)
/**
* Clusters an actor of a specific type. If the actor is already clustered then the clustered version will be updated
@@ -471,7 +480,15 @@ class DefaultClusterNode private[akka] (
* available durable store.
*/
def store[T <: Actor](address: String, actorClass: Class[T], replicationFactor: Int, format: Serializer): ClusterNode =
- store(Actor.actorOf(actorClass, address).start, replicationFactor, false, format)
+ store(Actor.actorOf(actorClass, address).start, replicationFactor, Transient, false, format)
+
+ /**
+ * Clusters an actor of a specific type. If the actor is already clustered then the clustered version will be updated
+ * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly
+ * available durable store.
+ */
+ def store[T <: Actor](address: String, actorClass: Class[T], replicationFactor: Int, replicationScheme: ReplicationScheme, format: Serializer): ClusterNode =
+ store(Actor.actorOf(actorClass, address).start, replicationFactor, replicationScheme, false, format)
/**
* Clusters an actor of a specific type. If the actor is already clustered then the clustered version will be updated
@@ -479,7 +496,15 @@ class DefaultClusterNode private[akka] (
* available durable store.
*/
def store[T <: Actor](address: String, actorClass: Class[T], serializeMailbox: Boolean, format: Serializer): ClusterNode =
- store(Actor.actorOf(actorClass, address).start, 0, serializeMailbox, format)
+ store(Actor.actorOf(actorClass, address).start, 0, Transient, serializeMailbox, format)
+
+ /**
+ * Clusters an actor of a specific type. If the actor is already clustered then the clustered version will be updated
+ * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly
+ * available durable store.
+ */
+ def store[T <: Actor](address: String, actorClass: Class[T], replicationScheme: ReplicationScheme, serializeMailbox: Boolean, format: Serializer): ClusterNode =
+ store(Actor.actorOf(actorClass, address).start, 0, replicationScheme, serializeMailbox, format)
/**
* Clusters an actor of a specific type. If the actor is already clustered then the clustered version will be updated
@@ -487,7 +512,15 @@ class DefaultClusterNode private[akka] (
* available durable store.
*/
def store[T <: Actor](address: String, actorClass: Class[T], replicationFactor: Int, serializeMailbox: Boolean, format: Serializer): ClusterNode =
- store(Actor.actorOf(actorClass, address).start, replicationFactor, serializeMailbox, format)
+ store(Actor.actorOf(actorClass, address).start, replicationFactor, Transient, serializeMailbox, format)
+
+ /**
+ * Clusters an actor of a specific type. If the actor is already clustered then the clustered version will be updated
+ * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly
+ * available durable store.
+ */
+ def store[T <: Actor](address: String, actorClass: Class[T], replicationFactor: Int, replicationScheme: ReplicationScheme, serializeMailbox: Boolean, format: Serializer): ClusterNode =
+ store(Actor.actorOf(actorClass, address).start, replicationFactor, replicationScheme, serializeMailbox, format)
/**
* Clusters an actor with UUID. If the actor is already clustered then the clustered version will be updated
@@ -495,7 +528,15 @@ class DefaultClusterNode private[akka] (
* available durable store.
*/
def store(actorRef: ActorRef, format: Serializer): ClusterNode =
- store(actorRef, 0, false, format)
+ store(actorRef, 0, Transient, false, format)
+
+ /**
+ * Clusters an actor with UUID. If the actor is already clustered then the clustered version will be updated
+ * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly
+ * available durable store.
+ */
+ def store(actorRef: ActorRef, replicationScheme: ReplicationScheme, format: Serializer): ClusterNode =
+ store(actorRef, 0, replicationScheme, false, format)
/**
* Clusters an actor with UUID. If the actor is already clustered then the clustered version will be updated
@@ -503,7 +544,15 @@ class DefaultClusterNode private[akka] (
* available durable store.
*/
def store(actorRef: ActorRef, replicationFactor: Int, format: Serializer): ClusterNode =
- store(actorRef, replicationFactor, false, format)
+ store(actorRef, replicationFactor, Transient, false, format)
+
+ /**
+ * Clusters an actor with UUID. If the actor is already clustered then the clustered version will be updated
+ * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly
+ * available durable store.
+ */
+ def store(actorRef: ActorRef, replicationFactor: Int, replicationScheme: ReplicationScheme, format: Serializer): ClusterNode =
+ store(actorRef, replicationFactor, replicationScheme, false, format)
/**
* Clusters an actor with UUID. If the actor is already clustered then the clustered version will be updated
@@ -511,20 +560,47 @@ class DefaultClusterNode private[akka] (
* available durable store.
*/
def store(actorRef: ActorRef, serializeMailbox: Boolean, format: Serializer): ClusterNode =
- store(actorRef, 0, serializeMailbox, format)
-
- /**
- * Needed to have reflection through structural typing work.
- */
- def store(actorRef: ActorRef, replicationFactor: Int, serializeMailbox: Boolean, format: AnyRef): ClusterNode =
- store(actorRef, replicationFactor, serializeMailbox, format.asInstanceOf[Serializer])
+ store(actorRef, 0, Transient, serializeMailbox, format)
/**
* Clusters an actor with UUID. If the actor is already clustered then the clustered version will be updated
* with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly
* available durable store.
*/
- def store(actorRef: ActorRef, replicationFactor: Int, serializeMailbox: Boolean): ClusterNode = if (isConnected.isOn) {
+ def store(actorRef: ActorRef, replicationFactor: Int, serializeMailbox: Boolean, format: Serializer): ClusterNode =
+ store(actorRef, replicationFactor, Transient, serializeMailbox, format)
+
+ /**
+ * Clusters an actor with UUID. If the actor is already clustered then the clustered version will be updated
+ * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly
+ * available durable store.
+ */
+ def store(actorRef: ActorRef, replicationScheme: ReplicationScheme, serializeMailbox: Boolean, format: Serializer): ClusterNode =
+ store(actorRef, 0, replicationScheme, serializeMailbox, format)
+
+ /**
+ * Needed to have reflection through structural typing work.
+ */
+ def store(actorRef: ActorRef, replicationFactor: Int, replicationScheme: ReplicationScheme, serializeMailbox: Boolean, format: AnyRef): ClusterNode =
+ store(actorRef, replicationFactor, replicationScheme, serializeMailbox, format.asInstanceOf[Serializer])
+
+ /**
+ * Needed to have reflection through structural typing work.
+ */
+ def store(actorRef: ActorRef, replicationFactor: Int, serializeMailbox: Boolean, format: AnyRef): ClusterNode =
+ store(actorRef, replicationFactor, Transient, serializeMailbox, format)
+
+ /**
+ * Clusters an actor with UUID. If the actor is already clustered then the clustered version will be updated
+ * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly
+ * available durable store.
+ */
+ def store(
+ actorRef: ActorRef,
+ replicationFactor: Int,
+ replicationScheme: ReplicationScheme,
+ serializeMailbox: Boolean,
+ format: Serializer): ClusterNode = if (isConnected.isOn) {
import akka.serialization.ActorSerialization._
@@ -535,12 +611,14 @@ class DefaultClusterNode private[akka] (
EventHandler.debug(this,
"Storing actor [%s] with UUID [%s] in cluster".format(actorRef.address, uuid))
- val actorBytes = if (shouldCompressData) LZF.compress(toBinary(actorRef, serializeMailbox))
- else toBinary(actorRef)
+ val actorBytes =
+ if (shouldCompressData) LZF.compress(toBinary(actorRef, serializeMailbox, replicationScheme)(format))
+ else toBinary(actorRef, serializeMailbox, replicationScheme)(format)
+
val actorRegistryPath = actorRegistryPathFor(uuid)
// create UUID -> Array[Byte] for actor registry
- if (zkClient.exists(actorRegistryPath)) zkClient.writeData(actorRegistryPath, actorBytes) // FIXME check for size and warn if too big
+ if (zkClient.exists(actorRegistryPath)) zkClient.writeData(actorRegistryPath, actorBytes) // FIXME Store actor bytes in Data Grid not ZooKeeper
else {
zkClient.retryUntilConnected(new Callable[Either[String, Exception]]() {
def call: Either[String, Exception] = {
@@ -590,9 +668,7 @@ class DefaultClusterNode private[akka] (
(connection !! (command, remoteDaemonAckTimeout)) match {
case Some(Success) ⇒
- EventHandler.debug(this,
- "Replica for [%s] successfully created on [%s]"
- .format(actorRef.address, connection))
+ EventHandler.debug(this, "Replica for [%s] successfully created".format(actorRef.address))
case Some(Failure(cause)) ⇒
EventHandler.error(cause, this, cause.toString)
@@ -616,8 +692,9 @@ class DefaultClusterNode private[akka] (
releaseActorOnAllNodes(uuid)
locallyCheckedOutActors.remove(uuid)
+
// warning: ordering matters here
- ignore[ZkNoNodeException](zkClient.deleteRecursive(actorAddressToUuidsPathFor(actorAddressForUuid(uuid)))) // remove ADDRESS to UUID mapping
+ ignore[ZkNoNodeException](zkClient.deleteRecursive(actorAddressToUuidsPathFor(actorAddressForUuid(uuid)))) // FIXME remove ADDRESS to UUID mapping?
ignore[ZkNoNodeException](zkClient.deleteRecursive(actorAtNodePathFor(nodeAddress.nodeName, uuid)))
ignore[ZkNoNodeException](zkClient.deleteRecursive(actorRegistryPathFor(uuid)))
ignore[ZkNoNodeException](zkClient.deleteRecursive(actorLocationsPathFor(uuid)))
@@ -662,20 +739,17 @@ class DefaultClusterNode private[akka] (
* Checks out an actor for use on this node, e.g. checked out as a 'LocalActorRef' but it makes it available
* for remote access through lookup by its UUID.
*/
- def use[T <: Actor](actorAddress: String): Option[LocalActorRef] = use(actorAddress, formatForActor(actorAddress))
+ def use[T <: Actor](actorAddress: String): Option[ActorRef] = use(actorAddress, formatForActor(actorAddress))
/**
* Checks out an actor for use on this node, e.g. checked out as a 'LocalActorRef' but it makes it available
* for remote access through lookup by its UUID.
*/
- def use[T <: Actor](actorAddress: String): Option[LocalActorRef] = if (isConnected.isOn) {
+ def use[T <: Actor](actorAddress: String, format: Serializer): Option[ActorRef] = if (isConnected.isOn) {
import akka.serialization.ActorSerialization._
actorUuidsForActorAddress(actorAddress) map { uuid ⇒
- EventHandler.debug(this,
- "Checking out actor with UUID [%s] to be used on node [%s] as local actor"
- .format(uuid, nodeAddress.nodeName))
ignore[ZkNodeExistsException](zkClient.createPersistent(actorAtNodePathFor(nodeAddress.nodeName, uuid), true))
ignore[ZkNodeExistsException](zkClient.createEphemeral(actorLocationsPathFor(uuid, nodeAddress)))
@@ -697,12 +771,12 @@ class DefaultClusterNode private[akka] (
}) match {
case Left(bytes) ⇒
locallyCheckedOutActors += (uuid -> bytes)
- // FIXME switch to ReplicatedActorRef here
- // val actor = new ReplicatedActorRef(fromBinary[T](bytes, remoteServerAddress)(format))
- val actor = fromBinary[T](bytes, remoteServerAddress)
- // remoteService.register(UUID_PREFIX + uuid, actor) // FIXME is Actor.remote.register(UUID, ..) correct here?
+ val actor = fromBinary[T](bytes, remoteServerAddress)(format)
+ EventHandler.debug(this,
+ "Checking out actor [%s] to be used on node [%s] as local actor"
+ .format(actor, nodeAddress.nodeName))
actor.start()
- actor.asInstanceOf[LocalActorRef]
+ actor
case Right(exception) ⇒ throw exception
}
} headOption // FIXME should not be an array at all coming here but an Option[ActorRef]
@@ -715,14 +789,15 @@ class DefaultClusterNode private[akka] (
isConnected ifOn {
EventHandler.debug(this,
"Using (checking out) all actors with UUID [%s] on all nodes in cluster".format(uuid))
+
val command = RemoteDaemonMessageProtocol.newBuilder
.setMessageType(USE)
.setActorUuid(uuidToUuidProtocol(uuid))
.build
+
membershipNodes foreach { node ⇒
replicaConnections.get(node) foreach {
- case (_, connection) ⇒
- connection ! command
+ case (_, connection) ⇒ connection ! command
}
}
}
@@ -786,8 +861,8 @@ class DefaultClusterNode private[akka] (
def ref(actorAddress: String, router: RouterType): ActorRef = if (isConnected.isOn) {
val addresses = addressesForActor(actorAddress)
EventHandler.debug(this,
- "Checking out cluster actor ref with address [%s] and router [%s] connected to [\n\t%s]"
- .format(actorAddress, router, addresses.mkString("\n\t")))
+ "Checking out cluster actor ref with address [%s] and router [%s] on [%s] connected to [\n\t%s]"
+ .format(actorAddress, router, remoteServerAddress, addresses.map(_._2).mkString("\n\t")))
val actorRef = Router newRouter (router, addresses, actorAddress, Actor.TIMEOUT)
addresses foreach { case (_, address) ⇒ clusterActorRefs.put(address, actorRef) }
@@ -1230,7 +1305,7 @@ class DefaultClusterNode private[akka] (
homeAddress.setAccessible(true)
homeAddress.set(actor, Some(remoteServerAddress))
- remoteService.register(uuid, actor) // FIXME is Actor.remote.register(UUID, ..) correct here?
+ remoteService.register(actorAddress, actor)
}
}
@@ -1473,8 +1548,6 @@ object RemoteClusterDaemon {
val computeGridDispatcher = Dispatchers.newDispatcher("akka:cloud:cluster:compute-grid").build
}
-// FIXME supervise RemoteClusterDaemon
-
/**
* @author Jonas Bonér
*/
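
As a usage aid, all of the new store overloads above funnel into the single full-signature variant, defaulting the replication factor to 0 and the scheme to Transient. An illustrative sketch (node, echoRef and format are hypothetical values, and the DeploymentConfig import path is an assumption):

    import akka.actor.DeploymentConfig.{ Transient, WriteBehind }

    node.store(echoRef, format)                        // factor 0, Transient, mailbox not serialized
    node.store(echoRef, 3, format)                     // factor 3, Transient
    node.store(echoRef, 3, WriteBehind, true, format)  // explicit scheme, mailbox serialized
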
diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterActorRef.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterActorRef.scala
index f107904892..40b3dbc133 100644
--- a/akka-cluster/src/main/scala/akka/cluster/ClusterActorRef.scala
+++ b/akka-cluster/src/main/scala/akka/cluster/ClusterActorRef.scala
@@ -6,12 +6,16 @@ package akka.cluster
import Cluster._
import akka.actor._
-import akka.actor.Actor._
+import Actor._
+import akka.dispatch._
+import akka.util._
+import ReflectiveAccess._
+import ClusterModule._
import akka.event.EventHandler
-import akka.dispatch.Promise
import java.net.InetSocketAddress
import java.util.concurrent.atomic.AtomicReference
+import java.util.{ Map ⇒ JMap }
import com.eaio.uuid.UUID
@@ -20,21 +24,20 @@ import com.eaio.uuid.UUID
*/
class ClusterActorRef private[akka] (
inetSocketAddresses: Array[Tuple2[UUID, InetSocketAddress]],
- actorAddress: String,
- timeout: Long,
- val replicationStrategy: ReplicationStrategy)
- extends RemoteActorRef(null, actorAddress, timeout, None) { // FIXME UGLY HACK - should not extend RemoteActorRef
- this: ClusterActorRef with Router.Router ⇒
+ val address: String,
+ _timeout: Long)
+ extends ActorRef with ScalaActorRef { this: Router.Router ⇒
- EventHandler.debug(this,
- "Creating a ClusterActorRef for actor with address [%s] with connections [\n\t%s]"
- .format(actorAddress, inetSocketAddresses.mkString("\n\t")))
+ timeout = _timeout
private[akka] val inetSocketAddressToActorRefMap = new AtomicReference[Map[InetSocketAddress, ActorRef]](
(Map[InetSocketAddress, ActorRef]() /: inetSocketAddresses) {
- case (map, (uuid, inetSocketAddress)) ⇒ map + (inetSocketAddress -> createRemoteActorRef(actorAddress, inetSocketAddress))
+ case (map, (uuid, inetSocketAddress)) ⇒ map + (inetSocketAddress -> createRemoteActorRef(address, inetSocketAddress))
})
+ ClusterModule.ensureEnabled()
+ start()
+
def connections: Map[InetSocketAddress, ActorRef] = inetSocketAddressToActorRefMap.get
override def postMessageToMailbox(message: Any, senderOption: Option[ActorRef]): Unit =
@@ -60,4 +63,53 @@ class ClusterActorRef private[akka] (
private def createRemoteActorRef(actorAddress: String, inetSocketAddress: InetSocketAddress) = {
RemoteActorRef(inetSocketAddress, actorAddress, Actor.TIMEOUT, None)
}
+
+ def start(): ActorRef = synchronized {
+ _status = ActorRefInternals.RUNNING
+ this
+ }
+
+ def stop() {
+ synchronized {
+ if (_status == ActorRefInternals.RUNNING) {
+ _status = ActorRefInternals.SHUTDOWN
+ postMessageToMailbox(RemoteActorSystemMessage.Stop, None)
+ }
+ }
+ }
+
+ // ==== NOT SUPPORTED ====
+ // FIXME move these methods and the same ones in RemoteActorRef to a base class - now duplicated
+ def dispatcher_=(md: MessageDispatcher) {
+ unsupported
+ }
+ def dispatcher: MessageDispatcher = unsupported
+ def link(actorRef: ActorRef) {
+ unsupported
+ }
+ def unlink(actorRef: ActorRef) {
+ unsupported
+ }
+ def startLink(actorRef: ActorRef): ActorRef = unsupported
+ def supervisor: Option[ActorRef] = unsupported
+ def linkedActors: JMap[Uuid, ActorRef] = unsupported
+ protected[akka] def mailbox: AnyRef = unsupported
+ protected[akka] def mailbox_=(value: AnyRef): AnyRef = unsupported
+ protected[akka] def handleTrapExit(dead: ActorRef, reason: Throwable) {
+ unsupported
+ }
+ protected[akka] def restart(reason: Throwable, maxNrOfRetries: Option[Int], withinTimeRange: Option[Int]) {
+ unsupported
+ }
+ protected[akka] def restartLinkedActors(reason: Throwable, maxNrOfRetries: Option[Int], withinTimeRange: Option[Int]) {
+ unsupported
+ }
+ protected[akka] def invoke(messageHandle: MessageInvocation) {
+ unsupported
+ }
+ protected[akka] def supervisor_=(sup: Option[ActorRef]) {
+ unsupported
+ }
+ protected[akka] def actorInstance: AtomicReference[Actor] = unsupported
+ private def unsupported = throw new UnsupportedOperationException("Not supported for ClusterActorRef")
}
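
Behaviourally, the reworked ClusterActorRef acts as an ordinary ActorRef for message passing while rejecting local lifecycle operations. A rough sketch (clusterRef and otherRef are hypothetical):

    clusterRef ! "work"                    // routed to one of the underlying remote connections
    try clusterRef.link(otherRef)          // linking, supervision and dispatcher access are unsupported
    catch { case _: UnsupportedOperationException ⇒ () }
    clusterRef.stop()                      // sends RemoteActorSystemMessage.Stop to the connections
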
diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterDeployer.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterDeployer.scala
index 19b89628a1..070a52d96b 100644
--- a/akka-cluster/src/main/scala/akka/cluster/ClusterDeployer.scala
+++ b/akka-cluster/src/main/scala/akka/cluster/ClusterDeployer.scala
@@ -27,7 +27,7 @@ import java.util.concurrent.atomic.AtomicReference
/**
* A ClusterDeployer is responsible for deploying a Deploy.
*
- * big question is: what does Deploy mean?
+ * FIXME Document: what does Deploy mean?
*
* @author Jonas Bonér
*/
@@ -35,11 +35,16 @@ object ClusterDeployer {
val clusterName = Cluster.name
val nodeName = Config.nodename
val clusterPath = "/%s" format clusterName
- val clusterDeploymentLockPath = clusterPath + "/deployment-lock"
+
val deploymentPath = clusterPath + "/deployment"
- val baseNodes = List(clusterPath, clusterDeploymentLockPath, deploymentPath)
val deploymentAddressPath = deploymentPath + "/%s"
+ val deploymentCoordinationPath = clusterPath + "/deployment-coordination"
+ val deploymentInProgressLockPath = deploymentCoordinationPath + "/in-progress"
+ val isDeploymentCompletedInClusterLockPath = deploymentCoordinationPath + "/completed" // should not be part of baseNodes
+
+ val baseNodes = List(clusterPath, deploymentPath, deploymentCoordinationPath, deploymentInProgressLockPath)
+
private val isConnected = new Switch(false)
private val deploymentCompleted = new CountDownLatch(1)
@@ -49,7 +54,7 @@ object ClusterDeployer {
Cluster.connectionTimeout,
Cluster.defaultSerializer)
- private val clusterDeploymentLockListener = new LockListener {
+ private val deploymentInProgressLockListener = new LockListener {
def lockAcquired() {
EventHandler.debug(this, "Clustered deployment started")
}
@@ -60,13 +65,11 @@ object ClusterDeployer {
}
}
- private val deploymentLock = new WriteLock(
- zkClient.connection.getZookeeper, clusterDeploymentLockPath, null, clusterDeploymentLockListener) {
- private val ownerIdField = classOf[WriteLock].getDeclaredField("ownerId")
- ownerIdField.setAccessible(true)
-
- def leader: String = ownerIdField.get(this).asInstanceOf[String]
- }
+ private val deploymentInProgressLock = new WriteLock(
+ zkClient.connection.getZookeeper,
+ deploymentInProgressLockPath,
+ null,
+ deploymentInProgressLockListener)
private val systemDeployments: List[Deploy] = Nil
@@ -79,6 +82,7 @@ object ClusterDeployer {
deployment ← zkClient.readData(deploymentAddressPath.format(child)).asInstanceOf[Deploy]
} zkClient.delete(deploymentAddressPath.format(deployment.address))
+ invalidateDeploymentInCluster()
} catch {
case e: Exception ⇒
handleError(new DeploymentException("Could not undeploy all deployment data in ZooKeeper due to: " + e))
@@ -124,8 +128,6 @@ object ClusterDeployer {
}
private[akka] def init(deployments: List[Deploy]) {
- println("===============================================================")
- println("------------ INIT 1")
isConnected switchOn {
EventHandler.info(this, "Initializing cluster deployer")
@@ -141,31 +143,21 @@ object ClusterDeployer {
}
}
- println("------------ INIT 2")
val allDeployments = deployments ::: systemDeployments
- ///===========================================================
- // FIXME need a flag 'deploymentDone' in ZK and to wrap the deployment in 'if (!deploymentDone) { .. }', since now the deployment is only protected by lock during the actual deployment, if node comes in later then deployment is repeated on that node again
- ///===========================================================
+ if (!isDeploymentCompletedInCluster) {
+ if (deploymentInProgressLock.lock()) {
+ // try to be the one doing the clustered deployment
+ EventHandler.info(this, "Deploying to cluster [\n" + allDeployments.mkString("\n\t") + "\n]")
+ allDeployments foreach (deploy(_)) // deploy
+ markDeploymentCompletedInCluster()
+ deploymentInProgressLock.unlock() // signal deployment complete
- if (deploymentLock.lock()) {
- println("------------ INIT 3")
- // try to be the one doing the clustered deployment
- EventHandler.info(this, "Deploying to cluster [\n" + allDeployments.mkString("\n\t") + "\n]")
-
- println("------------ INIT 4")
- allDeployments foreach (deploy(_)) // deploy
- println("------------ INIT 5")
-
- // FIXME need to set deployment done flag
-
- deploymentLock.unlock() // signal deployment complete
- } else {
- println("------------ INIT WAITING")
- deploymentCompleted.await() // wait until deployment is completed by other "master" node
+ } else {
+ deploymentCompleted.await() // wait until deployment is completed by other "master" node
+ }
}
- println("------------ INIT 6")
// fetch clustered deployments and deploy them locally
fetchDeploymentsFromCluster foreach (LocalDeployer.deploy(_))
}
@@ -183,14 +175,29 @@ object ClusterDeployer {
zkClient.writeData(path, deployment)
} catch {
case e: NullPointerException ⇒
- handleError(new DeploymentException("Could not store deployment data [" + deployment + "] in ZooKeeper since client session is closed"))
+ handleError(new DeploymentException(
+ "Could not store deployment data [" + deployment +
+ "] in ZooKeeper since client session is closed"))
case e: Exception ⇒
- handleError(new DeploymentException("Could not store deployment data [" + deployment + "] in ZooKeeper due to: " + e))
+ handleError(new DeploymentException(
+ "Could not store deployment data [" +
+ deployment + "] in ZooKeeper due to: " + e))
}
}
}
}
+ private def markDeploymentCompletedInCluster() {
+ ignore[ZkNodeExistsException](zkClient.create(isDeploymentCompletedInClusterLockPath, null, CreateMode.PERSISTENT))
+ }
+
+ private def isDeploymentCompletedInCluster = zkClient.exists(isDeploymentCompletedInClusterLockPath)
+
+ // FIXME in future - add watch to this path to be able to trigger redeployment, and use this method to trigger redeployment
+ private def invalidateDeploymentInCluster() {
+ ignore[ZkNoNodeException](zkClient.delete(isDeploymentCompletedInClusterLockPath))
+ }
+
private def ensureRunning[T](body: ⇒ T): T = {
if (isConnected.isOn) body
else throw new IllegalStateException("ClusterDeployer is not running")
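
The coordination change above amounts to a deploy-once pattern: a write lock elects the node that performs the clustered deployment, and a persistent "completed" marker stops late-joining nodes from repeating it. A condensed view of the control flow (not the literal code):

    if (!isDeploymentCompletedInCluster) {
      if (deploymentInProgressLock.lock()) {    // this node won the election
        allDeployments foreach (deploy(_))
        markDeploymentCompletedInCluster()      // persistent marker for future joiners
        deploymentInProgressLock.unlock()       // lets waiting nodes proceed
      } else {
        deploymentCompleted.await()             // another node is performing the deployment
      }
    }
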
diff --git a/akka-cluster/src/main/scala/akka/cluster/ReplicatedClusterRef.scala b/akka-cluster/src/main/scala/akka/cluster/ReplicatedClusterRef.scala
deleted file mode 100644
index 4b075c7f91..0000000000
--- a/akka-cluster/src/main/scala/akka/cluster/ReplicatedClusterRef.scala
+++ /dev/null
@@ -1,105 +0,0 @@
-package akka.cluster
-
-/**
- * Copyright (C) 2009-2011 Scalable Solutions AB
- */
-import Cluster._
-
-import akka.actor._
-import akka.remote.MessageSerializer
-import akka.event.EventHandler
-import akka.config.Supervision._
-import akka.dispatch._
-
-import java.net.InetSocketAddress
-import java.util.concurrent.atomic.AtomicReference
-import java.util.{ Map ⇒ JMap }
-
-/**
- * @author Jonas Bonér
- */
-trait Replicable { this: Actor ⇒
-}
-
-/**
- * @author Jonas Bonér
- */
-sealed trait ReplicationStrategy
-
-object ReplicationStrategy {
- case object Transient extends ReplicationStrategy
- case object WriteThrough extends ReplicationStrategy
- case object WriteBehind extends ReplicationStrategy
-}
-
-/**
- * @author Jonas Bonér
- */
-class ReplicatedActorRef private[akka] (actorRef: ActorRef, val address: String) extends ActorRef with ScalaActorRef {
-
- private lazy val txLog = {
- EventHandler.debug(this, "Creating a ReplicatedActorRef for Actor [%s]".format(address))
- TransactionLog.newLogFor(uuid.toString)
- }
-
- def invoke(messageHandle: MessageInvocation) {
- actorRef.invoke(messageHandle)
- txLog.recordEntry(MessageSerializer.serialize(messageHandle.message).toByteArray)
- }
-
- def start(): ActorRef = {
- EventHandler.debug(this, "Starting ReplicatedActorRef for Actor [%s] with transaction log [%s]"
- .format(address, txLog.logId))
- actorRef.start()
- }
-
- def stop() {
- txLog.delete()
- actorRef.stop()
- }
-
- override def setFaultHandler(handler: FaultHandlingStrategy) {
- actorRef.setFaultHandler(handler)
- }
- override def getFaultHandler: FaultHandlingStrategy = actorRef.getFaultHandler()
- override def setLifeCycle(lifeCycle: LifeCycle) {
- actorRef.setLifeCycle(lifeCycle)
- }
- override def getLifeCycle: LifeCycle = actorRef.getLifeCycle
- def dispatcher_=(md: MessageDispatcher) {
- actorRef.dispatcher_=(md)
- }
- def dispatcher: MessageDispatcher = actorRef.dispatcher
- def link(actorRef: ActorRef) {
- actorRef.link(actorRef)
- }
- def unlink(actorRef: ActorRef) {
- actorRef.unlink(actorRef)
- }
- def startLink(actorRef: ActorRef): ActorRef = actorRef.startLink(actorRef)
- def supervisor: Option[ActorRef] = actorRef.supervisor
- def linkedActors: JMap[Uuid, ActorRef] = actorRef.linkedActors
- protected[akka] def postMessageToMailbox(message: Any, senderOption: Option[ActorRef]) {
- actorRef.postMessageToMailbox(message, senderOption)
- }
- protected[akka] def postMessageToMailboxAndCreateFutureResultWithTimeout[T](
- message: Any,
- timeout: Long,
- senderOption: Option[ActorRef],
- senderFuture: Option[Promise[T]]): Promise[T] = actorRef.postMessageToMailboxAndCreateFutureResultWithTimeout(message, timeout, senderOption, senderFuture)
- protected[akka] def actorInstance: AtomicReference[Actor] = actorRef.actorInstance
- protected[akka] def supervisor_=(sup: Option[ActorRef]) {
- actorRef.supervisor_=(sup)
- }
- protected[akka] def mailbox: AnyRef = actorRef.mailbox
- protected[akka] def mailbox_=(value: AnyRef): AnyRef = actorRef.mailbox_=(value)
- protected[akka] def handleTrapExit(dead: ActorRef, reason: Throwable) {
- actorRef.handleTrapExit(dead, reason)
- }
- protected[akka] def restart(reason: Throwable, maxNrOfRetries: Option[Int], withinTimeRange: Option[Int]) {
- actorRef.restart(reason, maxNrOfRetries, withinTimeRange)
- }
- protected[akka] def restartLinkedActors(reason: Throwable, maxNrOfRetries: Option[Int], withinTimeRange: Option[Int]) {
- actorRef.restartLinkedActors(reason, maxNrOfRetries, withinTimeRange)
- }
-}
diff --git a/akka-cluster/src/main/scala/akka/cluster/Routing.scala b/akka-cluster/src/main/scala/akka/cluster/Routing.scala
index 1bde759ca6..d3bc4904f7 100644
--- a/akka-cluster/src/main/scala/akka/cluster/Routing.scala
+++ b/akka-cluster/src/main/scala/akka/cluster/Routing.scala
@@ -27,12 +27,11 @@ object Router {
routerType: RouterType,
inetSocketAddresses: Array[Tuple2[UUID, InetSocketAddress]],
actorAddress: String,
- timeout: Long,
- replicationStrategy: ReplicationStrategy = ReplicationStrategy.WriteThrough): ClusterActorRef = {
+ timeout: Long): ClusterActorRef = {
routerType match {
- case Direct ⇒ new ClusterActorRef(inetSocketAddresses, actorAddress, timeout, replicationStrategy) with Direct
- case Random ⇒ new ClusterActorRef(inetSocketAddresses, actorAddress, timeout, replicationStrategy) with Random
- case RoundRobin ⇒ new ClusterActorRef(inetSocketAddresses, actorAddress, timeout, replicationStrategy) with RoundRobin
+ case Direct ⇒ new ClusterActorRef(inetSocketAddresses, actorAddress, timeout) with Direct
+ case Random ⇒ new ClusterActorRef(inetSocketAddresses, actorAddress, timeout) with Random
+ case RoundRobin ⇒ new ClusterActorRef(inetSocketAddresses, actorAddress, timeout) with RoundRobin
case LeastCPU ⇒ sys.error("Router LeastCPU not supported yet")
case LeastRAM ⇒ sys.error("Router LeastRAM not supported yet")
case LeastMessages ⇒ sys.error("Router LeastMessages not supported yet")
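
A quick sketch of calling the slimmed-down factory above; the connection endpoints are made up and the RouterType import location is an assumption:

    import akka.actor.Actor
    import java.net.InetSocketAddress
    import com.eaio.uuid.UUID

    val connections = Array(
      (new UUID, new InetSocketAddress("node-1", 2552)),
      (new UUID, new InetSocketAddress("node-2", 2552)))

    val ref = Router.newRouter(RouterType.RoundRobin, connections, "hello-service", Actor.TIMEOUT)
    ref ! "hello"  // dispatched according to the mixed-in routing trait
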
diff --git a/akka-cluster/src/main/scala/akka/cluster/TransactionLog.scala b/akka-cluster/src/main/scala/akka/cluster/TransactionLog.scala
index f5c96250b4..281d2f91e5 100644
--- a/akka-cluster/src/main/scala/akka/cluster/TransactionLog.scala
+++ b/akka-cluster/src/main/scala/akka/cluster/TransactionLog.scala
@@ -9,16 +9,23 @@ import org.apache.zookeeper.CreateMode
import org.I0Itec.zkclient.exception._
+import akka.AkkaException
import akka.config._
import Config._
import akka.util._
+import akka.actor._
+import DeploymentConfig.{ ReplicationScheme, ReplicationStrategy, Transient, WriteThrough, WriteBehind }
import akka.event.EventHandler
-import akka.dispatch.{ DefaultPromise, Promise }
-import akka.AkkaException
-
+import akka.dispatch.{ DefaultPromise, Promise, MessageInvocation }
+import akka.remote.MessageSerializer
+import akka.serialization.ActorSerialization._
import akka.cluster.zookeeper._
+import akka.serialization.{ Serializer, Compression }
+import Compression.LZF
import java.util.Enumeration
+import java.util.concurrent.atomic.AtomicLong
// FIXME allow user to choose dynamically between 'async' and 'sync' tx logging (asyncAddEntry(byte[] data, AddCallback cb, Object ctx))
// FIXME clean up old entries in log after doing a snapshot
@@ -41,25 +48,47 @@ class ReplicationException(message: String) extends AkkaException(message)
*
* @author Jonas Bonér
*/
-class TransactionLog private (ledger: LedgerHandle, val id: String, val isAsync: Boolean) {
+class TransactionLog private (
+ ledger: LedgerHandle,
+ val id: String,
+ val isAsync: Boolean,
+ replicationScheme: ReplicationScheme,
+ format: Serializer) {
import TransactionLog._
val logId = ledger.getId
val txLogPath = transactionLogNode + "/" + id
val snapshotPath = txLogPath + "/snapshot"
+ val nrOfEntries = new AtomicLong(0)
private val isOpen = new Switch(true)
+ /**
+ * TODO document method
+ */
+ def recordEntry(messageHandle: MessageInvocation, actorRef: ActorRef) {
+ if (nrOfEntries.incrementAndGet % snapshotFrequency == 0) {
+ val snapshot =
+ // FIXME ReplicationStrategy Transient is always used
+ if (Cluster.shouldCompressData) LZF.compress(toBinary(actorRef, false, replicationScheme)(format))
+ else toBinary(actorRef, false, replicationScheme)(format)
+ recordSnapshot(snapshot)
+ }
+ recordEntry(MessageSerializer.serialize(messageHandle.message).toByteArray)
+ }
+
/**
* TODO document method
*/
def recordEntry(entry: Array[Byte]) {
if (isOpen.isOn) {
+ val bytes = if (Cluster.shouldCompressData) LZF.compress(entry)
+ else entry
try {
if (isAsync) {
ledger.asyncAddEntry(
- entry,
+ bytes,
new AsyncCallback.AddCallback {
def addComplete(
returnCode: Int,
@@ -73,7 +102,7 @@ class TransactionLog private (ledger: LedgerHandle, val id: String, val isAsync:
},
null)
} else {
- handleReturnCode(ledger.addEntry(entry))
+ handleReturnCode(ledger.addEntry(bytes))
val entryId = ledger.getLastAddPushed
EventHandler.debug(this, "Writing entry [%s] to log [%s]".format(entryId, logId))
}
@@ -88,10 +117,12 @@ class TransactionLog private (ledger: LedgerHandle, val id: String, val isAsync:
*/
def recordSnapshot(snapshot: Array[Byte]) {
if (isOpen.isOn) {
+ val bytes = if (Cluster.shouldCompressData) LZF.compress(snapshot)
+ else snapshot
try {
if (isAsync) {
ledger.asyncAddEntry(
- snapshot,
+ bytes,
new AsyncCallback.AddCallback {
def addComplete(
returnCode: Int,
@@ -104,7 +135,7 @@ class TransactionLog private (ledger: LedgerHandle, val id: String, val isAsync:
},
null)
} else {
- handleReturnCode(ledger.addEntry(snapshot))
+ handleReturnCode(ledger.addEntry(bytes))
storeSnapshotMetaDataInZooKeeper(ledger.getLastAddPushed)
}
} catch {
@@ -121,7 +152,7 @@ class TransactionLog private (ledger: LedgerHandle, val id: String, val isAsync:
/**
* TODO document method
*/
- def entriesFromLatestSnapshot: Tuple2[Array[Byte], Vector[Array[Byte]]] = {
+ def toByteArraysLatestSnapshot: (Array[Byte], Vector[Array[Byte]]) = {
val snapshotId = latestSnapshotId
EventHandler.debug(this,
"Reading entries from snapshot id [%s] for log [%s]".format(snapshotId, logId))
@@ -133,9 +164,9 @@ class TransactionLog private (ledger: LedgerHandle, val id: String, val isAsync:
*/
def entriesInRange(from: Long, to: Long): Vector[Array[Byte]] = if (isOpen.isOn) {
try {
- if (from < 0) throw new IllegalArgumentException("'from' can't be negative [" + from + "]")
- if (to < 0) throw new IllegalArgumentException("'to' can't be negative [" + from + "]")
- if (to < from) throw new IllegalArgumentException("'to' can't be smaller than 'from' [" + from + "," + to + "]")
+ if (from < 0) throw new IllegalArgumentException("'from' index can't be negative [" + from + "]")
+ if (to < 0) throw new IllegalArgumentException("'to' index can't be negative [" + to + "]")
+ if (to < from) throw new IllegalArgumentException("'to' index can't be smaller than 'from' index [" + from + "," + to + "]")
EventHandler.debug(this,
"Reading entries [%s -> %s] for log [%s]".format(from, to, logId))
@@ -150,10 +181,7 @@ class TransactionLog private (ledger: LedgerHandle, val id: String, val isAsync:
enumeration: Enumeration[LedgerEntry],
ctx: AnyRef) {
val future = ctx.asInstanceOf[Promise[Vector[Array[Byte]]]]
- var entries = Vector[Array[Byte]]()
- while (enumeration.hasMoreElements) {
- entries = entries :+ enumeration.nextElement.getEntry
- }
+ val entries = toByteArrays(enumeration)
if (returnCode == BKException.Code.OK) future.completeWithResult(entries)
else future.completeWithException(BKException.create(returnCode))
}
@@ -161,12 +189,7 @@ class TransactionLog private (ledger: LedgerHandle, val id: String, val isAsync:
future)
await(future)
} else {
- val enumeration = ledger.readEntries(from, to)
- var entries = Vector[Array[Byte]]()
- while (enumeration.hasMoreElements) {
- entries = entries :+ enumeration.nextElement.getEntry
- }
- entries
+ toByteArrays(ledger.readEntries(from, to))
}
} catch {
case e ⇒ handleError(e)
@@ -190,8 +213,7 @@ class TransactionLog private (ledger: LedgerHandle, val id: String, val isAsync:
} catch {
case e: ZkNoNodeException ⇒
handleError(new ReplicationException(
- "Transaction log for UUID [" + id +
- "] does not have a snapshot recorded in ZooKeeper"))
+ "Transaction log for UUID [" + id + "] does not have a snapshot recorded in ZooKeeper"))
case e ⇒ handleError(e)
}
}
@@ -208,7 +230,7 @@ class TransactionLog private (ledger: LedgerHandle, val id: String, val isAsync:
logId,
new AsyncCallback.DeleteCallback {
def deleteComplete(returnCode: Int, ctx: AnyRef) {
- handleReturnCode(returnCode)
+ handleReturnCode(returnCode)
}
},
null)
@@ -248,6 +270,18 @@ class TransactionLog private (ledger: LedgerHandle, val id: String, val isAsync:
}
}
+ private def toByteArrays(enumeration: Enumeration[LedgerEntry]): Vector[Array[Byte]] = {
+ var entries = Vector[Array[Byte]]()
+ while (enumeration.hasMoreElements) {
+ val bytes = enumeration.nextElement.getEntry
+ val entry =
+ if (Cluster.shouldCompressData) LZF.uncompress(bytes)
+ else bytes
+ entries = entries :+ entry
+ }
+ entries
+ }
+
private def storeSnapshotMetaDataInZooKeeper(snapshotId: Long) {
if (isOpen.isOn) {
try {
@@ -265,8 +299,7 @@ class TransactionLog private (ledger: LedgerHandle, val id: String, val isAsync:
"Could not store transaction log snapshot meta-data in ZooKeeper for UUID [" +
id + "]"))
}
- EventHandler.debug(this,
- "Writing snapshot [%s] to log [%s]".format(snapshotId, logId))
+ EventHandler.debug(this, "Writing snapshot [%s] to log [%s]".format(snapshotId, logId))
} else transactionClosedError
}
@@ -292,12 +325,13 @@ object TransactionLog {
case "CRC32" ⇒ BookKeeper.DigestType.CRC32
case "MAC" ⇒ BookKeeper.DigestType.MAC
case unknown ⇒ throw new ConfigurationException(
- "akka.cluster.replication.digest-type is invalid [" + unknown + "]")
+ "akka.cluster.replication.digest-type is invalid [" + unknown + "], must be either 'CRC32' or 'MAC'")
}
val password = config.getString("akka.cluster.replication.password", "secret").getBytes("UTF-8")
val ensembleSize = config.getInt("akka.cluster.replication.ensemble-size", 3)
val quorumSize = config.getInt("akka.cluster.replication.quorum-size", 2)
- val timeout = 5000 // FIXME make configurable
+ val snapshotFrequency = config.getInt("akka.cluster.replication.snapshot-frequency", 1000)
+ val timeout = Duration(config.getInt("akka.cluster.replication.timeout", 30), TIME_UNIT).toMillis
private[akka] val transactionLogNode = "/transaction-log-ids"
@@ -333,8 +367,13 @@ object TransactionLog {
(bk, zk)
}
- private[akka] def apply(ledger: LedgerHandle, id: String, isAsync: Boolean = false) =
- new TransactionLog(ledger, id, isAsync)
+ private[akka] def apply(
+ ledger: LedgerHandle,
+ id: String,
+ isAsync: Boolean,
+ replicationScheme: ReplicationScheme,
+ format: Serializer) =
+ new TransactionLog(ledger, id, isAsync, replicationScheme, format)
/**
* Shuts down the transaction log.
@@ -355,7 +394,12 @@ object TransactionLog {
/**
* TODO document method
*/
- def newLogFor(id: String, isAsync: Boolean = false): TransactionLog = {
+ def newLogFor(
+ id: String,
+ isAsync: Boolean,
+ replicationScheme: ReplicationScheme,
+ format: Serializer): TransactionLog = {
+
val txLogPath = transactionLogNode + "/" + id
val ledger = try {
@@ -399,13 +443,18 @@ object TransactionLog {
}
EventHandler.info(this, "Created new transaction log [%s] for UUID [%s]".format(logId, id))
- TransactionLog(ledger, id, isAsync)
+ TransactionLog(ledger, id, isAsync, replicationScheme, format)
}
/**
* TODO document method
*/
- def logFor(id: String, isAsync: Boolean = false): TransactionLog = {
+ def logFor(
+ id: String,
+ isAsync: Boolean,
+ replicationScheme: ReplicationScheme,
+ format: Serializer): TransactionLog = {
+
val txLogPath = transactionLogNode + "/" + id
val logId = try {
@@ -444,7 +493,7 @@ object TransactionLog {
case e ⇒ handleError(e)
}
- TransactionLog(ledger, id, isAsync)
+ TransactionLog(ledger, id, isAsync, replicationScheme, format)
}
private[akka] def await[T](future: Promise[T]): T = {
@@ -489,15 +538,10 @@ object LocalBookKeeperEnsemble {
def shutdown() {
isRunning switchOff {
EventHandler.info(this, "Shutting down LocalBookKeeperEnsemble...")
- println("***************************** 1")
localBookKeeper.bs.foreach(_.shutdown()) // stop bookies
- println("***************************** 2")
localBookKeeper.zkc.close() // stop zk client
- println("***************************** 3")
localBookKeeper.zks.shutdown() // stop zk server
- println("***************************** 4")
localBookKeeper.serverFactory.shutdown() // stop zk NIOServer
- println("***************************** 5")
EventHandler.info(this, "LocalBookKeeperEnsemble shut down successfully")
}
}
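The widened factory signatures above (apply, newLogFor, logFor) now require callers to pass the replication scheme and the serializer explicitly instead of relying on default arguments. A minimal usage sketch, assuming the same arguments the updated ReplicationSpec below passes (no replication scheme, the default serialization format):

    // Hypothetical caller of the new API; the argument values mirror the tests below.
    val uuid   = (new UUID).toString
    val txlog1 = TransactionLog.newLogFor(uuid, false, null, Format.Default) // id, isAsync, replicationScheme, format
    txlog1.recordEntry("hello".getBytes("UTF-8"))
    txlog1.close
    val txlog2  = TransactionLog.logFor(uuid, false, null, Format.Default)
    val entries = txlog2.entries.map(bytes ⇒ new String(bytes, "UTF-8"))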
diff --git a/akka-cluster/src/test/scala/akka/cluster/ReplicationSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ReplicationSpec.scala
index 42423a3afc..79b706c9ea 100644
--- a/akka-cluster/src/test/scala/akka/cluster/ReplicationSpec.scala
+++ b/akka-cluster/src/test/scala/akka/cluster/ReplicationSpec.scala
@@ -32,31 +32,31 @@ class ReplicationSpec extends WordSpec with MustMatchers with BeforeAndAfterAll
"A Transaction Log" should {
"be able to record entries - synchronous" in {
val uuid = (new UUID).toString
- val txlog = TransactionLog.newLogFor(uuid)
+ val txlog = TransactionLog.newLogFor(uuid, false, null, Format.Default)
val entry = "hello".getBytes("UTF-8")
txlog.recordEntry(entry)
}
"be able to record and delete entries - synchronous" in {
val uuid = (new UUID).toString
- val txlog1 = TransactionLog.newLogFor(uuid)
+ val txlog1 = TransactionLog.newLogFor(uuid, false, null, Format.Default)
val entry = "hello".getBytes("UTF-8")
txlog1.recordEntry(entry)
txlog1.recordEntry(entry)
txlog1.delete
txlog1.close
- intercept[BKNoSuchLedgerExistsException](TransactionLog.logFor(uuid))
+ intercept[BKNoSuchLedgerExistsException](TransactionLog.logFor(uuid, false, null, Format.Default))
}
"be able to record entries and read entries with 'entriesInRange' - synchronous" in {
val uuid = (new UUID).toString
- val txlog1 = TransactionLog.newLogFor(uuid)
+ val txlog1 = TransactionLog.newLogFor(uuid, false, null, Format.Default)
val entry = "hello".getBytes("UTF-8")
txlog1.recordEntry(entry)
txlog1.recordEntry(entry)
txlog1.close
- val txlog2 = TransactionLog.logFor(uuid)
+ val txlog2 = TransactionLog.logFor(uuid, false, null, Format.Default)
val entries = txlog2.entriesInRange(0, 1).map(bytes ⇒ new String(bytes, "UTF-8"))
entries.size must equal(2)
entries(0) must equal("hello")
@@ -66,7 +66,7 @@ class ReplicationSpec extends WordSpec with MustMatchers with BeforeAndAfterAll
"be able to record entries and read entries with 'entries' - synchronous" in {
val uuid = (new UUID).toString
- val txlog1 = TransactionLog.newLogFor(uuid)
+ val txlog1 = TransactionLog.newLogFor(uuid, false, null, Format.Default)
val entry = "hello".getBytes("UTF-8")
txlog1.recordEntry(entry)
txlog1.recordEntry(entry)
@@ -74,7 +74,7 @@ class ReplicationSpec extends WordSpec with MustMatchers with BeforeAndAfterAll
txlog1.recordEntry(entry)
txlog1.close
- val txlog2 = TransactionLog.logFor(uuid)
+ val txlog2 = TransactionLog.logFor(uuid, false, null, Format.Default)
val entries = txlog2.entries.map(bytes ⇒ new String(bytes, "UTF-8"))
entries.size must equal(4)
entries(0) must equal("hello")
@@ -86,7 +86,7 @@ class ReplicationSpec extends WordSpec with MustMatchers with BeforeAndAfterAll
"be able to record a snapshot - synchronous" in {
val uuid = (new UUID).toString
- val txlog1 = TransactionLog.newLogFor(uuid)
+ val txlog1 = TransactionLog.newLogFor(uuid, false, null, Format.Default)
val snapshot = "snapshot".getBytes("UTF-8")
txlog1.recordSnapshot(snapshot)
txlog1.close
@@ -94,7 +94,7 @@ class ReplicationSpec extends WordSpec with MustMatchers with BeforeAndAfterAll
"be able to record and read a snapshot and following entries - synchronous" in {
val uuid = (new UUID).toString
- val txlog1 = TransactionLog.newLogFor(uuid)
+ val txlog1 = TransactionLog.newLogFor(uuid, false, null, Format.Default)
val snapshot = "snapshot".getBytes("UTF-8")
txlog1.recordSnapshot(snapshot)
@@ -105,8 +105,8 @@ class ReplicationSpec extends WordSpec with MustMatchers with BeforeAndAfterAll
txlog1.recordEntry(entry)
txlog1.close
- val txlog2 = TransactionLog.logFor(uuid)
- val (snapshotAsBytes, entriesAsBytes) = txlog2.entriesFromLatestSnapshot
+ val txlog2 = TransactionLog.logFor(uuid, false, null, Format.Default)
+ val (snapshotAsBytes, entriesAsBytes) = txlog2.toByteArraysLatestSnapshot
new String(snapshotAsBytes, "UTF-8") must equal("snapshot")
val entries = entriesAsBytes.map(bytes ⇒ new String(bytes, "UTF-8"))
@@ -120,7 +120,7 @@ class ReplicationSpec extends WordSpec with MustMatchers with BeforeAndAfterAll
"be able to record entries then a snapshot then more entries - and then read from the snapshot and the following entries - synchronous" in {
val uuid = (new UUID).toString
- val txlog1 = TransactionLog.newLogFor(uuid)
+ val txlog1 = TransactionLog.newLogFor(uuid, false, null, Format.Default)
val entry = "hello".getBytes("UTF-8")
txlog1.recordEntry(entry)
@@ -134,8 +134,8 @@ class ReplicationSpec extends WordSpec with MustMatchers with BeforeAndAfterAll
txlog1.recordEntry(entry)
txlog1.close
- val txlog2 = TransactionLog.logFor(uuid)
- val (snapshotAsBytes, entriesAsBytes) = txlog2.entriesFromLatestSnapshot
+ val txlog2 = TransactionLog.logFor(uuid, false, null, Format.Default)
+ val (snapshotAsBytes, entriesAsBytes) = txlog2.toByteArraysLatestSnapshot
new String(snapshotAsBytes, "UTF-8") must equal("snapshot")
val entries = entriesAsBytes.map(bytes ⇒ new String(bytes, "UTF-8"))
@@ -149,7 +149,7 @@ class ReplicationSpec extends WordSpec with MustMatchers with BeforeAndAfterAll
"A Transaction Log" should {
"be able to record entries - asynchronous" in {
val uuid = (new UUID).toString
- val txlog = TransactionLog.newLogFor(uuid, true)
+ val txlog = TransactionLog.newLogFor(uuid, true, null, Format.Default)
val entry = "hello".getBytes("UTF-8")
txlog.recordEntry(entry)
Thread.sleep(100)
@@ -158,24 +158,24 @@ class ReplicationSpec extends WordSpec with MustMatchers with BeforeAndAfterAll
"be able to record and delete entries - asynchronous" in {
val uuid = (new UUID).toString
- val txlog1 = TransactionLog.newLogFor(uuid, true)
+ val txlog1 = TransactionLog.newLogFor(uuid, true, null, Format.Default)
val entry = "hello".getBytes("UTF-8")
txlog1.recordEntry(entry)
txlog1.recordEntry(entry)
txlog1.delete
Thread.sleep(100)
- intercept[BKNoSuchLedgerExistsException](TransactionLog.logFor(uuid, true))
+ intercept[BKNoSuchLedgerExistsException](TransactionLog.logFor(uuid, true, null, Format.Default))
}
"be able to record entries and read entries with 'entriesInRange' - asynchronous" in {
val uuid = (new UUID).toString
- val txlog1 = TransactionLog.newLogFor(uuid, true)
+ val txlog1 = TransactionLog.newLogFor(uuid, true, null, Format.Default)
val entry = "hello".getBytes("UTF-8")
txlog1.recordEntry(entry)
txlog1.recordEntry(entry)
Thread.sleep(100)
txlog1.close
- val txlog2 = TransactionLog.logFor(uuid, true)
+ val txlog2 = TransactionLog.logFor(uuid, true, null, Format.Default)
val entries = txlog2.entriesInRange(0, 1).map(bytes ⇒ new String(bytes, "UTF-8"))
entries.size must equal(2)
entries(0) must equal("hello")
@@ -186,7 +186,7 @@ class ReplicationSpec extends WordSpec with MustMatchers with BeforeAndAfterAll
"be able to record entries and read entries with 'entries' - asynchronous" in {
val uuid = (new UUID).toString
- val txlog1 = TransactionLog.newLogFor(uuid, true)
+ val txlog1 = TransactionLog.newLogFor(uuid, true, null, Format.Default)
val entry = "hello".getBytes("UTF-8")
txlog1.recordEntry(entry)
txlog1.recordEntry(entry)
@@ -195,7 +195,7 @@ class ReplicationSpec extends WordSpec with MustMatchers with BeforeAndAfterAll
Thread.sleep(100)
txlog1.close
- val txlog2 = TransactionLog.logFor(uuid, true)
+ val txlog2 = TransactionLog.logFor(uuid, true, null, Format.Default)
val entries = txlog2.entries.map(bytes ⇒ new String(bytes, "UTF-8"))
entries.size must equal(4)
entries(0) must equal("hello")
@@ -208,7 +208,7 @@ class ReplicationSpec extends WordSpec with MustMatchers with BeforeAndAfterAll
"be able to record a snapshot - asynchronous" in {
val uuid = (new UUID).toString
- val txlog1 = TransactionLog.newLogFor(uuid, true)
+ val txlog1 = TransactionLog.newLogFor(uuid, true, null, Format.Default)
val snapshot = "snapshot".getBytes("UTF-8")
txlog1.recordSnapshot(snapshot)
Thread.sleep(100)
@@ -217,7 +217,7 @@ class ReplicationSpec extends WordSpec with MustMatchers with BeforeAndAfterAll
"be able to record and read a snapshot and following entries - asynchronous" in {
val uuid = (new UUID).toString
- val txlog1 = TransactionLog.newLogFor(uuid, true)
+ val txlog1 = TransactionLog.newLogFor(uuid, true, null, Format.Default)
val snapshot = "snapshot".getBytes("UTF-8")
txlog1.recordSnapshot(snapshot)
@@ -229,8 +229,8 @@ class ReplicationSpec extends WordSpec with MustMatchers with BeforeAndAfterAll
Thread.sleep(100)
txlog1.close
- val txlog2 = TransactionLog.logFor(uuid, true)
- val (snapshotAsBytes, entriesAsBytes) = txlog2.entriesFromLatestSnapshot
+ val txlog2 = TransactionLog.logFor(uuid, true, null, Format.Default)
+ val (snapshotAsBytes, entriesAsBytes) = txlog2.toByteArraysLatestSnapshot
new String(snapshotAsBytes, "UTF-8") must equal("snapshot")
val entries = entriesAsBytes.map(bytes ⇒ new String(bytes, "UTF-8"))
@@ -245,7 +245,7 @@ class ReplicationSpec extends WordSpec with MustMatchers with BeforeAndAfterAll
"be able to record entries then a snapshot then more entries - and then read from the snapshot and the following entries - asynchronous" in {
val uuid = (new UUID).toString
- val txlog1 = TransactionLog.newLogFor(uuid, true)
+ val txlog1 = TransactionLog.newLogFor(uuid, true, null, Format.Default)
val entry = "hello".getBytes("UTF-8")
txlog1.recordEntry(entry)
@@ -258,8 +258,8 @@ class ReplicationSpec extends WordSpec with MustMatchers with BeforeAndAfterAll
Thread.sleep(100)
txlog1.close
- val txlog2 = TransactionLog.logFor(uuid, true)
- val (snapshotAsBytes, entriesAsBytes) = txlog2.entriesFromLatestSnapshot
+ val txlog2 = TransactionLog.logFor(uuid, true, null, Format.Default)
+ val (snapshotAsBytes, entriesAsBytes) = txlog2.toByteArraysLatestSnapshot
new String(snapshotAsBytes, "UTF-8") must equal("snapshot")
val entries = entriesAsBytes.map(bytes ⇒ new String(bytes, "UTF-8"))
entries.size must equal(2)
diff --git a/akka-cluster/src/test/scala/akka/cluster/routing/roundrobin_1_replica/RoundRobin1ReplicaMultiJvmSpec.scala b/akka-cluster/src/test/scala/akka/cluster/routing/roundrobin_1_replica/RoundRobin1ReplicaMultiJvmSpec.scala
index 1f6d60efc3..1c4841ee2e 100644
--- a/akka-cluster/src/test/scala/akka/cluster/routing/roundrobin_1_replica/RoundRobin1ReplicaMultiJvmSpec.scala
+++ b/akka-cluster/src/test/scala/akka/cluster/routing/roundrobin_1_replica/RoundRobin1ReplicaMultiJvmSpec.scala
@@ -8,6 +8,9 @@ import org.scalatest.WordSpec
import org.scalatest.matchers.MustMatchers
import org.scalatest.BeforeAndAfterAll
+import org.apache.bookkeeper.client.{ BookKeeper, BKException }
+import BKException._
+
import akka.cluster._
import akka.actor._
import Actor._
@@ -27,6 +30,9 @@ object RoundRobin1ReplicaMultiJvmSpec {
class RoundRobin1ReplicaMultiJvmNode1 extends WordSpec with MustMatchers with BeforeAndAfterAll {
import RoundRobin1ReplicaMultiJvmSpec._
+ private var bookKeeper: BookKeeper = _
+ private var localBookKeeper: LocalBookKeeper = _
+
"A cluster" must {
"create clustered actor, get a 'local' actor on 'home' node and a 'ref' to actor on remote node" in {
@@ -49,10 +55,13 @@ class RoundRobin1ReplicaMultiJvmNode1 extends WordSpec with MustMatchers with Be
override def beforeAll() = {
Cluster.startLocalCluster()
+ LocalBookKeeperEnsemble.start()
}
override def afterAll() = {
Cluster.shutdownLocalCluster()
+ TransactionLog.shutdown()
+ LocalBookKeeperEnsemble.shutdown()
}
}
diff --git a/akka-cluster/src/test/scala/akka/cluster/routing/roundrobin_2_replicas/RoundRobin2ReplicasMultiJvmSpec.scala b/akka-cluster/src/test/scala/akka/cluster/routing/roundrobin_2_replicas/RoundRobin2ReplicasMultiJvmSpec.scala
index 9f3083868b..c9c864fc27 100644
--- a/akka-cluster/src/test/scala/akka/cluster/routing/roundrobin_2_replicas/RoundRobin2ReplicasMultiJvmSpec.scala
+++ b/akka-cluster/src/test/scala/akka/cluster/routing/roundrobin_2_replicas/RoundRobin2ReplicasMultiJvmSpec.scala
@@ -8,6 +8,9 @@ import org.scalatest.WordSpec
import org.scalatest.matchers.MustMatchers
import org.scalatest.BeforeAndAfterAll
+import org.apache.bookkeeper.client.{ BookKeeper, BKException }
+import BKException._
+
import akka.cluster._
import akka.actor._
import Actor._
@@ -28,6 +31,9 @@ object RoundRobin2ReplicasMultiJvmSpec {
class RoundRobin2ReplicasMultiJvmNode1 extends WordSpec with MustMatchers with BeforeAndAfterAll {
import RoundRobin2ReplicasMultiJvmSpec._
+ private var bookKeeper: BookKeeper = _
+ private var localBookKeeper: LocalBookKeeper = _
+
"A cluster" must {
"create clustered actor, get a 'local' actor on 'home' node and a 'ref' to actor on remote node" in {
@@ -52,10 +58,13 @@ class RoundRobin2ReplicasMultiJvmNode1 extends WordSpec with MustMatchers with B
override def beforeAll() = {
Cluster.startLocalCluster()
+ LocalBookKeeperEnsemble.start()
}
override def afterAll() = {
Cluster.shutdownLocalCluster()
+ TransactionLog.shutdown()
+ LocalBookKeeperEnsemble.shutdown()
}
}
diff --git a/akka-docs/modules/spring.rst b/akka-docs/modules/spring.rst
index 29bf4632cf..d8ec97b72c 100644
--- a/akka-docs/modules/spring.rst
+++ b/akka-docs/modules/spring.rst
@@ -323,7 +323,7 @@ The Akka configuration can be made available as property placeholders by using a
-
+
Camel configuration
diff --git a/akka-docs/scala/http.rst b/akka-docs/scala/http.rst
index 9f15664b70..ba25234f93 100644
--- a/akka-docs/scala/http.rst
+++ b/akka-docs/scala/http.rst
@@ -10,43 +10,10 @@ HTTP
Module stability: **SOLID**
-When using Akkas embedded servlet container
--------------------------------------------
-
-Akka supports the JSR for REST called JAX-RS (JSR-311). It allows you to create interaction with your actors through HTTP + REST
-
-You can deploy your REST services directly into the Akka kernel. All you have to do is to drop the JAR with your application containing the REST services into the ‘$AKKA_HOME/deploy’ directory and specify in your akka.conf what resource packages to scan for (more on that below) and optionally define a “boot class” (if you need to create any actors or do any config). WAR deployment is coming soon.
-
-Boot configuration class
-------------------------
-
-The boot class is needed for Akka to bootstrap the application and should contain the initial supervisor configuration of any actors in the module.
-
-The boot class should be a regular POJO with a default constructor in which the initial configuration is done. The boot class then needs to be defined in the ‘$AKKA_HOME/config/akka.conf’ config file like this:
-
-.. code-block:: ruby
-
- akka {
- boot = ["sample.java.Boot", "sample.scala.Boot"] # FQN to the class doing initial actor
- # supervisor bootstrap, should be defined in default constructor
- ...
- }
-
-After you've placed your service-jar into the $AKKA_HOME/deploy directory, you'll need to tell Akka where to look for your services, and you do that by specifying what packages you want Akka to scan for services, and that's done in akka.conf in the http-section:
-
-.. code-block:: ruby
-
- akka {
- http {
- ...
- resource-packages = ["com.bar","com.foo.bar"] # List with all resource packages for your Jersey services
- ...
- }
-
-When deploying in another servlet container:
+When deploying in a servlet container:
--------------------------------------------
-If you deploy Akka in another JEE container, don't forget to create an Akka initialization and cleanup hook:
+If you deploy Akka in a JEE container, don't forget to create an Akka initialization and cleanup hook:
.. code-block:: scala
@@ -86,32 +53,6 @@ Then you just declare it in your web.xml:
...
-Also, you need to map the servlet that will handle your Jersey/JAX-RS calls, you use Jerseys ServletContainer servlet.
-
-.. code-block:: xml
-
-
- ...
-
- Akka
- com.sun.jersey.spi.container.servlet.ServletContainer
-
-
- com.sun.jersey.config.property.resourceConfigClass
- com.sun.jersey.api.core.PackagesResourceConfig
-
-
- com.sun.jersey.config.property.packages
- your.resource.package.here;and.another.here;and.so.on
-
-
-
- *
- Akka
-
- ...
-
-
Adapting your own Akka Initializer for the Servlet Container
------------------------------------------------------------
@@ -142,16 +83,6 @@ If you want to use akka-camel or any other modules that have their own "Bootable
loader.boot(true, new BootableActorLoaderService with BootableRemoteActorService with CamelService) //<--- Important
}
-Java API: Typed Actors
-----------------------
-
-`Sample module for REST services with Actors in Java `_
-
-Scala API: Actors
------------------
-
-`Sample module for REST services with Actors in Scala `_
-
Using Akka with the Pinky REST/MVC framework
--------------------------------------------
@@ -193,6 +124,10 @@ In order to use Mist you have to register the MistServlet in *web.xml* or do the
<servlet-name>akkaMistServlet</servlet-name>
<servlet-class>akka.http.AkkaMistServlet</servlet-class>
+ <init-param>
+   <param-name>root-endpoint</param-name>
+   <param-value>address_of_root_endpoint_actor</param-value>
+ </init-param>
diff --git a/akka-docs/scala/security.rst b/akka-docs/scala/security.rst
deleted file mode 100644
index 9d186b5ea2..0000000000
--- a/akka-docs/scala/security.rst
+++ /dev/null
@@ -1,266 +0,0 @@
-HTTP Security
-=============
-
-.. sidebar:: Contents
-
- .. contents:: :local:
-
-Module stability: **IN PROGRESS**
-
-Akka supports security for access to RESTful Actors through `HTTP Authentication `_. The security is implemented as a jersey ResourceFilter which delegates the actual authentication to an authentication actor.
-
-Akka provides authentication via the following authentication schemes:
-
-* `Basic Authentication `_
-* `Digest Authentication `_
-* `Kerberos SPNEGO Authentication `_
-
-The authentication is performed by implementations of akka.security.AuthenticationActor.
-
-Akka provides a trait for each authentication scheme:
-
-* BasicAuthenticationActor
-* DigestAuthenticationActor
-* SpnegoAuthenticationActor
-
-
-Setup
------
-
-To secure your RESTful actors you need to perform the following steps:
-
-1. configure the resource filter factory 'akka.security.AkkaSecurityFilterFactory' in the 'akka.conf' like this:
-
-.. code-block:: ruby
-
- akka {
- ...
- rest {
- filters="akka.security.AkkaSecurityFilterFactory"
- }
- ...
- }
-
-2. Configure an implementation of an authentication actor in 'akka.conf':
-
-.. code-block:: ruby
-
- akka {
- ...
- rest {
- filters= ...
- authenticator = "akka.security.samples.BasicAuthenticationService"
- }
- ...
- }
-
-3. Start your authentication actor in your 'Boot' class. The security package consists of the following parts:
-
-4. Secure your RESTful actors using class or resource level annotations:
-
-* @DenyAll
-* @RolesAllowed(listOfRoles)
-* @PermitAll
-
-Security Samples
-----------------
-
-The akka-samples-security module contains a small sample application with sample implementations for each authentication scheme.
-You can start the sample app using the jetty plugin: mvn jetty:run.
-
-The RESTful actor can then be accessed using your browser of choice under:
-
-* permit access only to users having the “chef” role: ``_
-* public access: ``_
-
-You can access the secured resource using any user for basic authentication (which is the default authenticator in the sample app).
-
-Digest authentication can be directly enabled in the sample app. Kerberos/SPNEGO authentication is a bit more involved an is described below.
-
-
-Kerberos/SPNEGO Authentication
-------------------------------
-
-Kerberos is a network authentication protocol, (see ``_). It provides strong authentication for client/server applications.
-In a kerberos enabled environment a user will need to sign on only once. Subsequent authentication to applications is handled transparently by kerberos.
-
-Most prominently the kerberos protocol is used to authenticate users in a windows network. When deploying web applications to a corporate intranet an important feature will be to support the single sign on (SSO), which comes to make the application kerberos aware.
-
-How does it work (at least for REST actors)?
-
-- When accessing a secured resource the server will check the request for the *Authorization* header as with basic or digest authentication.
-- If it is not set, the server will respond with a challenge to "Negotiate". The negotiation is in fact the NEGO part of the `SPNEGO `_ specification
-- The browser will then try to acquire a so called *service ticket* from a ticket granting service, i.e. the kerberos server
-- The browser will send the *service ticket* to the web application encoded in the header value of the *Authorization* header
-- The web application must validate the ticket based on a shared secret between the web application and the kerberos server. As a result the web application will know the name of the user
-
-To activate the kerberos/SPNEGO authentication for your REST actor you need to enable the kerberos/SPNEGOauthentication actor in the akka.conf like this:
-
-.. code-block:: ruby
-
- akka {
- ...
- rest {
- filters= ...
- authenticator = "akka.security.samples.SpnegoAuthenticationService"
- }
- ...
- }
-
-Furthermore you must provide the SpnegoAuthenticator with the following information.
-
-- Service principal name: the name of your web application in the kerberos servers user database. This name is always has the form ``HTTP/{server}@{realm}``
-- Path to the keytab file: this is a kind of certificate for your web application to acquire tickets from the kerberos server
-
-.. code-block:: ruby
-
- akka {
- ...
- rest {
- filters= ...
- authenticator = "akka.security.samples.SpnegoAuthenticationService"
- kerberos {
- servicePrincipal = "HTTP/{server}@{realm}"
- keyTabLocation = "URL to keytab"
- # kerberosDebug = "true"
- }
- }
- ...
- }
-
-
-How to setup kerberos on localhost for Ubuntu
----------------------------------------------
-
-This is a short step by step description of howto set up a kerberos server on an ubuntu system.
-
-1. Install the Heimdal Kerberos Server and Client
-
-::
-
- sudo apt-get install heimdal-clients heimdal-clients-x heimdal-kdc krb5-config
- ...
-
-2. Set up your kerberos realm. In this example the realm is of course … EXAMPLE.COM
-
-::
-
- eckart@dilbert:~$ sudo kadmin -l
- kadmin> init EXAMPLE.COM
- Realm max ticket life [unlimited]:
- Realm max renewable ticket life [unlimited]:
- kadmin> quit
-
-3. Tell your kerberos clients what your realm is and where to find the kerberos server (aka the Key Distribution Centre or KDC)
-
-Edit the kerberos config file: /etc/krb5.conf and configure …
-…the default realm:
-
-::
-
- [libdefaults]
- default_realm = EXAMPLE.COM
-
-… where to find the KDC for your realm
-
-::
-
- [realms]
- EXAMPLE.COM = {
- kdc = localhost
- }
-
-…which hostnames or domains map to which realm (a kerberos realm is **not** a DNS domain):
-
-::
-
- [domain_realm]
- localhost = EXAMPLE.COM
-
-4. Add the principals
-The user principal:
-
-::
-
- eckart@dilbert:~$ sudo kadmin -l
- kadmin> add zaphod
- Max ticket life [1 day]:
- Max renewable life [1 week]:
- Principal expiration time [never]:
- Password expiration time [never]:
- Attributes []:
- zaphod@EXAMPLE.COM's Password:
- Verifying - zaphod@EXAMPLE.COM's Password:
- kadmin> quit
-
-The service principal:
-
-::
-
- eckart@dilbert:~$ sudo kadmin -l
- kadmin> add HTTP/localhost@EXAMPLE.COM
- Max ticket life [1 day]:
- Max renewable life [1 week]:
- Principal expiration time [never]:
- Password expiration time [never]:
- Attributes []:
- HTTP/localhost@EXAMPLE.COM's Password:
- Verifying - HTTP/localhost@EXAMPLE.COM's Password:
- kadmin> quit
-
-We can now try to acquire initial tickets for the principals to see if everything worked.
-
-::
-
- eckart@dilbert:~$ kinit zaphod
- zaphod@EXAMPLE.COM's Password:
-
-If this method returns withour error we have a success.
-We can additionally list the acquired tickets:
-
-::
-
- eckart@dilbert:~$ klist
- Credentials cache: FILE:/tmp/krb5cc_1000
- Principal: zaphod@EXAMPLE.COM
-
- Issued Expires Principal
- Oct 24 21:51:59 Oct 25 06:51:59 krbtgt/EXAMPLE.COM@EXAMPLE.COM
-
-This seems correct. To remove the ticket cache simply type kdestroy.
-
-5. Create a keytab for your service principal
-
-::
-
- eckart@dilbert:~$ ktutil -k http.keytab add -p HTTP/localhost@EXAMPLE.COM -V 1 -e aes256-cts-hmac-sha1-96
- Password:
- Verifying - Password:
- eckart@dilbert:~$
-
-This command will create a keytab file for the service principal named ``http.keytab`` in the current directory. You can specify other encryption methods than ‘aes256-cts-hmac-sha1-96’, but this is the e default encryption method for the heimdal client, so there is no additional configuration needed. You can specify other encryption types in the krb5.conf.
-
-Note that you might need to install the unlimited strength policy files for java from here: ``_ to use the aes256 encryption from your application.
-
-Again we can test if the keytab generation worked with the kinit command:
-
-::
-
- eckart@dilbert:~$ kinit -t http.keytab HTTP/localhost@EXAMPLE.COM
- eckart@dilbert:~$ klist
- Credentials cache: FILE:/tmp/krb5cc_1000
- Principal: HTTP/localhost@EXAMPLE.COM
-
- Issued Expires Principal
- Oct 24 21:59:20 Oct 25 06:59:20 krbtgt/EXAMPLE.COM@EXAMPLE.COM
-
-Now point the configuration of the key in 'akka.conf' to the correct location and set the correct service principal name. The web application should now startup and produce at least a 401 response with a header ``WWW-Authenticate`` = "Negotiate". The last step is to configure the browser.
-
-6. Set up Firefox to use Kerberos/SPNEGO
-This is done by typing ``about:config``. Filter the config entries for ``network.neg`` and set the config entries ``network.negotiate-auth.delegation-uris`` and ``network.negotiate-auth.trusted-uris`` to ``localhost``.
-and now …
-
-7. Access the RESTful Actor.
-
-8. Have fun
-… but acquire an initial ticket for the user principal first: ``kinit zaphod``
diff --git a/akka-durable-mailboxes/akka-mailboxes-common/src/main/java/akka/actor/mailbox/MailboxProtocol.java b/akka-durable-mailboxes/akka-mailboxes-common/src/main/java/akka/actor/mailbox/MailboxProtocol.java
index 1fcb87781c..bc128610cf 100644
--- a/akka-durable-mailboxes/akka-mailboxes-common/src/main/java/akka/actor/mailbox/MailboxProtocol.java
+++ b/akka-durable-mailboxes/akka-mailboxes-common/src/main/java/akka/actor/mailbox/MailboxProtocol.java
@@ -8,11 +8,32 @@ public final class MailboxProtocol {
public static void registerAllExtensions(
com.google.protobuf.ExtensionRegistry registry) {
}
+ public interface DurableMailboxMessageProtocolOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required string ownerAddress = 1;
+ boolean hasOwnerAddress();
+ String getOwnerAddress();
+
+ // optional string senderAddress = 2;
+ boolean hasSenderAddress();
+ String getSenderAddress();
+
+ // optional .UuidProtocol futureUuid = 3;
+ boolean hasFutureUuid();
+ akka.actor.mailbox.MailboxProtocol.UuidProtocol getFutureUuid();
+ akka.actor.mailbox.MailboxProtocol.UuidProtocolOrBuilder getFutureUuidOrBuilder();
+
+ // required bytes message = 4;
+ boolean hasMessage();
+ com.google.protobuf.ByteString getMessage();
+ }
public static final class DurableMailboxMessageProtocol extends
- com.google.protobuf.GeneratedMessage {
+ com.google.protobuf.GeneratedMessage
+ implements DurableMailboxMessageProtocolOrBuilder {
// Use DurableMailboxMessageProtocol.newBuilder() to construct.
- private DurableMailboxMessageProtocol() {
- initFields();
+ private DurableMailboxMessageProtocol(Builder builder) {
+ super(builder);
}
private DurableMailboxMessageProtocol(boolean noInit) {}
@@ -35,60 +56,137 @@ public final class MailboxProtocol {
return akka.actor.mailbox.MailboxProtocol.internal_static_DurableMailboxMessageProtocol_fieldAccessorTable;
}
+ private int bitField0_;
// required string ownerAddress = 1;
public static final int OWNERADDRESS_FIELD_NUMBER = 1;
- private boolean hasOwnerAddress;
- private java.lang.String ownerAddress_ = "";
- public boolean hasOwnerAddress() { return hasOwnerAddress; }
- public java.lang.String getOwnerAddress() { return ownerAddress_; }
+ private java.lang.Object ownerAddress_;
+ public boolean hasOwnerAddress() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ public String getOwnerAddress() {
+ java.lang.Object ref = ownerAddress_;
+ if (ref instanceof String) {
+ return (String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ String s = bs.toStringUtf8();
+ if (com.google.protobuf.Internal.isValidUtf8(bs)) {
+ ownerAddress_ = s;
+ }
+ return s;
+ }
+ }
+ private com.google.protobuf.ByteString getOwnerAddressBytes() {
+ java.lang.Object ref = ownerAddress_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8((String) ref);
+ ownerAddress_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
// optional string senderAddress = 2;
public static final int SENDERADDRESS_FIELD_NUMBER = 2;
- private boolean hasSenderAddress;
- private java.lang.String senderAddress_ = "";
- public boolean hasSenderAddress() { return hasSenderAddress; }
- public java.lang.String getSenderAddress() { return senderAddress_; }
+ private java.lang.Object senderAddress_;
+ public boolean hasSenderAddress() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ public String getSenderAddress() {
+ java.lang.Object ref = senderAddress_;
+ if (ref instanceof String) {
+ return (String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ String s = bs.toStringUtf8();
+ if (com.google.protobuf.Internal.isValidUtf8(bs)) {
+ senderAddress_ = s;
+ }
+ return s;
+ }
+ }
+ private com.google.protobuf.ByteString getSenderAddressBytes() {
+ java.lang.Object ref = senderAddress_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8((String) ref);
+ senderAddress_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
// optional .UuidProtocol futureUuid = 3;
public static final int FUTUREUUID_FIELD_NUMBER = 3;
- private boolean hasFutureUuid;
private akka.actor.mailbox.MailboxProtocol.UuidProtocol futureUuid_;
- public boolean hasFutureUuid() { return hasFutureUuid; }
- public akka.actor.mailbox.MailboxProtocol.UuidProtocol getFutureUuid() { return futureUuid_; }
+ public boolean hasFutureUuid() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ public akka.actor.mailbox.MailboxProtocol.UuidProtocol getFutureUuid() {
+ return futureUuid_;
+ }
+ public akka.actor.mailbox.MailboxProtocol.UuidProtocolOrBuilder getFutureUuidOrBuilder() {
+ return futureUuid_;
+ }
// required bytes message = 4;
public static final int MESSAGE_FIELD_NUMBER = 4;
- private boolean hasMessage;
- private com.google.protobuf.ByteString message_ = com.google.protobuf.ByteString.EMPTY;
- public boolean hasMessage() { return hasMessage; }
- public com.google.protobuf.ByteString getMessage() { return message_; }
+ private com.google.protobuf.ByteString message_;
+ public boolean hasMessage() {
+ return ((bitField0_ & 0x00000008) == 0x00000008);
+ }
+ public com.google.protobuf.ByteString getMessage() {
+ return message_;
+ }
private void initFields() {
+ ownerAddress_ = "";
+ senderAddress_ = "";
futureUuid_ = akka.actor.mailbox.MailboxProtocol.UuidProtocol.getDefaultInstance();
+ message_ = com.google.protobuf.ByteString.EMPTY;
}
+ private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
- if (!hasOwnerAddress) return false;
- if (!hasMessage) return false;
- if (hasFutureUuid()) {
- if (!getFutureUuid().isInitialized()) return false;
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasOwnerAddress()) {
+ memoizedIsInitialized = 0;
+ return false;
}
+ if (!hasMessage()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (hasFutureUuid()) {
+ if (!getFutureUuid().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ }
+ memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
- if (hasOwnerAddress()) {
- output.writeString(1, getOwnerAddress());
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeBytes(1, getOwnerAddressBytes());
}
- if (hasSenderAddress()) {
- output.writeString(2, getSenderAddress());
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeBytes(2, getSenderAddressBytes());
}
- if (hasFutureUuid()) {
- output.writeMessage(3, getFutureUuid());
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ output.writeMessage(3, futureUuid_);
}
- if (hasMessage()) {
- output.writeBytes(4, getMessage());
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ output.writeBytes(4, message_);
}
getUnknownFields().writeTo(output);
}
@@ -99,27 +197,34 @@ public final class MailboxProtocol {
if (size != -1) return size;
size = 0;
- if (hasOwnerAddress()) {
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
- .computeStringSize(1, getOwnerAddress());
+ .computeBytesSize(1, getOwnerAddressBytes());
}
- if (hasSenderAddress()) {
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
- .computeStringSize(2, getSenderAddress());
+ .computeBytesSize(2, getSenderAddressBytes());
}
- if (hasFutureUuid()) {
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
- .computeMessageSize(3, getFutureUuid());
+ .computeMessageSize(3, futureUuid_);
}
- if (hasMessage()) {
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += com.google.protobuf.CodedOutputStream
- .computeBytesSize(4, getMessage());
+ .computeBytesSize(4, message_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
public static akka.actor.mailbox.MailboxProtocol.DurableMailboxMessageProtocol parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
@@ -194,34 +299,62 @@ public final class MailboxProtocol {
}
public Builder toBuilder() { return newBuilder(this); }
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
public static final class Builder extends
- com.google.protobuf.GeneratedMessage.Builder {
- private akka.actor.mailbox.MailboxProtocol.DurableMailboxMessageProtocol result;
-
- // Construct using akka.actor.mailbox.MailboxProtocol.DurableMailboxMessageProtocol.newBuilder()
- private Builder() {}
-
- private static Builder create() {
- Builder builder = new Builder();
- builder.result = new akka.actor.mailbox.MailboxProtocol.DurableMailboxMessageProtocol();
- return builder;
+ com.google.protobuf.GeneratedMessage.Builder
+ implements akka.actor.mailbox.MailboxProtocol.DurableMailboxMessageProtocolOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return akka.actor.mailbox.MailboxProtocol.internal_static_DurableMailboxMessageProtocol_descriptor;
}
- protected akka.actor.mailbox.MailboxProtocol.DurableMailboxMessageProtocol internalGetResult() {
- return result;
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return akka.actor.mailbox.MailboxProtocol.internal_static_DurableMailboxMessageProtocol_fieldAccessorTable;
+ }
+
+ // Construct using akka.actor.mailbox.MailboxProtocol.DurableMailboxMessageProtocol.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getFutureUuidFieldBuilder();
+ }
+ }
+ private static Builder create() {
+ return new Builder();
}
public Builder clear() {
- if (result == null) {
- throw new IllegalStateException(
- "Cannot call clear() after build().");
+ super.clear();
+ ownerAddress_ = "";
+ bitField0_ = (bitField0_ & ~0x00000001);
+ senderAddress_ = "";
+ bitField0_ = (bitField0_ & ~0x00000002);
+ if (futureUuidBuilder_ == null) {
+ futureUuid_ = akka.actor.mailbox.MailboxProtocol.UuidProtocol.getDefaultInstance();
+ } else {
+ futureUuidBuilder_.clear();
}
- result = new akka.actor.mailbox.MailboxProtocol.DurableMailboxMessageProtocol();
+ bitField0_ = (bitField0_ & ~0x00000004);
+ message_ = com.google.protobuf.ByteString.EMPTY;
+ bitField0_ = (bitField0_ & ~0x00000008);
return this;
}
public Builder clone() {
- return create().mergeFrom(result);
+ return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
@@ -233,33 +366,51 @@ public final class MailboxProtocol {
return akka.actor.mailbox.MailboxProtocol.DurableMailboxMessageProtocol.getDefaultInstance();
}
- public boolean isInitialized() {
- return result.isInitialized();
- }
public akka.actor.mailbox.MailboxProtocol.DurableMailboxMessageProtocol build() {
- if (result != null && !isInitialized()) {
+ akka.actor.mailbox.MailboxProtocol.DurableMailboxMessageProtocol result = buildPartial();
+ if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
- return buildPartial();
+ return result;
}
private akka.actor.mailbox.MailboxProtocol.DurableMailboxMessageProtocol buildParsed()
throws com.google.protobuf.InvalidProtocolBufferException {
- if (!isInitialized()) {
+ akka.actor.mailbox.MailboxProtocol.DurableMailboxMessageProtocol result = buildPartial();
+ if (!result.isInitialized()) {
throw newUninitializedMessageException(
result).asInvalidProtocolBufferException();
}
- return buildPartial();
+ return result;
}
public akka.actor.mailbox.MailboxProtocol.DurableMailboxMessageProtocol buildPartial() {
- if (result == null) {
- throw new IllegalStateException(
- "build() has already been called on this Builder.");
+ akka.actor.mailbox.MailboxProtocol.DurableMailboxMessageProtocol result = new akka.actor.mailbox.MailboxProtocol.DurableMailboxMessageProtocol(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
}
- akka.actor.mailbox.MailboxProtocol.DurableMailboxMessageProtocol returnMe = result;
- result = null;
- return returnMe;
+ result.ownerAddress_ = ownerAddress_;
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ result.senderAddress_ = senderAddress_;
+ if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+ to_bitField0_ |= 0x00000004;
+ }
+ if (futureUuidBuilder_ == null) {
+ result.futureUuid_ = futureUuid_;
+ } else {
+ result.futureUuid_ = futureUuidBuilder_.build();
+ }
+ if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
+ to_bitField0_ |= 0x00000008;
+ }
+ result.message_ = message_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
@@ -289,6 +440,24 @@ public final class MailboxProtocol {
return this;
}
+ public final boolean isInitialized() {
+ if (!hasOwnerAddress()) {
+
+ return false;
+ }
+ if (!hasMessage()) {
+
+ return false;
+ }
+ if (hasFutureUuid()) {
+ if (!getFutureUuid().isInitialized()) {
+
+ return false;
+ }
+ }
+ return true;
+ }
+
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
@@ -301,21 +470,25 @@ public final class MailboxProtocol {
switch (tag) {
case 0:
this.setUnknownFields(unknownFields.build());
+ onChanged();
return this;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
this.setUnknownFields(unknownFields.build());
+ onChanged();
return this;
}
break;
}
case 10: {
- setOwnerAddress(input.readString());
+ bitField0_ |= 0x00000001;
+ ownerAddress_ = input.readBytes();
break;
}
case 18: {
- setSenderAddress(input.readString());
+ bitField0_ |= 0x00000002;
+ senderAddress_ = input.readBytes();
break;
}
case 26: {
@@ -328,111 +501,199 @@ public final class MailboxProtocol {
break;
}
case 34: {
- setMessage(input.readBytes());
+ bitField0_ |= 0x00000008;
+ message_ = input.readBytes();
break;
}
}
}
}
+ private int bitField0_;
// required string ownerAddress = 1;
+ private java.lang.Object ownerAddress_ = "";
public boolean hasOwnerAddress() {
- return result.hasOwnerAddress();
+ return ((bitField0_ & 0x00000001) == 0x00000001);
}
- public java.lang.String getOwnerAddress() {
- return result.getOwnerAddress();
+ public String getOwnerAddress() {
+ java.lang.Object ref = ownerAddress_;
+ if (!(ref instanceof String)) {
+ String s = ((com.google.protobuf.ByteString) ref).toStringUtf8();
+ ownerAddress_ = s;
+ return s;
+ } else {
+ return (String) ref;
+ }
}
- public Builder setOwnerAddress(java.lang.String value) {
+ public Builder setOwnerAddress(String value) {
if (value == null) {
throw new NullPointerException();
}
- result.hasOwnerAddress = true;
- result.ownerAddress_ = value;
+ bitField0_ |= 0x00000001;
+ ownerAddress_ = value;
+ onChanged();
return this;
}
public Builder clearOwnerAddress() {
- result.hasOwnerAddress = false;
- result.ownerAddress_ = getDefaultInstance().getOwnerAddress();
+ bitField0_ = (bitField0_ & ~0x00000001);
+ ownerAddress_ = getDefaultInstance().getOwnerAddress();
+ onChanged();
return this;
}
+ void setOwnerAddress(com.google.protobuf.ByteString value) {
+ bitField0_ |= 0x00000001;
+ ownerAddress_ = value;
+ onChanged();
+ }
// optional string senderAddress = 2;
+ private java.lang.Object senderAddress_ = "";
public boolean hasSenderAddress() {
- return result.hasSenderAddress();
+ return ((bitField0_ & 0x00000002) == 0x00000002);
}
- public java.lang.String getSenderAddress() {
- return result.getSenderAddress();
+ public String getSenderAddress() {
+ java.lang.Object ref = senderAddress_;
+ if (!(ref instanceof String)) {
+ String s = ((com.google.protobuf.ByteString) ref).toStringUtf8();
+ senderAddress_ = s;
+ return s;
+ } else {
+ return (String) ref;
+ }
}
- public Builder setSenderAddress(java.lang.String value) {
+ public Builder setSenderAddress(String value) {
if (value == null) {
throw new NullPointerException();
}
- result.hasSenderAddress = true;
- result.senderAddress_ = value;
+ bitField0_ |= 0x00000002;
+ senderAddress_ = value;
+ onChanged();
return this;
}
public Builder clearSenderAddress() {
- result.hasSenderAddress = false;
- result.senderAddress_ = getDefaultInstance().getSenderAddress();
+ bitField0_ = (bitField0_ & ~0x00000002);
+ senderAddress_ = getDefaultInstance().getSenderAddress();
+ onChanged();
return this;
}
+ void setSenderAddress(com.google.protobuf.ByteString value) {
+ bitField0_ |= 0x00000002;
+ senderAddress_ = value;
+ onChanged();
+ }
// optional .UuidProtocol futureUuid = 3;
+ private akka.actor.mailbox.MailboxProtocol.UuidProtocol futureUuid_ = akka.actor.mailbox.MailboxProtocol.UuidProtocol.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ akka.actor.mailbox.MailboxProtocol.UuidProtocol, akka.actor.mailbox.MailboxProtocol.UuidProtocol.Builder, akka.actor.mailbox.MailboxProtocol.UuidProtocolOrBuilder> futureUuidBuilder_;
public boolean hasFutureUuid() {
- return result.hasFutureUuid();
+ return ((bitField0_ & 0x00000004) == 0x00000004);
}
public akka.actor.mailbox.MailboxProtocol.UuidProtocol getFutureUuid() {
- return result.getFutureUuid();
+ if (futureUuidBuilder_ == null) {
+ return futureUuid_;
+ } else {
+ return futureUuidBuilder_.getMessage();
+ }
}
public Builder setFutureUuid(akka.actor.mailbox.MailboxProtocol.UuidProtocol value) {
- if (value == null) {
- throw new NullPointerException();
+ if (futureUuidBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ futureUuid_ = value;
+ onChanged();
+ } else {
+ futureUuidBuilder_.setMessage(value);
}
- result.hasFutureUuid = true;
- result.futureUuid_ = value;
+ bitField0_ |= 0x00000004;
return this;
}
- public Builder setFutureUuid(akka.actor.mailbox.MailboxProtocol.UuidProtocol.Builder builderForValue) {
- result.hasFutureUuid = true;
- result.futureUuid_ = builderForValue.build();
+ public Builder setFutureUuid(
+ akka.actor.mailbox.MailboxProtocol.UuidProtocol.Builder builderForValue) {
+ if (futureUuidBuilder_ == null) {
+ futureUuid_ = builderForValue.build();
+ onChanged();
+ } else {
+ futureUuidBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000004;
return this;
}
public Builder mergeFutureUuid(akka.actor.mailbox.MailboxProtocol.UuidProtocol value) {
- if (result.hasFutureUuid() &&
- result.futureUuid_ != akka.actor.mailbox.MailboxProtocol.UuidProtocol.getDefaultInstance()) {
- result.futureUuid_ =
- akka.actor.mailbox.MailboxProtocol.UuidProtocol.newBuilder(result.futureUuid_).mergeFrom(value).buildPartial();
+ if (futureUuidBuilder_ == null) {
+ if (((bitField0_ & 0x00000004) == 0x00000004) &&
+ futureUuid_ != akka.actor.mailbox.MailboxProtocol.UuidProtocol.getDefaultInstance()) {
+ futureUuid_ =
+ akka.actor.mailbox.MailboxProtocol.UuidProtocol.newBuilder(futureUuid_).mergeFrom(value).buildPartial();
+ } else {
+ futureUuid_ = value;
+ }
+ onChanged();
} else {
- result.futureUuid_ = value;
+ futureUuidBuilder_.mergeFrom(value);
}
- result.hasFutureUuid = true;
+ bitField0_ |= 0x00000004;
return this;
}
public Builder clearFutureUuid() {
- result.hasFutureUuid = false;
- result.futureUuid_ = akka.actor.mailbox.MailboxProtocol.UuidProtocol.getDefaultInstance();
+ if (futureUuidBuilder_ == null) {
+ futureUuid_ = akka.actor.mailbox.MailboxProtocol.UuidProtocol.getDefaultInstance();
+ onChanged();
+ } else {
+ futureUuidBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000004);
return this;
}
+ public akka.actor.mailbox.MailboxProtocol.UuidProtocol.Builder getFutureUuidBuilder() {
+ bitField0_ |= 0x00000004;
+ onChanged();
+ return getFutureUuidFieldBuilder().getBuilder();
+ }
+ public akka.actor.mailbox.MailboxProtocol.UuidProtocolOrBuilder getFutureUuidOrBuilder() {
+ if (futureUuidBuilder_ != null) {
+ return futureUuidBuilder_.getMessageOrBuilder();
+ } else {
+ return futureUuid_;
+ }
+ }
+ private com.google.protobuf.SingleFieldBuilder<
+ akka.actor.mailbox.MailboxProtocol.UuidProtocol, akka.actor.mailbox.MailboxProtocol.UuidProtocol.Builder, akka.actor.mailbox.MailboxProtocol.UuidProtocolOrBuilder>
+ getFutureUuidFieldBuilder() {
+ if (futureUuidBuilder_ == null) {
+ futureUuidBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ akka.actor.mailbox.MailboxProtocol.UuidProtocol, akka.actor.mailbox.MailboxProtocol.UuidProtocol.Builder, akka.actor.mailbox.MailboxProtocol.UuidProtocolOrBuilder>(
+ futureUuid_,
+ getParentForChildren(),
+ isClean());
+ futureUuid_ = null;
+ }
+ return futureUuidBuilder_;
+ }
// required bytes message = 4;
+ private com.google.protobuf.ByteString message_ = com.google.protobuf.ByteString.EMPTY;
public boolean hasMessage() {
- return result.hasMessage();
+ return ((bitField0_ & 0x00000008) == 0x00000008);
}
public com.google.protobuf.ByteString getMessage() {
- return result.getMessage();
+ return message_;
}
public Builder setMessage(com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
- result.hasMessage = true;
- result.message_ = value;
+ bitField0_ |= 0x00000008;
+ message_ = value;
+ onChanged();
return this;
}
public Builder clearMessage() {
- result.hasMessage = false;
- result.message_ = getDefaultInstance().getMessage();
+ bitField0_ = (bitField0_ & ~0x00000008);
+ message_ = getDefaultInstance().getMessage();
+ onChanged();
return this;
}
@@ -441,18 +702,29 @@ public final class MailboxProtocol {
static {
defaultInstance = new DurableMailboxMessageProtocol(true);
- akka.actor.mailbox.MailboxProtocol.internalForceInit();
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:DurableMailboxMessageProtocol)
}
+ public interface UuidProtocolOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required uint64 high = 1;
+ boolean hasHigh();
+ long getHigh();
+
+ // required uint64 low = 2;
+ boolean hasLow();
+ long getLow();
+ }
public static final class UuidProtocol extends
- com.google.protobuf.GeneratedMessage {
+ com.google.protobuf.GeneratedMessage
+ implements UuidProtocolOrBuilder {
// Use UuidProtocol.newBuilder() to construct.
- private UuidProtocol() {
- initFields();
+ private UuidProtocol(Builder builder) {
+ super(builder);
}
private UuidProtocol(boolean noInit) {}
@@ -475,36 +747,56 @@ public final class MailboxProtocol {
return akka.actor.mailbox.MailboxProtocol.internal_static_UuidProtocol_fieldAccessorTable;
}
+ private int bitField0_;
// required uint64 high = 1;
public static final int HIGH_FIELD_NUMBER = 1;
- private boolean hasHigh;
- private long high_ = 0L;
- public boolean hasHigh() { return hasHigh; }
- public long getHigh() { return high_; }
+ private long high_;
+ public boolean hasHigh() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ public long getHigh() {
+ return high_;
+ }
// required uint64 low = 2;
public static final int LOW_FIELD_NUMBER = 2;
- private boolean hasLow;
- private long low_ = 0L;
- public boolean hasLow() { return hasLow; }
- public long getLow() { return low_; }
+ private long low_;
+ public boolean hasLow() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ public long getLow() {
+ return low_;
+ }
private void initFields() {
+ high_ = 0L;
+ low_ = 0L;
}
+ private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
- if (!hasHigh) return false;
- if (!hasLow) return false;
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasHigh()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasLow()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
- if (hasHigh()) {
- output.writeUInt64(1, getHigh());
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeUInt64(1, high_);
}
- if (hasLow()) {
- output.writeUInt64(2, getLow());
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeUInt64(2, low_);
}
getUnknownFields().writeTo(output);
}
@@ -515,19 +807,26 @@ public final class MailboxProtocol {
if (size != -1) return size;
size = 0;
- if (hasHigh()) {
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
- .computeUInt64Size(1, getHigh());
+ .computeUInt64Size(1, high_);
}
- if (hasLow()) {
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
- .computeUInt64Size(2, getLow());
+ .computeUInt64Size(2, low_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
public static akka.actor.mailbox.MailboxProtocol.UuidProtocol parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
@@ -602,34 +901,53 @@ public final class MailboxProtocol {
}
public Builder toBuilder() { return newBuilder(this); }
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
public static final class Builder extends
- com.google.protobuf.GeneratedMessage.Builder {
- private akka.actor.mailbox.MailboxProtocol.UuidProtocol result;
-
- // Construct using akka.actor.mailbox.MailboxProtocol.UuidProtocol.newBuilder()
- private Builder() {}
-
- private static Builder create() {
- Builder builder = new Builder();
- builder.result = new akka.actor.mailbox.MailboxProtocol.UuidProtocol();
- return builder;
+ com.google.protobuf.GeneratedMessage.Builder
+ implements akka.actor.mailbox.MailboxProtocol.UuidProtocolOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return akka.actor.mailbox.MailboxProtocol.internal_static_UuidProtocol_descriptor;
}
- protected akka.actor.mailbox.MailboxProtocol.UuidProtocol internalGetResult() {
- return result;
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return akka.actor.mailbox.MailboxProtocol.internal_static_UuidProtocol_fieldAccessorTable;
+ }
+
+ // Construct using akka.actor.mailbox.MailboxProtocol.UuidProtocol.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
}
public Builder clear() {
- if (result == null) {
- throw new IllegalStateException(
- "Cannot call clear() after build().");
- }
- result = new akka.actor.mailbox.MailboxProtocol.UuidProtocol();
+ super.clear();
+ high_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ low_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
public Builder clone() {
- return create().mergeFrom(result);
+ return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
@@ -641,33 +959,39 @@ public final class MailboxProtocol {
return akka.actor.mailbox.MailboxProtocol.UuidProtocol.getDefaultInstance();
}
- public boolean isInitialized() {
- return result.isInitialized();
- }
public akka.actor.mailbox.MailboxProtocol.UuidProtocol build() {
- if (result != null && !isInitialized()) {
+ akka.actor.mailbox.MailboxProtocol.UuidProtocol result = buildPartial();
+ if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
- return buildPartial();
+ return result;
}
private akka.actor.mailbox.MailboxProtocol.UuidProtocol buildParsed()
throws com.google.protobuf.InvalidProtocolBufferException {
- if (!isInitialized()) {
+ akka.actor.mailbox.MailboxProtocol.UuidProtocol result = buildPartial();
+ if (!result.isInitialized()) {
throw newUninitializedMessageException(
result).asInvalidProtocolBufferException();
}
- return buildPartial();
+ return result;
}
public akka.actor.mailbox.MailboxProtocol.UuidProtocol buildPartial() {
- if (result == null) {
- throw new IllegalStateException(
- "build() has already been called on this Builder.");
+ akka.actor.mailbox.MailboxProtocol.UuidProtocol result = new akka.actor.mailbox.MailboxProtocol.UuidProtocol(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
}
- akka.actor.mailbox.MailboxProtocol.UuidProtocol returnMe = result;
- result = null;
- return returnMe;
+ result.high_ = high_;
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ result.low_ = low_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
@@ -691,6 +1015,18 @@ public final class MailboxProtocol {
return this;
}
+ public final boolean isInitialized() {
+ if (!hasHigh()) {
+
+ return false;
+ }
+ if (!hasLow()) {
+
+ return false;
+ }
+ return true;
+ }
+
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
@@ -703,61 +1039,72 @@ public final class MailboxProtocol {
switch (tag) {
case 0:
this.setUnknownFields(unknownFields.build());
+ onChanged();
return this;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
this.setUnknownFields(unknownFields.build());
+ onChanged();
return this;
}
break;
}
case 8: {
- setHigh(input.readUInt64());
+ bitField0_ |= 0x00000001;
+ high_ = input.readUInt64();
break;
}
case 16: {
- setLow(input.readUInt64());
+ bitField0_ |= 0x00000002;
+ low_ = input.readUInt64();
break;
}
}
}
}
+ private int bitField0_;
// required uint64 high = 1;
+ private long high_ ;
public boolean hasHigh() {
- return result.hasHigh();
+ return ((bitField0_ & 0x00000001) == 0x00000001);
}
public long getHigh() {
- return result.getHigh();
+ return high_;
}
public Builder setHigh(long value) {
- result.hasHigh = true;
- result.high_ = value;
+ bitField0_ |= 0x00000001;
+ high_ = value;
+ onChanged();
return this;
}
public Builder clearHigh() {
- result.hasHigh = false;
- result.high_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ high_ = 0L;
+ onChanged();
return this;
}
// required uint64 low = 2;
+ private long low_ ;
public boolean hasLow() {
- return result.hasLow();
+ return ((bitField0_ & 0x00000002) == 0x00000002);
}
public long getLow() {
- return result.getLow();
+ return low_;
}
public Builder setLow(long value) {
- result.hasLow = true;
- result.low_ = value;
+ bitField0_ |= 0x00000002;
+ low_ = value;
+ onChanged();
return this;
}
public Builder clearLow() {
- result.hasLow = false;
- result.low_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000002);
+ low_ = 0L;
+ onChanged();
return this;
}
@@ -766,7 +1113,6 @@ public final class MailboxProtocol {
static {
defaultInstance = new UuidProtocol(true);
- akka.actor.mailbox.MailboxProtocol.internalForceInit();
defaultInstance.initFields();
}
@@ -829,7 +1175,5 @@ public final class MailboxProtocol {
}, assigner);
}
- public static void internalForceInit() {}
-
// @@protoc_insertion_point(outer_class_scope)
}
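A minimal round-trip sketch, assuming the standard protoc-generated API for the regenerated UuidProtocol above (newBuilder, toByteString, and the parseFrom overload shown earlier); the 2.4-style builder now tracks presence in bitField0_ instead of a mutable result holder, but callers are unaffected:

import akka.actor.mailbox.MailboxProtocol.UuidProtocol

object UuidRoundTrip extends App {
  val uuid = UuidProtocol.newBuilder()
    .setHigh(42L) // required uint64 high = 1
    .setLow(7L)   // required uint64 low = 2
    .build()      // throws for a missing required field, via newUninitializedMessageException

  val parsed = UuidProtocol.parseFrom(uuid.toByteString)
  assert(parsed.getHigh == 42L && parsed.getLow == 7L)
}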
diff --git a/akka-http/src/main/scala/akka/http/AkkaRestServlet.scala b/akka-http/src/main/scala/akka/http/AkkaRestServlet.scala
deleted file mode 100644
index 79b5337f93..0000000000
--- a/akka-http/src/main/scala/akka/http/AkkaRestServlet.scala
+++ /dev/null
@@ -1,38 +0,0 @@
-/**
- * Copyright (C) 2009-2011 Scalable Solutions AB
- */
-package akka.http
-
-import com.sun.jersey.spi.container.servlet.ServletContainer
-
-/**
- * This is just a simple wrapper on top of ServletContainer to inject some config from the akka.conf
- * If you were using akka.comet.AkkaServlet before, but only used it for Jersey, you should switch to this servlet instead
- */
-class AkkaRestServlet extends ServletContainer {
- import akka.config.Config.{ config ⇒ c }
-
- val initParams = new java.util.HashMap[String, String]
-
- addInitParameter("com.sun.jersey.config.property.packages", c.getList("akka.http.resource-packages").mkString(";"))
- addInitParameter("com.sun.jersey.spi.container.ResourceFilters", c.getList("akka.http.filters").mkString(","))
-
- /**
- * Provide a fallback for default values
- */
- override def getInitParameter(key: String) =
- Option(super.getInitParameter(key)).getOrElse(initParams get key)
-
- /**
- * Provide a fallback for default values
- */
- override def getInitParameterNames() = {
- import scala.collection.JavaConversions._
- initParams.keySet.iterator ++ super.getInitParameterNames
- }
-
- /**
- * Provide possibility to add config params
- */
- def addInitParameter(param: String, value: String): Unit = initParams.put(param, value)
-}
diff --git a/akka-http/src/main/scala/akka/http/Mist.scala b/akka-http/src/main/scala/akka/http/Mist.scala
index 423ad9b291..d5139795ba 100644
--- a/akka-http/src/main/scala/akka/http/Mist.scala
+++ b/akka-http/src/main/scala/akka/http/Mist.scala
@@ -11,6 +11,7 @@ import akka.config.ConfigurationException
import javax.servlet.http.{ HttpServletResponse, HttpServletRequest }
import javax.servlet.http.HttpServlet
import javax.servlet.Filter
+import java.lang.UnsupportedOperationException
/**
* @author Garrick Evans
@@ -71,27 +72,39 @@ trait Mist {
/**
* The root endpoint actor
*/
- protected val _root = Actor.registry.actorFor(RootActorID).getOrElse(
- throw new ConfigurationException("akka.http.root-actor-id configuration option does not have a valid actor address [" + RootActorID + "]"))
+ def root: ActorRef
/**
* Server-specific method factory
*/
- protected var _factory: Option[RequestMethodFactory] = None
+ protected var factory: Option[RequestMethodFactory] = None
/**
* Handles all servlet requests
*/
protected def mistify(request: HttpServletRequest,
- response: HttpServletResponse)(builder: (() ⇒ tAsyncRequestContext) ⇒ RequestMethod) = {
- def suspend: tAsyncRequestContext = {
+ response: HttpServletResponse) = {
+
+ val builder: (() ⇒ tAsyncRequestContext) ⇒ RequestMethod =
+ request.getMethod.toUpperCase match {
+ case "DELETE" ⇒ factory.get.Delete
+ case "GET" ⇒ factory.get.Get
+ case "HEAD" ⇒ factory.get.Head
+ case "OPTIONS" ⇒ factory.get.Options
+ case "POST" ⇒ factory.get.Post
+ case "PUT" ⇒ factory.get.Put
+ case "TRACE" ⇒ factory.get.Trace
+ case unknown ⇒ throw new UnsupportedOperationException(unknown)
+ }
+
+ def suspend(closeConnection: Boolean): tAsyncRequestContext = {
// set to right now, which is effectively "already expired"
response.setDateHeader("Expires", System.currentTimeMillis)
response.setHeader("Cache-Control", "no-cache, must-revalidate")
// no keep-alive?
- if (ConnectionClose) response.setHeader("Connection", "close")
+ if (closeConnection) response.setHeader("Connection", "close")
// suspend the request
// TODO: move this out to the specialized support if jetty asyncstart doesn't let us update timeouts
@@ -100,8 +113,8 @@ trait Mist {
// shoot the message to the root endpoint for processing
// IMPORTANT: the suspend method is invoked on the server thread not in the actor
- val method = builder(suspend _)
- if (method.go) _root ! method
+ val method = builder(() ⇒ suspend(ConnectionClose))
+ if (method.go) root ! method
}
/**
@@ -111,7 +124,7 @@ trait Mist {
def initMist(context: ServletContext) {
val server = context.getServerInfo
val (major, minor) = (context.getMajorVersion, context.getMinorVersion)
- _factory = if (major >= 3) {
+ factory = if (major >= 3) {
Some(Servlet30ContextMethodFactory)
} else if (server.toLowerCase startsWith JettyServer) {
Some(JettyContinuationMethodFactory)
@@ -121,11 +134,23 @@ trait Mist {
}
}
+trait RootEndpointLocator {
+ var root: ActorRef = null
+
+ def configureRoot(address: String) {
+ def findRoot(address: String): ActorRef =
+ Actor.registry.actorFor(address).getOrElse(
+ throw new ConfigurationException("akka.http.root-actor-id configuration option does not have a valid actor address [" + address + "]"))
+
+ root = if ((address eq null) || address == "") findRoot(MistSettings.RootActorID) else findRoot(address)
+ }
+}
+
/**
* AkkaMistServlet adds support to bridge Http and Actors in an asynchronous fashion
* Async impls currently supported: Servlet3.0, Jetty Continuations
*/
-class AkkaMistServlet extends HttpServlet with Mist {
+class AkkaMistServlet extends HttpServlet with Mist with RootEndpointLocator {
import javax.servlet.{ ServletConfig }
/**
@@ -134,22 +159,17 @@ class AkkaMistServlet extends HttpServlet with Mist {
override def init(config: ServletConfig) {
super.init(config)
initMist(config.getServletContext)
+ configureRoot(config.getServletContext.getInitParameter("root-endpoint"))
}
- protected override def doDelete(req: HttpServletRequest, res: HttpServletResponse) = mistify(req, res)(_factory.get.Delete)
- protected override def doGet(req: HttpServletRequest, res: HttpServletResponse) = mistify(req, res)(_factory.get.Get)
- protected override def doHead(req: HttpServletRequest, res: HttpServletResponse) = mistify(req, res)(_factory.get.Head)
- protected override def doOptions(req: HttpServletRequest, res: HttpServletResponse) = mistify(req, res)(_factory.get.Options)
- protected override def doPost(req: HttpServletRequest, res: HttpServletResponse) = mistify(req, res)(_factory.get.Post)
- protected override def doPut(req: HttpServletRequest, res: HttpServletResponse) = mistify(req, res)(_factory.get.Put)
- protected override def doTrace(req: HttpServletRequest, res: HttpServletResponse) = mistify(req, res)(_factory.get.Trace)
+ protected override def service(req: HttpServletRequest, res: HttpServletResponse) = mistify(req, res)
}
/**
* Proof-of-concept, use at own risk
* Will be officially supported in a later release
*/
-class AkkaMistFilter extends Filter with Mist {
+class AkkaMistFilter extends Filter with Mist with RootEndpointLocator {
import javax.servlet.{ ServletRequest, ServletResponse, FilterConfig, FilterChain }
/**
@@ -157,6 +177,7 @@ class AkkaMistFilter extends Filter with Mist {
*/
def init(config: FilterConfig) {
initMist(config.getServletContext)
+ configureRoot(config.getServletContext.getInitParameter("root-endpoint"))
}
/**
@@ -165,16 +186,7 @@ class AkkaMistFilter extends Filter with Mist {
override def doFilter(req: ServletRequest, res: ServletResponse, chain: FilterChain) {
(req, res) match {
case (hreq: HttpServletRequest, hres: HttpServletResponse) ⇒
- hreq.getMethod.toUpperCase match {
- case "DELETE" ⇒ mistify(hreq, hres)(_factory.get.Delete)
- case "GET" ⇒ mistify(hreq, hres)(_factory.get.Get)
- case "HEAD" ⇒ mistify(hreq, hres)(_factory.get.Head)
- case "OPTIONS" ⇒ mistify(hreq, hres)(_factory.get.Options)
- case "POST" ⇒ mistify(hreq, hres)(_factory.get.Post)
- case "PUT" ⇒ mistify(hreq, hres)(_factory.get.Put)
- case "TRACE" ⇒ mistify(hreq, hres)(_factory.get.Trace)
- case unknown ⇒ {}
- }
+ mistify(hreq, hres)
chain.doFilter(req, res)
case _ ⇒ chain.doFilter(req, res)
}
@@ -276,7 +288,7 @@ class RootEndpoint extends Actor with Endpoint {
def recv: Receive = {
case NoneAvailable(uri, req) ⇒ _na(uri, req)
- case unknown ⇒ {}
+ case unknown ⇒
}
/**
@@ -329,24 +341,22 @@ trait RequestMethod {
def request = context.get.getRequest.asInstanceOf[HttpServletRequest]
def response = context.get.getResponse.asInstanceOf[HttpServletResponse]
- def getHeaderOrElse(name: String, default: Function[Any, String]): String =
+ def getHeaderOrElse(name: String, default: ⇒ String): String =
request.getHeader(name) match {
- case null ⇒ default(null)
+ case null ⇒ default
case s ⇒ s
}
- def getParameterOrElse(name: String, default: Function[Any, String]): String =
+ def getParameterOrElse(name: String, default: ⇒ String): String =
request.getParameter(name) match {
- case null ⇒ default(null)
+ case null ⇒ default
case s ⇒ s
}
- def complete(status: Int, body: String): Boolean = complete(status, body, Headers())
-
- def complete(status: Int, body: String, headers: Headers): Boolean =
+ def complete(status: Int, body: String, headers: Headers = Headers()): Boolean =
rawComplete { res ⇒
res.setStatus(status)
- headers foreach { h ⇒ response.setHeader(h._1, h._2) }
+ headers foreach { case (name, value) ⇒ response.setHeader(name, value) }
res.getWriter.write(body)
res.getWriter.close
res.flushBuffer
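A minimal caller-side sketch of the reshaped RequestMethod API, assuming a hypothetical endpoint actor receiving the RequestMethod instances that mistify sends to the root endpoint: the defaults of getHeaderOrElse and getParameterOrElse are now by-name Strings rather than Function[Any, String], and complete takes its Headers as a defaulted parameter:

import akka.actor.Actor
import akka.http.RequestMethod

class HelloEndpoint extends Actor {
  def receive = {
    case req: RequestMethod ⇒
      val name   = req.getParameterOrElse("name", "world")     // by-name default
      val accept = req.getHeaderOrElse("Accept", "text/plain") // by-name default
      req.complete(200, "Hello " + name)                       // headers argument defaults to Headers()
  }
}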
diff --git a/akka-http/src/main/scala/akka/http/Servlet30Context.scala b/akka-http/src/main/scala/akka/http/Servlet30Context.scala
index 2d354d9a10..cbafffaea3 100644
--- a/akka-http/src/main/scala/akka/http/Servlet30Context.scala
+++ b/akka-http/src/main/scala/akka/http/Servlet30Context.scala
@@ -45,8 +45,8 @@ trait Servlet30Context extends AsyncListener {
//
def onComplete(e: AsyncEvent) {}
def onError(e: AsyncEvent) = e.getThrowable match {
- case null ⇒ {}
- case t ⇒ {}
+ case null ⇒
+ case t ⇒ EventHandler.error(t, this, t.getMessage)
}
def onStartAsync(e: AsyncEvent) {}
def onTimeout(e: AsyncEvent) = {
diff --git a/akka-http/src/main/scala/akka/security/Security.scala b/akka-http/src/main/scala/akka/security/Security.scala
deleted file mode 100644
index 969ddeabc2..0000000000
--- a/akka-http/src/main/scala/akka/security/Security.scala
+++ /dev/null
@@ -1,563 +0,0 @@
-/*
- * Copyright 2007-2008 WorldWide Conferencing, LLC
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- */
-
-/*
- * AKKA AAS (Authentication and Authorization Service)
- * Rework of lift's (www.liftweb.com) HTTP Authentication module
- * All cred to the Lift team (www.liftweb.com), especially David Pollak and Tim Perrett
- */
-
-package akka.security
-
-import akka.actor.{ Scheduler, Actor, ActorRef, IllegalActorStateException }
-import akka.event.EventHandler
-import akka.actor.Actor._
-import akka.config.{ Config, ConfigurationException }
-
-import com.sun.jersey.api.model.AbstractMethod
-import com.sun.jersey.spi.container.{ ResourceFilterFactory, ContainerRequest, ContainerRequestFilter, ContainerResponse, ContainerResponseFilter, ResourceFilter }
-import com.sun.jersey.core.util.Base64
-
-import javax.ws.rs.core.{ SecurityContext, Context, Response }
-import javax.ws.rs.WebApplicationException
-import javax.annotation.security.{ DenyAll, PermitAll, RolesAllowed }
-import java.security.Principal
-import java.util.concurrent.TimeUnit
-
-case object OK
-
-/**
- * Authenticate represents a message to authenticate a request
- */
-case class Authenticate(val req: ContainerRequest, val rolesAllowed: List[String])
-
-/**
- * User info represents a sign-on with associated credentials/roles
- */
-case class UserInfo(val username: String, val password: String, val roles: List[String])
-
-trait Credentials
-
-case class BasicCredentials(username: String, password: String) extends Credentials
-
-case class DigestCredentials(method: String,
- userName: String,
- realm: String,
- nonce: String,
- uri: String,
- qop: String,
- nc: String,
- cnonce: String,
- response: String,
- opaque: String) extends Credentials
-
-case class SpnegoCredentials(token: Array[Byte]) extends Credentials
-
-/**
- * Jersey Filter for invocation intercept and authorization/authentication
- */
-class AkkaSecurityFilterFactory extends ResourceFilterFactory {
- class Filter(actor: ActorRef, rolesAllowed: Option[List[String]])
- extends ResourceFilter with ContainerRequestFilter {
-
- override def getRequestFilter: ContainerRequestFilter = this
-
- override def getResponseFilter: ContainerResponseFilter = null
-
- /**
- * Here's where the magic happens. The request is authenticated by
- * sending a request for authentication to the configured authenticator actor
- */
- override def filter(request: ContainerRequest): ContainerRequest =
- rolesAllowed match {
- case Some(roles) ⇒ {
- val result = (authenticator !! Authenticate(request, roles)).as[AnyRef]
- result match {
- case Some(OK) ⇒ request
- case Some(r) if r.isInstanceOf[Response] ⇒
- throw new WebApplicationException(r.asInstanceOf[Response])
- case None ⇒ throw new WebApplicationException(408)
- case unknown ⇒ {
- throw new WebApplicationException(Response.Status.INTERNAL_SERVER_ERROR)
- }
- }
- }
- case None ⇒ throw new WebApplicationException(Response.Status.FORBIDDEN)
- }
- }
-
- lazy val authenticatorFQN = {
- val auth = Config.config.getString("akka.http.authenticator", "N/A")
- if (auth == "N/A") throw new IllegalActorStateException("The config option 'akka.http.authenticator' is not defined in 'akka.conf'")
- auth
- }
-
- /**
- * Currently we always take the first, since there usually should be at most one authentication actor, but a round-robin
- * strategy could be implemented in the future
- */
- def authenticator: ActorRef = Actor.registry.actorFor(authenticatorFQN)
- .getOrElse(throw new ConfigurationException(
- "akka.http.authenticator configuration option does not have a valid actor address [" + authenticatorFQN + "]"))
-
- def mkFilter(roles: Option[List[String]]): java.util.List[ResourceFilter] =
- java.util.Collections.singletonList(new Filter(authenticator, roles))
-
- /**
- * The create method is invoked for each resource, and we look for javax.annotation.security annotations
- * and create the appropriate Filter configurations for each.
- */
- override def create(am: AbstractMethod): java.util.List[ResourceFilter] = {
-
- //DenyAll takes precedence
- if (am.isAnnotationPresent(classOf[DenyAll]))
- return mkFilter(None)
-
- //Method-level RolesAllowed takes precedence
- val ra = am.getAnnotation(classOf[RolesAllowed])
-
- if (ra ne null)
- return mkFilter(Some(ra.value.toList))
-
- //PermitAll takes precedence over resource-level RolesAllowed annotation
- if (am.isAnnotationPresent(classOf[PermitAll]))
- return null;
-
- //Last but not least, the resource-level RolesAllowed
- val cra = am.getResource.getAnnotation(classOf[RolesAllowed])
- if (cra ne null)
- return mkFilter(Some(cra.value.toList))
-
- return null;
- }
-}
-
-/**
- * AuthenticationActor is the super-trait for actors doing Http authentication
- * It defines the common ground and the flow of execution
- */
-trait AuthenticationActor[C <: Credentials] extends Actor {
- type Req = ContainerRequest
-
- //What realm does the authentication use?
- def realm: String
-
- //Creates a response to signal unauthorized
- def unauthorized: Response
-
- //Used to extract information from the request, returns None if no credentials found
- def extractCredentials(r: Req): Option[C]
-
- //returns None if unverified
- def verify(c: Option[C]): Option[UserInfo]
-
- //Construct a new SecurityContext from the supplied parameters
- def mkSecurityContext(r: Req, user: UserInfo): SecurityContext
-
- //This is the default security context factory
- def mkDefaultSecurityContext(r: Req, u: UserInfo, scheme: String): SecurityContext = {
- val n = u.username
- val p = new Principal { def getName = n }
-
- new SecurityContext {
- def getAuthenticationScheme = scheme
- def getUserPrincipal = p
- def isSecure = r.isSecure
- def isUserInRole(role: String) = u.roles.exists(_ == role)
- }
- }
-
- /**
- * Responsible for the execution flow of authentication
- *
- * Credentials are extracted and verified from the request,
- * and a security context is created for the ContainerRequest
- * this should ensure good integration with current Jersey security
- */
- protected val authenticate: Receive = {
- case Authenticate(req, roles) ⇒ {
- verify(extractCredentials(req)) match {
- case Some(u: UserInfo) ⇒ {
- req.setSecurityContext(mkSecurityContext(req, u))
- if (roles.exists(req.isUserInRole(_))) self.reply(OK)
- else self.reply(Response.status(Response.Status.FORBIDDEN).build)
- }
- case _ ⇒ self.reply(unauthorized)
- }
- }
- }
-
- def receive = authenticate
-
- //returns the string value of the "Authorization"-header of the request
- def auth(r: Req) = r.getHeaderValue("Authorization")
-
- //Turns the aforementioned header value into an option
- def authOption(r: Req): Option[String] = {
- val a = auth(r)
- if ((a ne null) && a.length > 0) Some(a) else None
- }
-}
-
-/**
- * This trait implements the logic for Http Basic authentication
- * mix this trait into a class to create an authenticator
- * Don't forget to set the authenticator FQN in the rest-part of the akka config
- */
-trait BasicAuthenticationActor extends AuthenticationActor[BasicCredentials] {
- override def unauthorized =
- Response.status(401).header("WWW-Authenticate", "Basic realm=\"" + realm + "\"").build
-
- override def extractCredentials(r: Req): Option[BasicCredentials] = {
- val Authorization = """(.*):(.*)""".r
-
- authOption(r) match {
- case Some(token) ⇒ {
- val authResponse = new String(Base64.decode(token.substring(6).getBytes))
- authResponse match {
- case Authorization(username, password) ⇒ Some(BasicCredentials(username, password))
- case _ ⇒ None
- }
- }
- case _ ⇒ None
- }
- }
-
- override def mkSecurityContext(r: Req, u: UserInfo): SecurityContext =
- mkDefaultSecurityContext(r, u, SecurityContext.BASIC_AUTH)
-}
-
-/**
- * This trait implements the logic for Http Digest authentication mix this trait into a
- * class to create an authenticator. Don't forget to set the authenticator FQN in the
- * rest-part of the akka config
- */
-trait DigestAuthenticationActor extends AuthenticationActor[DigestCredentials] {
- import LiftUtils._
-
- private object InvalidateNonces
-
- //Holds the generated nonces for the specified validity period
- val nonceMap = mkNonceMap
-
- //Discards old nonces
- protected val invalidateNonces: Receive = {
- case InvalidateNonces ⇒
- val ts = System.currentTimeMillis
- nonceMap.filter(tuple ⇒ (ts - tuple._2) < nonceValidityPeriod)
- case unknown ⇒ {}
- }
-
- //Schedule the invalidation of nonces
- Scheduler.schedule(self, InvalidateNonces, noncePurgeInterval, noncePurgeInterval, TimeUnit.MILLISECONDS)
-
- //authenticate or invalidate nonces
- override def receive = authenticate orElse invalidateNonces
-
- override def unauthorized: Response = {
- val nonce = randomString(64)
- nonceMap.put(nonce, System.currentTimeMillis)
- unauthorized(nonce, "auth", randomString(64))
- }
-
- def unauthorized(nonce: String, qop: String, opaque: String): Response = {
- Response.status(401).header(
- "WWW-Authenticate",
- "Digest realm=\"" + realm + "\", " +
- "qop=\"" + qop + "\", " +
- "nonce=\"" + nonce + "\", " +
- "opaque=\"" + opaque + "\"").build
- }
-
- //Tests whether the specified credentials are valid
- def validate(auth: DigestCredentials, user: UserInfo): Boolean = {
- def h(s: String) = hexEncode(md5(s.getBytes("UTF-8")))
-
- val ha1 = h(auth.userName + ":" + auth.realm + ":" + user.password)
- val ha2 = h(auth.method + ":" + auth.uri)
-
- val response = h(
- ha1 + ":" + auth.nonce + ":" +
- auth.nc + ":" + auth.cnonce + ":" +
- auth.qop + ":" + ha2)
-
- (response == auth.response) && (nonceMap.getOrElse(auth.nonce, -1) != -1)
- }
-
- override def verify(odc: Option[DigestCredentials]): Option[UserInfo] = odc match {
- case Some(dc) ⇒ {
- userInfo(dc.userName) match {
- case Some(u) if validate(dc, u) ⇒
- nonceMap.get(dc.nonce).map(t ⇒ (System.currentTimeMillis - t) < nonceValidityPeriod).map(_ ⇒ u)
- case _ ⇒ None
- }
- }
- case _ ⇒ None
- }
-
- override def extractCredentials(r: Req): Option[DigestCredentials] = {
- authOption(r).map(s ⇒ {
- val ? = splitNameValuePairs(s.substring(7, s.length))
- DigestCredentials(r.getMethod.toUpperCase,
- ?("username"), ?("realm"), ?("nonce"),
- ?("uri"), ?("qop"), ?("nc"),
- ?("cnonce"), ?("response"), ?("opaque"))
- })
- }
-
- override def mkSecurityContext(r: Req, u: UserInfo): SecurityContext =
- mkDefaultSecurityContext(r, u, SecurityContext.DIGEST_AUTH)
-
- //Mandatory overrides
- def userInfo(username: String): Option[UserInfo]
-
- def mkNonceMap: scala.collection.mutable.Map[String, Long]
-
- //Optional overrides
- def nonceValidityPeriod = 60 * 1000 //ms
- def noncePurgeInterval = 2 * 60 * 1000 //ms
-}
-
-import java.security.Principal
-import java.security.PrivilegedActionException
-import java.security.PrivilegedExceptionAction
-
-import javax.security.auth.login.AppConfigurationEntry
-import javax.security.auth.login.Configuration
-import javax.security.auth.login.LoginContext
-import javax.security.auth.Subject
-import javax.security.auth.kerberos.KerberosPrincipal
-
-import org.ietf.jgss.GSSContext
-import org.ietf.jgss.GSSCredential
-import org.ietf.jgss.GSSManager
-
-trait SpnegoAuthenticationActor extends AuthenticationActor[SpnegoCredentials] {
- override def unauthorized =
- Response.status(401).header("WWW-Authenticate", "Negotiate").build
-
- // for some reason the jersey Base64 class does not work with kerberos
- // but the commons Base64 does
- import org.apache.commons.codec.binary.Base64
- override def extractCredentials(r: Req): Option[SpnegoCredentials] = {
- val AuthHeader = """Negotiate\s(.*)""".r
-
- authOption(r) match {
- case Some(AuthHeader(token)) ⇒
- Some(SpnegoCredentials(Base64.decodeBase64(token.trim.getBytes)))
- case _ ⇒ None
- }
- }
-
- override def verify(odc: Option[SpnegoCredentials]): Option[UserInfo] = odc match {
- case Some(dc) ⇒ {
- try {
- val principal = Subject.doAs(this.serviceSubject, new KerberosValidateAction(dc.token));
- val user = stripRealmFrom(principal)
- Some(UserInfo(user, null, rolesFor(user)))
- } catch {
- case e: PrivilegedActionException ⇒ {
- EventHandler.error(e, this, e.getMessage)
- None
- }
- }
- }
- case _ ⇒ None
- }
-
- override def mkSecurityContext(r: Req, u: UserInfo): SecurityContext =
- mkDefaultSecurityContext(r, u, SecurityContext.CLIENT_CERT_AUTH) // the security context does not know about spnego/kerberos
- // not sure whether to use a constant from the security context or something like "SPNEGO/Kerberos"
-
- /**
- * returns the roles for the given user
- */
- def rolesFor(user: String): List[String]
-
- // Kerberos
-
- /**
- * strips the realm from a kerberos principal name, returning only the user part
- */
- private def stripRealmFrom(principal: String): String = principal.split("@")(0)
-
- /**
- * principal name for the HTTP kerberos service, i.e HTTP/ { server } @ { realm }
- */
- lazy val servicePrincipal = {
- val p = Config.config.getString("akka.http.kerberos.servicePrincipal", "N/A")
- if (p == "N/A") throw new IllegalActorStateException("The config option 'akka.http.kerberos.servicePrincipal' is not defined in 'akka.conf'")
- p
- }
-
- /**
- * keytab location with credentials for the service principal
- */
- lazy val keyTabLocation = {
- val p = Config.config.getString("akka.http.kerberos.keyTabLocation", "N/A")
- if (p == "N/A") throw new IllegalActorStateException("The config option 'akka.http.kerberos.keyTabLocation' is not defined in 'akka.conf'")
- p
- }
-
- lazy val kerberosDebug = {
- val p = Config.config.getString("akka.http.kerberos.kerberosDebug", "N/A")
- if (p == "N/A") throw new IllegalActorStateException("The config option 'akka.http.kerberos.kerberosDebug' is not defined in 'akka.conf'")
- p
- }
-
- /**
- * is not used by this authenticator, so accept an empty value
- */
- lazy val realm = Config.config.getString("akka.http.kerberos.realm", "")
-
- /**
- * verify the kerberos token from a client with the server
- */
- class KerberosValidateAction(kerberosTicket: Array[Byte]) extends PrivilegedExceptionAction[String] {
- def run = {
- val context = GSSManager.getInstance().createContext(null.asInstanceOf[GSSCredential])
- context.acceptSecContext(kerberosTicket, 0, kerberosTicket.length)
- val user = context.getSrcName().toString()
- context.dispose()
- user
- }
- }
-
- // service principal login to kerberos on startup
-
- val serviceSubject = servicePrincipalLogin
-
- /**
- * acquire an initial ticket from the kerberos server for the HTTP service
- */
- def servicePrincipalLogin = {
- val loginConfig = new LoginConfig(
- new java.net.URL(this.keyTabLocation).toExternalForm(),
- this.servicePrincipal,
- this.kerberosDebug)
- val princ = new java.util.HashSet[Principal](1)
- princ.add(new KerberosPrincipal(this.servicePrincipal))
- val sub = new Subject(false, princ, new java.util.HashSet[Object], new java.util.HashSet[Object])
- val lc = new LoginContext("", sub, null, loginConfig)
- lc.login()
- lc.getSubject()
- }
-
- /**
- * this class simulates a login-config.xml
- */
- class LoginConfig(keyTabLocation: String, servicePrincipal: String, debug: String) extends Configuration {
- override def getAppConfigurationEntry(name: String): Array[AppConfigurationEntry] = {
- val options = new java.util.HashMap[String, String]
- options.put("useKeyTab", "true")
- options.put("keyTab", this.keyTabLocation)
- options.put("principal", this.servicePrincipal)
- options.put("storeKey", "true")
- options.put("doNotPrompt", "true")
- options.put("isInitiator", "true")
- options.put("debug", debug)
-
- Array(new AppConfigurationEntry(
- "com.sun.security.auth.module.Krb5LoginModule",
- AppConfigurationEntry.LoginModuleControlFlag.REQUIRED,
- options))
- }
- }
-
-}
-
-/*
-* Copyright 2006-2010 WorldWide Conferencing, LLC
-*
-* Licensed under the Apache License, Version 2.0 (the "License");
-* you may not use this file except in compliance with the License.
-* You may obtain a copy of the License at
-*
-* http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
-object LiftUtils {
- import java.security.{ MessageDigest, SecureRandom }
- val random = new SecureRandom()
-
- def md5(in: Array[Byte]): Array[Byte] = (MessageDigest.getInstance("MD5")).digest(in)
-
- /**
- * Create a random string of a given size
- * @param size size of the string to create. Must be a non-negative integer
- * @return the generated string
- */
- def randomString(size: Int): String = {
- def addChar(pos: Int, lastRand: Int, sb: StringBuilder): StringBuilder = {
- if (pos >= size) sb
- else {
- val randNum = if ((pos % 6) == 0) random.nextInt else lastRand
- sb.append((randNum & 0x1f) match {
- case n if n < 26 ⇒ ('A' + n).toChar
- case n ⇒ ('0' + (n - 26)).toChar
- })
- addChar(pos + 1, randNum >> 5, sb)
- }
- }
- addChar(0, 0, new StringBuilder(size)).toString
- }
-
- /** encode a Byte array as hexadecimal characters */
- def hexEncode(in: Array[Byte]): String = {
- val sb = new StringBuilder
- val len = in.length
- def addDigit(in: Array[Byte], pos: Int, len: Int, sb: StringBuilder) {
- if (pos < len) {
- val b: Int = in(pos)
- val msb = (b & 0xf0) >> 4
- val lsb = (b & 0x0f)
- sb.append((if (msb < 10) ('0' + msb).asInstanceOf[Char] else ('a' + (msb - 10)).asInstanceOf[Char]))
- sb.append((if (lsb < 10) ('0' + lsb).asInstanceOf[Char] else ('a' + (lsb - 10)).asInstanceOf[Char]))
- addDigit(in, pos + 1, len, sb)
- }
- }
- addDigit(in, 0, len, sb)
- sb.toString
- }
-
- /**
- * Splits a string of the form <name1=value1, name2=value2, ... > and unquotes the quoted values.
- * The result is a Map[String, String]
- */
- def splitNameValuePairs(props: String): Map[String, String] = {
- /**
- * If str is surrounded by quotes it returns the content between the quotes
- */
- def unquote(str: String) = {
- if ((str ne null) && str.length >= 2 && str.charAt(0) == '\"' && str.charAt(str.length - 1) == '\"')
- str.substring(1, str.length - 1)
- else
- str
- }
-
- val list = props.split(",").toList.map(in ⇒ {
- val pair = in match { case null ⇒ Nil case s ⇒ s.split("=").toList.map(_.trim).filter(_.length > 0) }
- (pair(0), unquote(pair(1)))
- })
- val map: Map[String, String] = Map.empty
- (map /: list)((m, next) ⇒ m + (next))
- }
-}
diff --git a/akka-http/src/test/scala/AllTest.scala b/akka-http/src/test/scala/AllTest.scala
deleted file mode 100644
index 0b473507dd..0000000000
--- a/akka-http/src/test/scala/AllTest.scala
+++ /dev/null
@@ -1,15 +0,0 @@
-package akka.security
-
-import junit.framework.Test
-import junit.framework.TestCase
-import junit.framework.TestSuite
-
-object AllTest extends TestCase {
- def suite(): Test = {
- val suite = new TestSuite("All Scala tests")
- suite.addTestSuite(classOf[BasicAuthenticatorSpec])
- suite
- }
-
- def main(args: Array[String]) = junit.textui.TestRunner.run(suite)
-}
diff --git a/akka-http/src/test/scala/SecuritySpec.scala b/akka-http/src/test/scala/SecuritySpec.scala
deleted file mode 100644
index edf7b2bac0..0000000000
--- a/akka-http/src/test/scala/SecuritySpec.scala
+++ /dev/null
@@ -1,82 +0,0 @@
-/**
- * Copyright (C) 2009-2011 Scalable Solutions AB
- */
-
-package akka.security
-
-import akka.config.Supervision._
-import akka.actor.Actor._
-
-import org.scalatest.Suite
-import org.scalatest.junit.JUnitSuite
-import org.scalatest.matchers.MustMatchers
-import org.scalatest.mock.MockitoSugar
-import org.mockito.Mockito._
-import org.mockito.Matchers._
-import org.junit.{ Before, After, Test }
-
-import javax.ws.rs.core.{ SecurityContext, Context, Response }
-import com.sun.jersey.spi.container.{ ResourceFilterFactory, ContainerRequest, ContainerRequestFilter, ContainerResponse, ContainerResponseFilter, ResourceFilter }
-import com.sun.jersey.core.util.Base64
-
-object BasicAuthenticatorSpec {
- class BasicAuthenticator extends BasicAuthenticationActor {
- def verify(odc: Option[BasicCredentials]): Option[UserInfo] = odc match {
- case Some(dc) ⇒ Some(UserInfo("foo", "bar", "ninja" :: "chef" :: Nil))
- case _ ⇒ None
- }
- override def realm = "test"
- }
-}
-
-class BasicAuthenticatorSpec extends junit.framework.TestCase
- with Suite with MockitoSugar with MustMatchers {
- import BasicAuthenticatorSpec._
-
- val authenticator = actorOf[BasicAuthenticator]
- authenticator.start()
-
- @Test
- def testChallenge = {
- val req = mock[ContainerRequest]
-
- val result = (authenticator !! (Authenticate(req, List("foo")), 10000)).as[Response].get
-
- // the actor replies with a challenge for the browser
- result.getStatus must equal(Response.Status.UNAUTHORIZED.getStatusCode)
- result.getMetadata.get("WWW-Authenticate").get(0).toString must startWith("Basic")
- }
-
- @Test
- def testAuthenticationSuccess = {
- val req = mock[ContainerRequest]
- // fake a basic auth header -> this will authenticate the user
- when(req.getHeaderValue("Authorization")).thenReturn("Basic " + new String(Base64.encode("foo:bar")))
-
- // fake a request authorization -> this will authorize the user
- when(req.isUserInRole("chef")).thenReturn(true)
-
- val result = (authenticator !! (Authenticate(req, List("chef")), 10000)).as[AnyRef].get
-
- result must be(OK)
- // the authenticator must have set a security context
- verify(req).setSecurityContext(any[SecurityContext])
- }
-
- @Test
- def testUnauthorized = {
- val req = mock[ContainerRequest]
-
- // fake a basic auth header -> this will authenticate the user
- when(req.getHeaderValue("Authorization")).thenReturn("Basic " + new String(Base64.encode("foo:bar")))
- when(req.isUserInRole("chef")).thenReturn(false) // this will deny access
-
- val result = (authenticator !! (Authenticate(req, List("chef")), 10000)).as[Response].get
-
- result.getStatus must equal(Response.Status.FORBIDDEN.getStatusCode)
-
- // the authenticator must have set a security context
- verify(req).setSecurityContext(any[SecurityContext])
- }
-}
-
diff --git a/akka-http/src/test/scala/config/ConfigSpec.scala b/akka-http/src/test/scala/config/ConfigSpec.scala
index 2b21f3cc34..fe4ad0c2f9 100644
--- a/akka-http/src/test/scala/config/ConfigSpec.scala
+++ b/akka-http/src/test/scala/config/ConfigSpec.scala
@@ -16,17 +16,10 @@ class ConfigSpec extends WordSpec with MustMatchers {
"contain all configuration properties for akka-http that are used in code with their correct defaults" in {
import Config.config._
- getString("akka.http.authenticator") must equal(Some("N/A"))
getBool("akka.http.connection-close") must equal(Some(true))
getString("akka.http.expired-header-name") must equal(Some("Async-Timeout"))
- getList("akka.http.filters") must equal(List("akka.security.AkkaSecurityFilterFactory"))
- getList("akka.http.resource-packages") must equal(Nil)
getString("akka.http.hostname") must equal(Some("localhost"))
getString("akka.http.expired-header-value") must equal(Some("expired"))
- getString("akka.http.kerberos.servicePrincipal") must equal(Some("N/A"))
- getString("akka.http.kerberos.keyTabLocation") must equal(Some("N/A"))
- getString("akka.http.kerberos.kerberosDebug") must equal(Some("N/A"))
- getString("akka.http.kerberos.realm") must equal(Some(""))
getInt("akka.http.port") must equal(Some(9998))
getBool("akka.http.root-actor-builtin") must equal(Some(true))
getString("akka.http.root-actor-id") must equal(Some("_httproot"))
diff --git a/akka-remote/src/main/java/akka/remote/protocol/RemoteProtocol.java b/akka-remote/src/main/java/akka/remote/protocol/RemoteProtocol.java
index 4fd11c3cbe..8f6c5409cf 100644
--- a/akka-remote/src/main/java/akka/remote/protocol/RemoteProtocol.java
+++ b/akka-remote/src/main/java/akka/remote/protocol/RemoteProtocol.java
@@ -75,6 +75,142 @@ public final class RemoteProtocol {
// @@protoc_insertion_point(enum_scope:CommandType)
}
+ public enum ReplicationStorageType
+ implements com.google.protobuf.ProtocolMessageEnum {
+ TRANSIENT(0, 1),
+ TRANSACTION_LOG(1, 2),
+ DATA_GRID(2, 3),
+ ;
+
+
+ public final int getNumber() { return value; }
+
+ public static ReplicationStorageType valueOf(int value) {
+ switch (value) {
+ case 1: return TRANSIENT;
+ case 2: return TRANSACTION_LOG;
+ case 3: return DATA_GRID;
+ default: return null;
+ }
+ }
+
+ public static com.google.protobuf.Internal.EnumLiteMap<ReplicationStorageType>
+ internalGetValueMap() {
+ return internalValueMap;
+ }
+ private static com.google.protobuf.Internal.EnumLiteMap<ReplicationStorageType>
+ internalValueMap =
+ new com.google.protobuf.Internal.EnumLiteMap<ReplicationStorageType>() {
+ public ReplicationStorageType findValueByNumber(int number) {
+ return ReplicationStorageType.valueOf(number)
+ ; }
+ };
+
+ public final com.google.protobuf.Descriptors.EnumValueDescriptor
+ getValueDescriptor() {
+ return getDescriptor().getValues().get(index);
+ }
+ public final com.google.protobuf.Descriptors.EnumDescriptor
+ getDescriptorForType() {
+ return getDescriptor();
+ }
+ public static final com.google.protobuf.Descriptors.EnumDescriptor
+ getDescriptor() {
+ return akka.remote.protocol.RemoteProtocol.getDescriptor().getEnumTypes().get(1);
+ }
+
+ private static final ReplicationStorageType[] VALUES = {
+ TRANSIENT, TRANSACTION_LOG, DATA_GRID,
+ };
+ public static ReplicationStorageType valueOf(
+ com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
+ if (desc.getType() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "EnumValueDescriptor is not for this type.");
+ }
+ return VALUES[desc.getIndex()];
+ }
+ private final int index;
+ private final int value;
+ private ReplicationStorageType(int index, int value) {
+ this.index = index;
+ this.value = value;
+ }
+
+ static {
+ akka.remote.protocol.RemoteProtocol.getDescriptor();
+ }
+
+ // @@protoc_insertion_point(enum_scope:ReplicationStorageType)
+ }
+
+ public enum ReplicationStrategyType
+ implements com.google.protobuf.ProtocolMessageEnum {
+ WRITE_THROUGH(0, 1),
+ WRITE_BEHIND(1, 2),
+ ;
+
+
+ public final int getNumber() { return value; }
+
+ public static ReplicationStrategyType valueOf(int value) {
+ switch (value) {
+ case 1: return WRITE_THROUGH;
+ case 2: return WRITE_BEHIND;
+ default: return null;
+ }
+ }
+
+ public static com.google.protobuf.Internal.EnumLiteMap<ReplicationStrategyType>
+ internalGetValueMap() {
+ return internalValueMap;
+ }
+ private static com.google.protobuf.Internal.EnumLiteMap<ReplicationStrategyType>
+ internalValueMap =
+ new com.google.protobuf.Internal.EnumLiteMap<ReplicationStrategyType>() {
+ public ReplicationStrategyType findValueByNumber(int number) {
+ return ReplicationStrategyType.valueOf(number)
+ ; }
+ };
+
+ public final com.google.protobuf.Descriptors.EnumValueDescriptor
+ getValueDescriptor() {
+ return getDescriptor().getValues().get(index);
+ }
+ public final com.google.protobuf.Descriptors.EnumDescriptor
+ getDescriptorForType() {
+ return getDescriptor();
+ }
+ public static final com.google.protobuf.Descriptors.EnumDescriptor
+ getDescriptor() {
+ return akka.remote.protocol.RemoteProtocol.getDescriptor().getEnumTypes().get(2);
+ }
+
+ private static final ReplicationStrategyType[] VALUES = {
+ WRITE_THROUGH, WRITE_BEHIND,
+ };
+ public static ReplicationStrategyType valueOf(
+ com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
+ if (desc.getType() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "EnumValueDescriptor is not for this type.");
+ }
+ return VALUES[desc.getIndex()];
+ }
+ private final int index;
+ private final int value;
+ private ReplicationStrategyType(int index, int value) {
+ this.index = index;
+ this.value = value;
+ }
+
+ static {
+ akka.remote.protocol.RemoteProtocol.getDescriptor();
+ }
+
+ // @@protoc_insertion_point(enum_scope:ReplicationStrategyType)
+ }
+
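// A minimal sketch, assuming standard generated-enum behaviour: the constants above carry the
// wire numbers declared in RemoteProtocol.proto, and valueOf returns null for an unknown number
// so the parser can divert it into the unknown-field set (see mergeVarintField further below).
import akka.remote.protocol.RemoteProtocol.{ ReplicationStorageType, ReplicationStrategyType }

object ReplicationEnumCheck extends App {
  assert(ReplicationStorageType.TRANSACTION_LOG.getNumber == 2)
  assert(ReplicationStorageType.valueOf(3) eq ReplicationStorageType.DATA_GRID)
  assert(ReplicationStrategyType.WRITE_BEHIND.getNumber == 2)
  assert(ReplicationStrategyType.valueOf(99) eq null)
}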
public enum SerializationSchemeType
implements com.google.protobuf.ProtocolMessageEnum {
JAVA(0, 1),
@@ -120,7 +256,7 @@ public final class RemoteProtocol {
}
public static final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptor() {
- return akka.remote.protocol.RemoteProtocol.getDescriptor().getEnumTypes().get(1);
+ return akka.remote.protocol.RemoteProtocol.getDescriptor().getEnumTypes().get(3);
}
private static final SerializationSchemeType[] VALUES = {
@@ -187,7 +323,7 @@ public final class RemoteProtocol {
}
public static final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptor() {
- return akka.remote.protocol.RemoteProtocol.getDescriptor().getEnumTypes().get(2);
+ return akka.remote.protocol.RemoteProtocol.getDescriptor().getEnumTypes().get(4);
}
private static final LifeCycleType[] VALUES = {
@@ -2172,8 +2308,22 @@ public final class RemoteProtocol {
public boolean hasHotswapStack() { return hasHotswapStack; }
public com.google.protobuf.ByteString getHotswapStack() { return hotswapStack_; }
- // repeated .RemoteMessageProtocol messages = 11;
- public static final int MESSAGES_FIELD_NUMBER = 11;
+ // optional .ReplicationStorageType replicationStorage = 11;
+ public static final int REPLICATIONSTORAGE_FIELD_NUMBER = 11;
+ private boolean hasReplicationStorage;
+ private akka.remote.protocol.RemoteProtocol.ReplicationStorageType replicationStorage_;
+ public boolean hasReplicationStorage() { return hasReplicationStorage; }
+ public akka.remote.protocol.RemoteProtocol.ReplicationStorageType getReplicationStorage() { return replicationStorage_; }
+
+ // optional .ReplicationStrategyType replicationStrategy = 12;
+ public static final int REPLICATIONSTRATEGY_FIELD_NUMBER = 12;
+ private boolean hasReplicationStrategy;
+ private akka.remote.protocol.RemoteProtocol.ReplicationStrategyType replicationStrategy_;
+ public boolean hasReplicationStrategy() { return hasReplicationStrategy; }
+ public akka.remote.protocol.RemoteProtocol.ReplicationStrategyType getReplicationStrategy() { return replicationStrategy_; }
+
+ // repeated .RemoteMessageProtocol messages = 13;
+ public static final int MESSAGES_FIELD_NUMBER = 13;
private java.util.List<akka.remote.protocol.RemoteProtocol.RemoteMessageProtocol> messages_ =
java.util.Collections.emptyList();
public java.util.List<akka.remote.protocol.RemoteProtocol.RemoteMessageProtocol> getMessagesList() {
@@ -2188,6 +2338,8 @@ public final class RemoteProtocol {
uuid_ = akka.remote.protocol.RemoteProtocol.UuidProtocol.getDefaultInstance();
lifeCycle_ = akka.remote.protocol.RemoteProtocol.LifeCycleProtocol.getDefaultInstance();
supervisor_ = akka.remote.protocol.RemoteProtocol.RemoteActorRefProtocol.getDefaultInstance();
+ replicationStorage_ = akka.remote.protocol.RemoteProtocol.ReplicationStorageType.TRANSIENT;
+ replicationStrategy_ = akka.remote.protocol.RemoteProtocol.ReplicationStrategyType.WRITE_THROUGH;
}
public final boolean isInitialized() {
if (!hasUuid) return false;
@@ -2239,8 +2391,14 @@ public final class RemoteProtocol {
if (hasHotswapStack()) {
output.writeBytes(10, getHotswapStack());
}
+ if (hasReplicationStorage()) {
+ output.writeEnum(11, getReplicationStorage().getNumber());
+ }
+ if (hasReplicationStrategy()) {
+ output.writeEnum(12, getReplicationStrategy().getNumber());
+ }
for (akka.remote.protocol.RemoteProtocol.RemoteMessageProtocol element : getMessagesList()) {
- output.writeMessage(11, element);
+ output.writeMessage(13, element);
}
getUnknownFields().writeTo(output);
}
@@ -2291,9 +2449,17 @@ public final class RemoteProtocol {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(10, getHotswapStack());
}
+ if (hasReplicationStorage()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeEnumSize(11, getReplicationStorage().getNumber());
+ }
+ if (hasReplicationStrategy()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeEnumSize(12, getReplicationStrategy().getNumber());
+ }
for (akka.remote.protocol.RemoteProtocol.RemoteMessageProtocol element : getMessagesList()) {
size += com.google.protobuf.CodedOutputStream
- .computeMessageSize(11, element);
+ .computeMessageSize(13, element);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
@@ -2487,6 +2653,12 @@ public final class RemoteProtocol {
if (other.hasHotswapStack()) {
setHotswapStack(other.getHotswapStack());
}
+ if (other.hasReplicationStorage()) {
+ setReplicationStorage(other.getReplicationStorage());
+ }
+ if (other.hasReplicationStrategy()) {
+ setReplicationStrategy(other.getReplicationStrategy());
+ }
if (!other.messages_.isEmpty()) {
if (result.messages_.isEmpty()) {
result.messages_ = new java.util.ArrayList<akka.remote.protocol.RemoteProtocol.RemoteMessageProtocol>();
@@ -2573,7 +2745,27 @@ public final class RemoteProtocol {
setHotswapStack(input.readBytes());
break;
}
- case 90: {
+ case 88: {
+ int rawValue = input.readEnum();
+ akka.remote.protocol.RemoteProtocol.ReplicationStorageType value = akka.remote.protocol.RemoteProtocol.ReplicationStorageType.valueOf(rawValue);
+ if (value == null) {
+ unknownFields.mergeVarintField(11, rawValue);
+ } else {
+ setReplicationStorage(value);
+ }
+ break;
+ }
+ case 96: {
+ int rawValue = input.readEnum();
+ akka.remote.protocol.RemoteProtocol.ReplicationStrategyType value = akka.remote.protocol.RemoteProtocol.ReplicationStrategyType.valueOf(rawValue);
+ if (value == null) {
+ unknownFields.mergeVarintField(12, rawValue);
+ } else {
+ setReplicationStrategy(value);
+ }
+ break;
+ }
+ case 106: {
akka.remote.protocol.RemoteProtocol.RemoteMessageProtocol.Builder subBuilder = akka.remote.protocol.RemoteProtocol.RemoteMessageProtocol.newBuilder();
input.readMessage(subBuilder, extensionRegistry);
addMessages(subBuilder.buildPartial());
@@ -2836,7 +3028,49 @@ public final class RemoteProtocol {
return this;
}
- // repeated .RemoteMessageProtocol messages = 11;
+ // optional .ReplicationStorageType replicationStorage = 11;
+ public boolean hasReplicationStorage() {
+ return result.hasReplicationStorage();
+ }
+ public akka.remote.protocol.RemoteProtocol.ReplicationStorageType getReplicationStorage() {
+ return result.getReplicationStorage();
+ }
+ public Builder setReplicationStorage(akka.remote.protocol.RemoteProtocol.ReplicationStorageType value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ result.hasReplicationStorage = true;
+ result.replicationStorage_ = value;
+ return this;
+ }
+ public Builder clearReplicationStorage() {
+ result.hasReplicationStorage = false;
+ result.replicationStorage_ = akka.remote.protocol.RemoteProtocol.ReplicationStorageType.TRANSIENT;
+ return this;
+ }
+
+ // optional .ReplicationStrategyType replicationStrategy = 12;
+ public boolean hasReplicationStrategy() {
+ return result.hasReplicationStrategy();
+ }
+ public akka.remote.protocol.RemoteProtocol.ReplicationStrategyType getReplicationStrategy() {
+ return result.getReplicationStrategy();
+ }
+ public Builder setReplicationStrategy(akka.remote.protocol.RemoteProtocol.ReplicationStrategyType value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ result.hasReplicationStrategy = true;
+ result.replicationStrategy_ = value;
+ return this;
+ }
+ public Builder clearReplicationStrategy() {
+ result.hasReplicationStrategy = false;
+ result.replicationStrategy_ = akka.remote.protocol.RemoteProtocol.ReplicationStrategyType.WRITE_THROUGH;
+ return this;
+ }
+
+ // repeated .RemoteMessageProtocol messages = 13;
public java.util.List<akka.remote.protocol.RemoteProtocol.RemoteMessageProtocol> getMessagesList() {
return java.util.Collections.unmodifiableList(result.messages_);
}
@@ -5675,33 +5909,39 @@ public final class RemoteProtocol {
"\013commandType\030\002 \002(\0162\014.CommandType\"U\n\026Remo" +
"teActorRefProtocol\022\017\n\007address\030\001 \002(\t\022\031\n\021i" +
"netSocketAddress\030\002 \002(\014\022\017\n\007timeout\030\003 \001(\004\"" +
- "\323\002\n\032SerializedActorRefProtocol\022\033\n\004uuid\030\001" +
+ "\277\003\n\032SerializedActorRefProtocol\022\033\n\004uuid\030\001" +
" \002(\0132\r.UuidProtocol\022\017\n\007address\030\002 \002(\t\022\026\n\016" +
"actorClassname\030\003 \002(\t\022\025\n\ractorInstance\030\004 " +
"\001(\014\022\033\n\023serializerClassname\030\005 \001(\t\022\017\n\007time" +
"out\030\006 \001(\004\022\026\n\016receiveTimeout\030\007 \001(\004\022%\n\tlif",
"eCycle\030\010 \001(\0132\022.LifeCycleProtocol\022+\n\nsupe" +
"rvisor\030\t \001(\0132\027.RemoteActorRefProtocol\022\024\n" +
- "\014hotswapStack\030\n \001(\014\022(\n\010messages\030\013 \003(\0132\026." +
- "RemoteMessageProtocol\"g\n\037SerializedTyped" +
- "ActorRefProtocol\022-\n\010actorRef\030\001 \002(\0132\033.Ser" +
- "ializedActorRefProtocol\022\025\n\rinterfaceName" +
- "\030\002 \002(\t\";\n\017MessageProtocol\022\017\n\007message\030\001 \002" +
- "(\014\022\027\n\017messageManifest\030\002 \001(\014\"R\n\021ActorInfo" +
- "Protocol\022\033\n\004uuid\030\001 \002(\0132\r.UuidProtocol\022\017\n" +
- "\007timeout\030\002 \002(\004\022\017\n\007address\030\003 \001(\t\")\n\014UuidP",
- "rotocol\022\014\n\004high\030\001 \002(\004\022\013\n\003low\030\002 \002(\004\"3\n\025Me" +
- "tadataEntryProtocol\022\013\n\003key\030\001 \002(\t\022\r\n\005valu" +
- "e\030\002 \002(\014\"6\n\021LifeCycleProtocol\022!\n\tlifeCycl" +
- "e\030\001 \002(\0162\016.LifeCycleType\"1\n\017AddressProtoc" +
- "ol\022\020\n\010hostname\030\001 \002(\t\022\014\n\004port\030\002 \002(\r\"7\n\021Ex" +
- "ceptionProtocol\022\021\n\tclassname\030\001 \002(\t\022\017\n\007me" +
- "ssage\030\002 \002(\t*(\n\013CommandType\022\013\n\007CONNECT\020\001\022" +
- "\014\n\010SHUTDOWN\020\002*]\n\027SerializationSchemeType" +
- "\022\010\n\004JAVA\020\001\022\013\n\007SBINARY\020\002\022\016\n\nSCALA_JSON\020\003\022" +
- "\r\n\tJAVA_JSON\020\004\022\014\n\010PROTOBUF\020\005*-\n\rLifeCycl",
- "eType\022\r\n\tPERMANENT\020\001\022\r\n\tTEMPORARY\020\002B\030\n\024a" +
- "kka.remote.protocolH\001"
+ "\014hotswapStack\030\n \001(\014\0223\n\022replicationStorag" +
+ "e\030\013 \001(\0162\027.ReplicationStorageType\0225\n\023repl" +
+ "icationStrategy\030\014 \001(\0162\030.ReplicationStrat" +
+ "egyType\022(\n\010messages\030\r \003(\0132\026.RemoteMessag" +
+ "eProtocol\"g\n\037SerializedTypedActorRefProt" +
+ "ocol\022-\n\010actorRef\030\001 \002(\0132\033.SerializedActor" +
+ "RefProtocol\022\025\n\rinterfaceName\030\002 \002(\t\";\n\017Me" +
+ "ssageProtocol\022\017\n\007message\030\001 \002(\014\022\027\n\017messag",
+ "eManifest\030\002 \001(\014\"R\n\021ActorInfoProtocol\022\033\n\004" +
+ "uuid\030\001 \002(\0132\r.UuidProtocol\022\017\n\007timeout\030\002 \002" +
+ "(\004\022\017\n\007address\030\003 \001(\t\")\n\014UuidProtocol\022\014\n\004h" +
+ "igh\030\001 \002(\004\022\013\n\003low\030\002 \002(\004\"3\n\025MetadataEntryP" +
+ "rotocol\022\013\n\003key\030\001 \002(\t\022\r\n\005value\030\002 \002(\014\"6\n\021L" +
+ "ifeCycleProtocol\022!\n\tlifeCycle\030\001 \002(\0162\016.Li" +
+ "feCycleType\"1\n\017AddressProtocol\022\020\n\010hostna" +
+ "me\030\001 \002(\t\022\014\n\004port\030\002 \002(\r\"7\n\021ExceptionProto" +
+ "col\022\021\n\tclassname\030\001 \002(\t\022\017\n\007message\030\002 \002(\t*" +
+ "(\n\013CommandType\022\013\n\007CONNECT\020\001\022\014\n\010SHUTDOWN\020",
+ "\002*K\n\026ReplicationStorageType\022\r\n\tTRANSIENT" +
+ "\020\001\022\023\n\017TRANSACTION_LOG\020\002\022\r\n\tDATA_GRID\020\003*>" +
+ "\n\027ReplicationStrategyType\022\021\n\rWRITE_THROU" +
+ "GH\020\001\022\020\n\014WRITE_BEHIND\020\002*]\n\027SerializationS" +
+ "chemeType\022\010\n\004JAVA\020\001\022\013\n\007SBINARY\020\002\022\016\n\nSCAL" +
+ "A_JSON\020\003\022\r\n\tJAVA_JSON\020\004\022\014\n\010PROTOBUF\020\005*-\n" +
+ "\rLifeCycleType\022\r\n\tPERMANENT\020\001\022\r\n\tTEMPORA" +
+ "RY\020\002B\030\n\024akka.remote.protocolH\001"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -5745,7 +5985,7 @@ public final class RemoteProtocol {
internal_static_SerializedActorRefProtocol_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_SerializedActorRefProtocol_descriptor,
- new java.lang.String[] { "Uuid", "Address", "ActorClassname", "ActorInstance", "SerializerClassname", "Timeout", "ReceiveTimeout", "LifeCycle", "Supervisor", "HotswapStack", "Messages", },
+ new java.lang.String[] { "Uuid", "Address", "ActorClassname", "ActorInstance", "SerializerClassname", "Timeout", "ReceiveTimeout", "LifeCycle", "Supervisor", "HotswapStack", "ReplicationStorage", "ReplicationStrategy", "Messages", },
akka.remote.protocol.RemoteProtocol.SerializedActorRefProtocol.class,
akka.remote.protocol.RemoteProtocol.SerializedActorRefProtocol.Builder.class);
internal_static_SerializedTypedActorRefProtocol_descriptor =
diff --git a/akka-remote/src/main/protocol/RemoteProtocol.proto b/akka-remote/src/main/protocol/RemoteProtocol.proto
index 2a25ff6e0e..be1020fe7e 100644
--- a/akka-remote/src/main/protocol/RemoteProtocol.proto
+++ b/akka-remote/src/main/protocol/RemoteProtocol.proto
@@ -46,6 +46,23 @@ enum CommandType {
SHUTDOWN = 2;
}
+/**
+ * Defines the type of the ReplicationStorage
+ */
+enum ReplicationStorageType {
+ TRANSIENT = 1;
+ TRANSACTION_LOG = 2;
+ DATA_GRID = 3;
+}
+
+/**
+ * Defines the type of the ReplicationStrategy
+ */
+enum ReplicationStrategyType {
+ WRITE_THROUGH = 1;
+ WRITE_BEHIND = 2;
+}
+
/**
* Defines a remote ActorRef that "remembers" and uses its original Actor instance
* on the original node.
@@ -72,7 +89,9 @@ message SerializedActorRefProtocol {
optional LifeCycleProtocol lifeCycle = 8;
optional RemoteActorRefProtocol supervisor = 9;
optional bytes hotswapStack = 10;
- repeated RemoteMessageProtocol messages = 11;
+ optional ReplicationStorageType replicationStorage = 11;
+ optional ReplicationStrategyType replicationStrategy = 12;
+ repeated RemoteMessageProtocol messages = 13;
}
/**
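A minimal sketch, assuming the generated SerializedActorRefProtocol builder (uuid, address and actorClassname are its required fields); the address and class name below are hypothetical, and the two new optional fields are set through the accessors added in RemoteProtocol.java above:

import akka.remote.protocol.RemoteProtocol.{ SerializedActorRefProtocol, UuidProtocol, ReplicationStorageType, ReplicationStrategyType }

object ReplicatedRefExample extends App {
  val serializedRef = SerializedActorRefProtocol.newBuilder()
    .setUuid(UuidProtocol.newBuilder().setHigh(1L).setLow(2L).build())
    .setAddress("sample-actor")                                    // hypothetical address
    .setActorClassname("sample.SampleActor")                       // hypothetical class name
    .setReplicationStorage(ReplicationStorageType.TRANSACTION_LOG) // field 11
    .setReplicationStrategy(ReplicationStrategyType.WRITE_BEHIND)  // field 12
    .build()

  assert(serializedRef.hasReplicationStorage && serializedRef.hasReplicationStrategy)
}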
diff --git a/akka-remote/src/main/scala/akka/remote/RemoteConfig.scala b/akka-remote/src/main/scala/akka/remote/RemoteConfig.scala
index 0708cffae7..d6803013f2 100644
--- a/akka-remote/src/main/scala/akka/remote/RemoteConfig.scala
+++ b/akka-remote/src/main/scala/akka/remote/RemoteConfig.scala
@@ -9,59 +9,58 @@ import akka.config.Config._
import akka.config.ConfigurationException
object RemoteClientSettings {
- val SECURE_COOKIE: Option[String] = config.getString("akka.remote.secure-cookie", "") match {
+ val SECURE_COOKIE: Option[String] = config.getString("akka.cluster.secure-cookie", "") match {
case "" ⇒ None
case cookie ⇒ Some(cookie)
}
- val RECONNECTION_TIME_WINDOW = Duration(config.getInt("akka.remote.client.reconnection-time-window", 600), TIME_UNIT).toMillis
- val READ_TIMEOUT = Duration(config.getInt("akka.remote.client.read-timeout", 10), TIME_UNIT)
- val RECONNECT_DELAY = Duration(config.getInt("akka.remote.client.reconnect-delay", 5), TIME_UNIT)
- val REAP_FUTURES_DELAY = Duration(config.getInt("akka.remote.client.reap-futures-delay", 5), TIME_UNIT)
- val MESSAGE_FRAME_SIZE = config.getInt("akka.remote.client.message-frame-size", 1048576)
+ val RECONNECTION_TIME_WINDOW = Duration(config.getInt("akka.cluster.client.reconnection-time-window", 600), TIME_UNIT).toMillis
+ val READ_TIMEOUT = Duration(config.getInt("akka.cluster.client.read-timeout", 10), TIME_UNIT)
+ val RECONNECT_DELAY = Duration(config.getInt("akka.cluster.client.reconnect-delay", 5), TIME_UNIT)
+ val REAP_FUTURES_DELAY = Duration(config.getInt("akka.cluster.client.reap-futures-delay", 5), TIME_UNIT)
+ val MESSAGE_FRAME_SIZE = config.getInt("akka.cluster.client.message-frame-size", 1048576)
}
object RemoteServerSettings {
- val isRemotingEnabled = config.getList("akka.enabled-modules").exists(_ == "remote")
- val MESSAGE_FRAME_SIZE = config.getInt("akka.remote.server.message-frame-size", 1048576)
- val SECURE_COOKIE = config.getString("akka.remote.secure-cookie")
+ val isRemotingEnabled = config.getList("akka.enabled-modules").exists(_ == "cluster")
+ val MESSAGE_FRAME_SIZE = config.getInt("akka.cluster.server.message-frame-size", 1048576)
+ val SECURE_COOKIE = config.getString("akka.cluster.secure-cookie")
val REQUIRE_COOKIE = {
- val requireCookie = config.getBool("akka.remote.server.require-cookie", false)
+ val requireCookie = config.getBool("akka.cluster.server.require-cookie", false)
if (isRemotingEnabled && requireCookie && SECURE_COOKIE.isEmpty) throw new ConfigurationException(
- "Configuration option 'akka.remote.server.require-cookie' is turned on but no secure cookie is defined in 'akka.remote.secure-cookie'.")
+ "Configuration option 'akka.cluster.server.require-cookie' is turned on but no secure cookie is defined in 'akka.cluster.secure-cookie'.")
requireCookie
}
- val UNTRUSTED_MODE = config.getBool("akka.remote.server.untrusted-mode", false)
- val HOSTNAME = config.getString("akka.remote.server.hostname", "localhost")
- val PORT = config.getInt("akka.remote.server.port", 2552)
- val CONNECTION_TIMEOUT_MILLIS = Duration(config.getInt("akka.remote.server.connection-timeout", 1), TIME_UNIT)
- val COMPRESSION_SCHEME = config.getString("akka.remote.compression-scheme", "zlib")
+ val UNTRUSTED_MODE = config.getBool("akka.cluster.server.untrusted-mode", false)
+ val PORT = config.getInt("akka.cluster.server.port", 2552)
+ val CONNECTION_TIMEOUT_MILLIS = Duration(config.getInt("akka.cluster.server.connection-timeout", 1), TIME_UNIT)
+ val COMPRESSION_SCHEME = config.getString("akka.cluster.compression-scheme", "zlib")
val ZLIB_COMPRESSION_LEVEL = {
- val level = config.getInt("akka.remote.zlib-compression-level", 6)
+ val level = config.getInt("akka.cluster.zlib-compression-level", 6)
if (level < 1 && level > 9) throw new IllegalArgumentException(
"zlib compression level has to be within 1-9, with 1 being fastest and 9 being the most compressed")
level
}
- val BACKLOG = config.getInt("akka.remote.server.backlog", 4096)
+ val BACKLOG = config.getInt("akka.cluster.server.backlog", 4096)
- val EXECUTION_POOL_KEEPALIVE = Duration(config.getInt("akka.remote.server.execution-pool-keepalive", 60), TIME_UNIT)
+ val EXECUTION_POOL_KEEPALIVE = Duration(config.getInt("akka.cluster.server.execution-pool-keepalive", 60), TIME_UNIT)
val EXECUTION_POOL_SIZE = {
- val sz = config.getInt("akka.remote.server.execution-pool-size", 16)
- if (sz < 1) throw new IllegalArgumentException("akka.remote.server.execution-pool-size is less than 1")
+ val sz = config.getInt("akka.cluster.server.execution-pool-size", 16)
+ if (sz < 1) throw new IllegalArgumentException("akka.cluster.server.execution-pool-size is less than 1")
sz
}
val MAX_CHANNEL_MEMORY_SIZE = {
- val sz = config.getInt("akka.remote.server.max-channel-memory-size", 0)
- if (sz < 0) throw new IllegalArgumentException("akka.remote.server.max-channel-memory-size is less than 0")
+ val sz = config.getInt("akka.cluster.server.max-channel-memory-size", 0)
+ if (sz < 0) throw new IllegalArgumentException("akka.cluster.server.max-channel-memory-size is less than 0")
sz
}
val MAX_TOTAL_MEMORY_SIZE = {
- val sz = config.getInt("akka.remote.server.max-total-memory-size", 0)
- if (sz < 0) throw new IllegalArgumentException("akka.remote.server.max-total-memory-size is less than 0")
+ val sz = config.getInt("akka.cluster.server.max-total-memory-size", 0)
+ if (sz < 0) throw new IllegalArgumentException("akka.cluster.server.max-total-memory-size is less than 0")
sz
}
}
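
The net effect of this hunk is a wholesale key rename from `akka.remote.*` to `akka.cluster.*`. A minimal sketch, illustration only, of resolving the renamed keys with the same `akka.config.Config` accessor the file already uses:

```scala
// Illustration only: resolving the renamed settings; user configs must move their
// overrides from akka.remote.* to akka.cluster.* accordingly.
import akka.config.Config.config

object ClusterKeys {
  val port = config.getInt("akka.cluster.server.port", 2552) // was akka.remote.server.port
  val secureCookie: Option[String] =
    config.getString("akka.cluster.secure-cookie", "") match {
      case ""     => None
      case cookie => Some(cookie)
    }
}
```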
diff --git a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala
index dae668ca95..c503557640 100644
--- a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala
+++ b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala
@@ -148,8 +148,8 @@ abstract class RemoteClient private[akka] (
val module: NettyRemoteClientModule,
val remoteAddress: InetSocketAddress) {
- val useTransactionLog = config.getBool("akka.remote.client.buffering.retry-message-send-on-failure", true)
- val transactionLogCapacity = config.getInt("akka.remote.client.buffering.capacity", -1)
+ val useTransactionLog = config.getBool("akka.cluster.client.buffering.retry-message-send-on-failure", true)
+ val transactionLogCapacity = config.getInt("akka.cluster.client.buffering.capacity", -1)
val name = this.getClass.getSimpleName + "@" +
remoteAddress.getAddress.getHostAddress + "::" +
@@ -879,9 +879,13 @@ class RemoteServerHandler(
case _ ⇒ None
}
- private def handleRemoteMessageProtocol(request: RemoteMessageProtocol, channel: Channel) = {
+ private def handleRemoteMessageProtocol(request: RemoteMessageProtocol, channel: Channel) = try {
EventHandler.debug(this, "Received remote message [%s]".format(request))
dispatchToActor(request, channel)
+ } catch {
+ case e: Exception ⇒
+ server.notifyListeners(RemoteServerError(e, server))
+ EventHandler.error(e, this, e.getMessage)
}
private def dispatchToActor(request: RemoteMessageProtocol, channel: Channel) {
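
With the new try/catch, a failed dispatch is reported as a `RemoteServerError` event rather than escaping the Netty handler. A hedged sketch of a listener actor follows; only `RemoteServerError(e, server)` appears in this patch, while the listener itself and the `addListener` registration are assumptions based on the 1.x remote module API.

```scala
// Hedged sketch: an actor reacting to RemoteServerError events produced by the
// try/catch above. The registration call at the bottom is an assumed API.
import akka.actor.Actor
import akka.remote.RemoteServerError

class RemoteErrorListener extends Actor {
  def receive = {
    case RemoteServerError(cause, server) =>
      // react to the failed dispatch, e.g. log it or raise an alert
      println("remote dispatch failed on " + server + ": " + cause.getMessage)
  }
}

// Assumed registration on the 1.x remote module:
// Actor.remote.addListener(Actor.actorOf(new RemoteErrorListener).start())
```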
diff --git a/akka-remote/src/main/scala/akka/serialization/SerializationProtocol.scala b/akka-remote/src/main/scala/akka/serialization/SerializationProtocol.scala
index 5c0b563c15..8e832cd391 100644
--- a/akka-remote/src/main/scala/akka/serialization/SerializationProtocol.scala
+++ b/akka-remote/src/main/scala/akka/serialization/SerializationProtocol.scala
@@ -4,20 +4,21 @@
package akka.serialization
-import akka.dispatch.MessageInvocation
-import akka.remote.protocol.RemoteProtocol._
-import akka.remote.protocol.RemoteProtocol
-
import akka.config.Supervision._
import akka.actor.{ uuidFrom, newUuid }
import akka.actor._
+import DeploymentConfig._
+import akka.dispatch.MessageInvocation
+import akka.util.ReflectiveAccess
+import akka.remote.{ RemoteClientSettings, MessageSerializer }
+import akka.remote.protocol.RemoteProtocol
+import RemoteProtocol._
import scala.collection.immutable.Stack
-import com.google.protobuf.ByteString
-import akka.util.ReflectiveAccess
import java.net.InetSocketAddress
-import akka.remote.{ RemoteClientSettings, MessageSerializer }
+
+import com.google.protobuf.ByteString
/**
* Module for local actor serialization.
@@ -31,19 +32,30 @@ object ActorSerialization {
def fromBinary[T <: Actor](bytes: Array[Byte]): ActorRef =
fromBinaryToLocalActorRef(bytes, None)
- def toBinary[T <: Actor](a: ActorRef, serializeMailBox: Boolean = true): Array[Byte] =
- toSerializedActorRefProtocol(a, serializeMailBox).toByteArray
+ def toBinary[T <: Actor](
+ a: ActorRef,
+ serializeMailBox: Boolean = true,
+ replicationScheme: ReplicationScheme = Transient)(implicit format: Serializer): Array[Byte] =
+ toSerializedActorRefProtocol(a, format, serializeMailBox, replicationScheme).toByteArray
// wrapper for implicits to be used by Java
def fromBinaryJ[T <: Actor](bytes: Array[Byte]): ActorRef =
fromBinary(bytes)
// wrapper for implicits to be used by Java
- def toBinaryJ[T <: Actor](a: ActorRef, srlMailBox: Boolean = true): Array[Byte] =
- toBinary(a, srlMailBox)
+ def toBinaryJ[T <: Actor](
+ a: ActorRef,
+ format: Serializer,
+ srlMailBox: Boolean,
+ replicationScheme: ReplicationScheme): Array[Byte] =
+ toBinary(a, srlMailBox, replicationScheme)(format)
private[akka] def toSerializedActorRefProtocol[T <: Actor](
- actorRef: ActorRef, serializeMailBox: Boolean = true): SerializedActorRefProtocol = {
+ actorRef: ActorRef,
+ format: Serializer,
+ serializeMailBox: Boolean,
+ replicationScheme: ReplicationScheme): SerializedActorRefProtocol = {
+
val lifeCycleProtocol: Option[LifeCycleProtocol] = {
actorRef.lifeCycle match {
case Permanent ⇒ Some(LifeCycleProtocol.newBuilder.setLifeCycle(LifeCycleType.PERMANENT).build)
@@ -58,6 +70,24 @@ object ActorSerialization {
.setActorClassname(actorRef.actorInstance.get.getClass.getName)
.setTimeout(actorRef.timeout)
+ replicationScheme match {
+ case _: Transient | Transient ⇒
+ builder.setReplicationStorage(ReplicationStorageType.TRANSIENT)
+
+ case Replication(storage, strategy) ⇒
+ val storageType = storage match {
+ case _: TransactionLog | TransactionLog ⇒ ReplicationStorageType.TRANSACTION_LOG
+ case _: DataGrid | DataGrid ⇒ ReplicationStorageType.DATA_GRID
+ }
+ builder.setReplicationStorage(storageType)
+
+ val strategyType = strategy match {
+ case _: WriteBehind | WriteBehind ⇒ ReplicationStrategyType.WRITE_BEHIND
+ case _: WriteThrough | WriteThrough ⇒ ReplicationStrategyType.WRITE_THROUGH
+ }
+ builder.setReplicationStrategy(strategyType)
+ }
+
if (serializeMailBox == true) {
if (actorRef.mailbox eq null) throw new IllegalActorStateException("Can't serialize an actor that has not been started.")
val messages =
@@ -120,6 +150,29 @@ object ActorSerialization {
if (protocol.hasSupervisor) Some(RemoteActorSerialization.fromProtobufToRemoteActorRef(protocol.getSupervisor, loader))
else None
+ import ReplicationStorageType._
+ import ReplicationStrategyType._
+
+ val replicationScheme =
+ if (protocol.hasReplicationStorage) {
+ protocol.getReplicationStorage match {
+ case TRANSIENT ⇒ Transient
+ case store ⇒
+ val storage = store match {
+ case TRANSACTION_LOG ⇒ TransactionLog
+ case DATA_GRID ⇒ DataGrid
+ }
+ val strategy = if (protocol.hasReplicationStrategy) {
+ protocol.getReplicationStrategy match {
+ case WRITE_THROUGH ⇒ WriteThrough
+ case WRITE_BEHIND ⇒ WriteBehind
+ }
+ } else throw new IllegalActorStateException(
+ "Expected replication strategy for replication storage [" + storage + "]")
+ Replication(storage, strategy)
+ }
+ } else Transient
+
val hotswap =
try {
Serialization.deserialize(protocol.getHotswapStack.toByteArray, classOf[Stack[PartialFunction[Any, Unit]]], loader) match {
@@ -133,7 +186,7 @@ object ActorSerialization {
case e: Exception ⇒ Stack[PartialFunction[Any, Unit]]()
}
- val classLoader = loader.getOrElse(getClass.getClassLoader)
+ val classLoader = loader.getOrElse(this.getClass.getClassLoader)
val factory = () ⇒ {
val actorClass = classLoader.loadClass(protocol.getActorClassname)
@@ -156,7 +209,8 @@ object ActorSerialization {
lifeCycle,
supervisor,
hotswap,
- factory)
+ factory,
+ replicationScheme)
val messages = protocol.getMessagesList.toArray.toList.asInstanceOf[List[RemoteMessageProtocol]]
messages.foreach(message ⇒ ar ! MessageSerializer.deserialize(message.getMessage, Some(classLoader)))
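
A hedged usage sketch of the widened serialization API above: `MyActor`, the `snapshot`/`restore` helpers and the implicit `Serializer` parameter are placeholders; the `ActorSerialization` calls and the replication types come from this patch, and `DeploymentConfig` is assumed to live in `akka.actor` as the surrounding imports suggest.

```scala
// Hedged usage sketch; helper names and the Serializer parameter are placeholders.
import akka.actor.{ Actor, ActorRef }
import akka.actor.DeploymentConfig._            // assumed location of ReplicationScheme & friends
import akka.serialization.{ ActorSerialization, Serializer }

object ReplicatedSnapshotExample {
  class MyActor extends Actor {
    def receive = { case _ => }
  }

  // Serialize with mailbox, marking the ref for write-through transaction-log replication.
  def snapshot(ref: ActorRef)(implicit format: Serializer): Array[Byte] =
    ActorSerialization.toBinary[MyActor](ref, serializeMailBox = true,
      replicationScheme = Replication(TransactionLog, WriteThrough))

  // On deserialization the replication scheme is read back from the new protobuf fields.
  def restore(bytes: Array[Byte]): ActorRef =
    ActorSerialization.fromBinary[MyActor](bytes)
}
```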
diff --git a/akka-slf4j/src/main/scala/akka/event/slf4j/SLF4J.scala b/akka-slf4j/src/main/scala/akka/event/slf4j/SLF4J.scala
index 7a1f9d2c5d..4ab3d1976e 100644
--- a/akka-slf4j/src/main/scala/akka/event/slf4j/SLF4J.scala
+++ b/akka-slf4j/src/main/scala/akka/event/slf4j/SLF4J.scala
@@ -39,7 +39,7 @@ class Slf4jEventHandler extends Actor with Logging {
def receive = {
case Error(cause, instance, message) ⇒
log.error("\n\t[{}]\n\t[{}]\n\t[{}]",
- Array[Any](instance.getClass.getName, message, stackTraceFor(cause)))
+ Array[AnyRef](instance.getClass.getName, message.asInstanceOf[AnyRef], stackTraceFor(cause)))
case Warning(instance, message) ⇒
log.warn("\n\t[{}]\n\t[{}]", instance.getClass.getName, message)
diff --git a/akka-spring/src/test/resources/property-config.xml b/akka-spring/src/test/resources/property-config.xml
index f199df7074..43d36852ed 100644
--- a/akka-spring/src/test/resources/property-config.xml
+++ b/akka-spring/src/test/resources/property-config.xml
@@ -15,7 +15,7 @@ http://akka.io/akka-2.0-SNAPSHOT.xsd">
-
+
diff --git a/akka-testkit/src/main/scala/akka/testkit/CallingThreadDispatcher.scala b/akka-testkit/src/main/scala/akka/testkit/CallingThreadDispatcher.scala
index 90d9bfda83..8e21641636 100644
--- a/akka-testkit/src/main/scala/akka/testkit/CallingThreadDispatcher.scala
+++ b/akka-testkit/src/main/scala/akka/testkit/CallingThreadDispatcher.scala
@@ -135,6 +135,8 @@ class CallingThreadDispatcher(val warnings: Boolean = true) extends MessageDispa
override def mailboxSize(actor: ActorRef) = getMailbox(actor).queue.size
+ def mailboxIsEmpty(actorRef: ActorRef): Boolean = getMailbox(actorRef).queue.isEmpty
+
private[akka] override def dispatch(handle: MessageInvocation) {
val mbox = getMailbox(handle.receiver)
val queue = mbox.queue
@@ -210,6 +212,7 @@ class CallingThreadDispatcher(val warnings: Boolean = true) extends MessageDispa
class NestingQueue {
private var q = new LinkedList[MessageInvocation]()
def size = q.size
+ def isEmpty = q.isEmpty
def push(handle: MessageInvocation) { q.offer(handle) }
def peek = q.peek
def pop = q.poll
diff --git a/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala b/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala
index 4e9d7b527d..a940d5ce01 100644
--- a/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala
+++ b/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala
@@ -15,11 +15,11 @@ import com.eaio.uuid.UUID
* overrides the dispatcher to CallingThreadDispatcher and sets the receiveTimeout to None. Otherwise,
* it acts just like a normal ActorRef. You may retrieve a reference to the underlying actor to test internal logic.
*
- *
* @author Roland Kuhn
* @since 1.1
*/
-class TestActorRef[T <: Actor](factory: () ⇒ T, address: String) extends LocalActorRef(factory, address) {
+class TestActorRef[T <: Actor](factory: () ⇒ T, address: String)
+ extends LocalActorRef(factory, address, DeploymentConfig.Transient) {
dispatcher = CallingThreadDispatcher.global
receiveTimeout = None
diff --git a/config/akka-reference.conf b/config/akka-reference.conf
index 16a3f872b1..0241386856 100644
--- a/config/akka-reference.conf
+++ b/config/akka-reference.conf
@@ -10,7 +10,7 @@
akka {
version = "2.0-SNAPSHOT" # Akka version, checked against the runtime version of Akka.
- enabled-modules = [] # Comma separated list of the enabled modules. Options: ["remote", "camel", "http"]
+ enabled-modules = [] # Comma separated list of the enabled modules. Options: ["cluster", "camel", "http"]
time-unit = "seconds" # Time unit for all timeout properties throughout the config
@@ -27,63 +27,76 @@ akka {
boot = []
actor {
- timeout = 5 # Default timeout for Future based invocations
- # - Actor: !! && !!!
- # - UntypedActor: sendRequestReply && sendRequestReplyFuture
- # - TypedActor: methods with non-void return type
- serialize-messages = off # Does a deep clone of (non-primitive) messages to ensure immutability
- throughput = 5 # Default throughput for all Dispatcher, set to 1 for complete fairness
- throughput-deadline-time = -1 # Default throughput deadline for all Dispatcher, set to 0 or negative for no deadline
- dispatcher-shutdown-timeout = 1 # Using the akka.time-unit, how long dispatchers by default will wait for new actors until they shut down
+ timeout = 5 # Default timeout for Future based invocations
+ # - Actor: !! && !!!
+ # - UntypedActor: sendRequestReply && sendRequestReplyFuture
+ # - TypedActor: methods with non-void return type
+ serialize-messages = off # Does a deep clone of (non-primitive) messages to ensure immutability
+ throughput = 5 # Default throughput for all Dispatcher, set to 1 for complete fairness
+ throughput-deadline-time = -1 # Default throughput deadline for all Dispatcher, set to 0 or negative for no deadline
+ dispatcher-shutdown-timeout = 1 # Using the akka.time-unit, how long dispatchers by default will wait for new actors until they shut down
deployment {
- # -------------------------------
- # -- all configuration options --
- # -------------------------------
+ service-ping { # stateless actor with replication factor 3 and round-robin load-balancer
- service-ping { # stateless actor with replication factor 3 and round-robin load-balancer
- router = "least-cpu" # routing (load-balance) scheme to use
- # available: "direct", "round-robin", "random", "least-cpu", "least-ram", "least-messages"
- # or: fully qualified class name of the router class
- # default is "direct";
- format = "akka.serialization.Format$Default$"
- clustered { # makes the actor available in the cluster registry
- # default (if omitted) is local non-clustered actor
- home = "node:node1" # defines the hostname, IP-address or node name of the "home" node for clustered actor
- # available: "host:", "ip:" and "node:"
- # default is "host:localhost"
- replicas = 3 # number of actor replicas in the cluster
- # available: positivoe integer (0-N) or the string "auto" for auto-scaling
- # if "auto" is used then 'home' has no meaning
- # default is '0', meaning no replicas;
- stateless = on # is the actor stateless or stateful
- # if turned 'on': actor is defined as stateless and can be load-balanced accordingly
- # if turned 'off' (or omitted): actor is defined as stateful which means replicatable through transaction log
- # default is 'off'
+ format = "akka.serialization.Format$Default$" # serializer for messages and actor instance
+
+ router = "least-cpu" # routing (load-balance) scheme to use
+ # available: "direct", "round-robin", "random",
+ # "least-cpu", "least-ram", "least-messages"
+ # or: fully qualified class name of the router class
+ # default is "direct";
+
+ clustered { # makes the actor available in the cluster registry
+ # default (if omitted) is local non-clustered actor
+
+ home = "node:node1" # hostname, IP-address or node name of the "home" node for clustered actor
+ # available: "host:", "ip:" and "node:"
+ # default is "host:localhost"
+
+ replicas = 3 # number of actor replicas in the cluster
+                                              # available: positive integer (0-N) or the string "auto" for auto-scaling
+ # if "auto" is used then 'home' has no meaning
+ # default is '0', meaning no replicas;
+
+ replication { # use replication or not?
+
+ # FIXME should we have this config option here? If so, implement it all through.
+ serialize-mailbox = on # should the actor mailbox be part of the serialized snapshot?
+
+ storage = "transaction-log" # storage model for replication
+ # available: "transaction-log" and "data-grid"
+ # default is "transaction-log"
+
+            strategy = "write-through"       # guarantees for replication
+ # available: "write-through" and "write-behind"
+ # default is "write-through"
+
+ }
}
}
}
default-dispatcher {
- type = "GlobalDispatcher" # Must be one of the following, all "Global*" are non-configurable
- # - Dispatcher
- # - BalancingDispatcher
- # - GlobalDispatcher
- keep-alive-time = 60 # Keep alive time for threads
- core-pool-size-factor = 1.0 # No of core threads ... ceil(available processors * factor)
- max-pool-size-factor = 4.0 # Max no of threads ... ceil(available processors * factor)
- executor-bounds = -1 # Makes the Executor bounded, -1 is unbounded
- allow-core-timeout = on # Allow core threads to time out
- rejection-policy = "caller-runs" # abort, caller-runs, discard-oldest, discard
- throughput = 5 # Throughput for Dispatcher, set to 1 for complete fairness
- throughput-deadline-time = -1 # Throughput deadline for Dispatcher, set to 0 or negative for no deadline
- mailbox-capacity = -1 # If negative (or zero) then an unbounded mailbox is used (default)
- # If positive then a bounded mailbox is used and the capacity is set using the property
- # NOTE: setting a mailbox to 'blocking' can be a bit dangerous, could lead to deadlock, use with care
- # The following are only used for Dispatcher and only if mailbox-capacity > 0
- mailbox-push-timeout-time = 10 # Specifies the timeout to add a new message to a mailbox that is full - negative number means infinite timeout
- # (in unit defined by the time-unit property)
+ type = "GlobalDispatcher" # Must be one of the following, all "Global*" are non-configurable
+ # - Dispatcher
+ # - BalancingDispatcher
+ # - GlobalDispatcher
+ keep-alive-time = 60 # Keep alive time for threads
+ core-pool-size-factor = 1.0 # No of core threads ... ceil(available processors * factor)
+ max-pool-size-factor = 4.0 # Max no of threads ... ceil(available processors * factor)
+ executor-bounds = -1 # Makes the Executor bounded, -1 is unbounded
+ allow-core-timeout = on # Allow core threads to time out
+ rejection-policy = "caller-runs" # abort, caller-runs, discard-oldest, discard
+ throughput = 5 # Throughput for Dispatcher, set to 1 for complete fairness
+ throughput-deadline-time = -1 # Throughput deadline for Dispatcher, set to 0 or negative for no deadline
+ mailbox-capacity = -1 # If negative (or zero) then an unbounded mailbox is used (default)
+ # If positive then a bounded mailbox is used and the capacity is set using the property
+ # NOTE: setting a mailbox to 'blocking' can be a bit dangerous, could lead to deadlock, use with care
+ # The following are only used for Dispatcher and only if mailbox-capacity > 0
+ mailbox-push-timeout-time = 10 # Specifies the timeout to add a new message to a mailbox that is full - negative number means infinite timeout
+ # (in unit defined by the time-unit property)
}
mailbox {
@@ -128,39 +141,36 @@ akka {
cluster {
name = "test-cluster"
- zookeeper-server-addresses = "localhost:2181"
+    zookeeper-server-addresses = "localhost:2181"  # comma-separated list of '<hostname>:<port>' elements
remote-server-port = 2552
max-time-to-wait-until-connected = 30
session-timeout = 60
connection-timeout = 60
use-compression = off
- remote-daemon-ack-timeout = 30 # Timeout for ACK of cluster operations, lik checking actor out etc.
- exclude-ref-node-in-replica-set = on # Should a replica be instantiated on the same node as the
- # cluster reference to the actor
- # Default: on
-
- replication {
- digest-type = "MAC" # Options: CRC32 (cheap & unsafe), MAC (expensive & secure using password)
- password = "secret" # FIXME: store open in file?
- ensemble-size = 3
- quorum-size = 2
- }
- }
-
- remote {
-
- # secure-cookie = "050E0A0D0D06010A00000900040D060F0C09060B" # generate your own with '$AKKA_HOME/scripts/generate_config_with_secure_cookie.sh' or using 'Crypt.generateSecureCookie'
- secure-cookie = ""
-
- compression-scheme = "zlib" # Options: "zlib" (lzf to come), leave out for no compression
- zlib-compression-level = 6 # Options: 0-9 (1 being fastest and 9 being the most compressed), default is 6
+    remote-daemon-ack-timeout = 30           # Timeout for ACK of cluster operations, like checking actors out etc.
+ exclude-ref-node-in-replica-set = on # Should a replica be instantiated on the same node as the
+ # cluster reference to the actor
+ # Default: on
+ compression-scheme = "zlib" # Options: "zlib" (lzf to come), leave out for no compression
+ zlib-compression-level = 6 # Options: 0-9 (1 being fastest and 9 being the most compressed), default is 6
+ # FIXME rename to transport
layer = "akka.remote.netty.NettyRemoteSupport"
+ secure-cookie = "" # generate your own with '$AKKA_HOME/scripts/generate_config_with_secure_cookie.sh'
+ # or using 'Crypt.generateSecureCookie'
+
+ replication {
+ digest-type = "MAC" # Options: CRC32 (cheap & unsafe), MAC (expensive & secure using password)
+ password = "secret" # FIXME: store open in file?
+ ensemble-size = 3
+ quorum-size = 2
+ snapshot-frequency = 1000 # The number of messages that should be logged between every actor snapshot
+      timeout = 30                           # Timeout for asynchronous (write-behind) operations
+ }
+
server {
- # FIXME remove hostname/port
- hostname = "localhost" # The hostname or IP that clients should connect to
- port = 2552 # The port clients should connect to. Default is 2552 (AKKA)
+ port = 2552 # The default remote server port clients should connect to. Default is 2552 (AKKA)
message-frame-size = 1048576 # Increase this if you want to be able to send messages with large payloads
connection-timeout = 1
require-cookie = off                     # Should the remote server require that its peers share the same secure-cookie (defined in the 'remote' section)?
@@ -182,7 +192,7 @@ akka {
read-timeout = 10
message-frame-size = 1048576
reap-futures-delay = 5
- reconnection-time-window = 600 # Maximum time window that a client should try to reconnect for
+ reconnection-time-window = 600 # Maximum time window that a client should try to reconnect for
}
}
@@ -204,40 +214,14 @@ akka {
hostname = "localhost"
port = 9998
- #If you are using akka.http.AkkaRestServlet
- filters = ["akka.security.AkkaSecurityFilterFactory"] # List with all jersey filters to use
- # resource-packages = ["sample.rest.scala",
- # "sample.rest.java",
- # "sample.security"] # List with all resource packages for your Jersey services
- resource-packages = []
-
- # The authentication service to use. Need to be overridden (sample now)
- # authenticator = "sample.security.BasicAuthenticationService"
- authenticator = "N/A"
-
- # Uncomment if you are using the KerberosAuthenticationActor
- # kerberos {
- # servicePrincipal = "HTTP/localhost@EXAMPLE.COM"
- # keyTabLocation = "URL to keytab"
- # kerberosDebug = "true"
- # realm = "EXAMPLE.COM"
- # }
- kerberos {
- servicePrincipal = "N/A"
- keyTabLocation = "N/A"
- kerberosDebug = "N/A"
- realm = ""
+ mist-dispatcher { # If you are using akka.http.AkkaMistServlet
+ #type = "GlobalDispatcher" # Uncomment if you want to use a different dispatcher than the default one for Comet
}
-
- # If you are using akka.http.AkkaMistServlet
- mist-dispatcher {
- #type = "GlobalDispatcher" # Uncomment if you want to use a different dispatcher than the default one for Comet
- }
- connection-close = true # toggles the addition of the "Connection" response header with a "close" value
- root-actor-id = "_httproot" # the id of the actor to use as the root endpoint
- root-actor-builtin = true # toggles the use of the built-in root endpoint base class
- timeout = 1000 # the default timeout for all async requests (in ms)
- expired-header-name = "Async-Timeout" # the name of the response header to use when an async request expires
- expired-header-value = "expired" # the value of the response header to use when an async request expires
+ connection-close = true # toggles the addition of the "Connection" response header with a "close" value
+ root-actor-id = "_httproot" # the id of the actor to use as the root endpoint
+ root-actor-builtin = true # toggles the use of the built-in root endpoint base class
+ timeout = 1000 # the default timeout for all async requests (in ms)
+ expired-header-name = "Async-Timeout" # the name of the response header to use when an async request expires
+ expired-header-value = "expired" # the value of the response header to use when an async request expires
}
}
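
The new `replication` block mirrors the `DeploymentConfig` types used by the serialization changes earlier in this patch. A hedged sketch of the string-to-type mapping implied by the comments above; the parsing helper itself is hypothetical, only the target types come from the patch.

```scala
// Hypothetical helper: maps the config strings documented above onto the
// DeploymentConfig replication types; not part of the patch.
import akka.actor.DeploymentConfig._

object ReplicationConfigMapping {
  def schemeFor(storage: String, strategy: String): ReplicationScheme =
    (storage, strategy) match {
      case ("transaction-log", "write-through") => Replication(TransactionLog, WriteThrough)
      case ("transaction-log", "write-behind")  => Replication(TransactionLog, WriteBehind)
      case ("data-grid",       "write-through") => Replication(DataGrid, WriteThrough)
      case ("data-grid",       "write-behind")  => Replication(DataGrid, WriteBehind)
      case _                                    => Transient // default: local, non-replicated
    }
}
```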
diff --git a/config/microkernel-server.xml b/config/microkernel-server.xml
new file mode 100644
index 0000000000..be7382405d
--- /dev/null
+++ b/config/microkernel-server.xml
@@ -0,0 +1,106 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ 300000
+ 2
+ false
+ 8443
+ 20000
+ 5000
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ -
+
+ /
+
+ akka.http.AkkaMistServlet
+ /*
+
+
+
+ -
+
+
+
+
+
+
+
+
+
+
+ true
+ true
+ true
+ 1000
+
+
diff --git a/project/build/AkkaProject.scala b/project/build/AkkaProject.scala
index 9e69f3faf3..a0462e2543 100644
--- a/project/build/AkkaProject.scala
+++ b/project/build/AkkaProject.scala
@@ -73,12 +73,15 @@ class AkkaParentProject(info: ProjectInfo) extends ParentProject(info) with Exec
lazy val jmsModuleConfig = ModuleConfiguration("javax.jms", JBossRepo)
lazy val jsr311ModuleConfig = ModuleConfiguration("javax.ws.rs", "jsr311-api", sbt.DefaultMavenRepository)
lazy val zookeeperModuleConfig = ModuleConfiguration("org.apache.hadoop.zookeeper", AkkaRepo)
+ lazy val protobufModuleConfig = ModuleConfiguration("com.google.protobuf", AkkaRepo)
lazy val zkclientModuleConfig = ModuleConfiguration("zkclient", AkkaRepo)
+ lazy val camelModuleConfig = ModuleConfiguration("org.apache.camel", "camel-core", AkkaRepo)
// -------------------------------------------------------------------------------------------------------------------
// Versions
// -------------------------------------------------------------------------------------------------------------------
lazy val CAMEL_VERSION = "2.7.1"
+ lazy val CAMEL_PATCH_VERSION = "2.7.1.1"
lazy val SPRING_VERSION = "3.0.5.RELEASE"
lazy val JACKSON_VERSION = "1.8.0"
lazy val JERSEY_VERSION = "1.3"
@@ -100,7 +103,7 @@ class AkkaParentProject(info: ProjectInfo) extends ParentProject(info) with Exec
// Compile
lazy val beanstalk = "beanstalk" % "beanstalk_client" % "1.4.5" //New BSD
lazy val bookkeeper = "org.apache.hadoop.zookeeper" % "bookkeeper" % ZOOKEEPER_VERSION //ApacheV2
- lazy val camel_core = "org.apache.camel" % "camel-core" % CAMEL_VERSION % "compile" //ApacheV2
+ lazy val camel_core = "org.apache.camel" % "camel-core" % CAMEL_PATCH_VERSION % "compile" //ApacheV2
lazy val commons_codec = "commons-codec" % "commons-codec" % "1.4" % "compile" //ApacheV2
lazy val commons_io = "commons-io" % "commons-io" % "2.0.1" % "compile" //ApacheV2
@@ -124,7 +127,7 @@ class AkkaParentProject(info: ProjectInfo) extends ParentProject(info) with Exec
lazy val multiverse = "org.multiverse" % "multiverse-alpha" % MULTIVERSE_VERSION % "compile" //ApacheV2
lazy val netty = "org.jboss.netty" % "netty" % "3.2.4.Final" % "compile" //ApacheV2
lazy val osgi_core = "org.osgi" % "org.osgi.core" % "4.2.0" //ApacheV2
- lazy val protobuf = "com.google.protobuf" % "protobuf-java" % "2.3.0" % "compile" //New BSD
+ lazy val protobuf = "com.google.protobuf" % "protobuf-java" % "2.4.1" % "compile" //New BSD
lazy val redis = "net.debasishg" % "redisclient_2.9.0" % "2.3.1" //ApacheV2
lazy val sjson = "net.debasishg" %% "sjson" % "0.11" % "compile" //ApacheV2
lazy val sjson_test = "net.debasishg" %% "sjson" % "0.11" % "test" //ApacheV2
@@ -133,7 +136,7 @@ class AkkaParentProject(info: ProjectInfo) extends ParentProject(info) with Exec
lazy val spring_context = "org.springframework" % "spring-context" % SPRING_VERSION % "compile" //ApacheV2
lazy val stax_api = "javax.xml.stream" % "stax-api" % "1.0-2" % "compile" //ApacheV2
- lazy val logback = "ch.qos.logback" % "logback-classic" % "0.9.28" % "runtime" //MIT
+ lazy val logback = "ch.qos.logback" % "logback-classic" % "0.9.28" % "runtime" //MIT
lazy val log4j = "log4j" % "log4j" % "1.2.15" //ApacheV2
lazy val zookeeper = "org.apache.hadoop.zookeeper" % "zookeeper" % ZOOKEEPER_VERSION //ApacheV2
lazy val zookeeper_lock = "org.apache.hadoop.zookeeper" % "zookeeper-recipes-lock" % ZOOKEEPER_VERSION //ApacheV2
@@ -141,14 +144,14 @@ class AkkaParentProject(info: ProjectInfo) extends ParentProject(info) with Exec
// Test
lazy val multiverse_test = "org.multiverse" % "multiverse-alpha" % MULTIVERSE_VERSION % "test" //ApacheV2
- lazy val commons_coll = "commons-collections" % "commons-collections" % "3.2.1" % "test" //ApacheV2
- lazy val testJetty = "org.eclipse.jetty" % "jetty-server" % JETTY_VERSION % "test" //Eclipse license
- lazy val testJettyWebApp = "org.eclipse.jetty" % "jetty-webapp" % JETTY_VERSION % "test" //Eclipse license
- lazy val junit = "junit" % "junit" % "4.5" % "test" //Common Public License 1.0
- lazy val mockito = "org.mockito" % "mockito-all" % "1.8.1" % "test" //MIT
- lazy val scalatest = "org.scalatest" %% "scalatest" % SCALATEST_VERSION % "test" //ApacheV2
- lazy val testLogback = "ch.qos.logback" % "logback-classic" % LOGBACK_VERSION % "test" // EPL 1.0 / LGPL 2.1
- lazy val camel_spring = "org.apache.camel" % "camel-spring" % CAMEL_VERSION % "test" //ApacheV2
+ lazy val commons_coll = "commons-collections" % "commons-collections" % "3.2.1" % "test" //ApacheV2
+ lazy val testJetty = "org.eclipse.jetty" % "jetty-server" % JETTY_VERSION % "test" //Eclipse license
+ lazy val testJettyWebApp = "org.eclipse.jetty" % "jetty-webapp" % JETTY_VERSION % "test" //Eclipse license
+ lazy val junit = "junit" % "junit" % "4.5" % "test" //Common Public License 1.0
+ lazy val mockito = "org.mockito" % "mockito-all" % "1.8.1" % "test" //MIT
+ lazy val scalatest = "org.scalatest" %% "scalatest" % SCALATEST_VERSION % "test" //ApacheV2
+ lazy val testLogback = "ch.qos.logback" % "logback-classic" % LOGBACK_VERSION % "test" // EPL 1.0 / LGPL 2.1
+ lazy val camel_spring = "org.apache.camel" % "camel-spring" % CAMEL_VERSION % "test" //ApacheV2
}
@@ -166,10 +169,10 @@ class AkkaParentProject(info: ProjectInfo) extends ParentProject(info) with Exec
lazy val akka_cluster = project("akka-cluster", "akka-cluster", new AkkaClusterProject(_), akka_remote)
lazy val akka_durable_mailboxes = project("akka-durable-mailboxes", "akka-durable-mailboxes", new AkkaDurableMailboxesParentProject(_), akka_remote)
- lazy val akka_camel = project("akka-camel", "akka-camel", new AkkaCamelProject(_), akka_actor, akka_slf4j)
+ //lazy val akka_camel = project("akka-camel", "akka-camel", new AkkaCamelProject(_), akka_actor, akka_slf4j)
//lazy val akka_camel_typed = project("akka-camel-typed", "akka-camel-typed", new AkkaCamelTypedProject(_), akka_actor, akka_slf4j, akka_camel)
//lazy val akka_spring = project("akka-spring", "akka-spring", new AkkaSpringProject(_), akka_remote, akka_actor, akka_camel)
- lazy val akka_kernel = project("akka-kernel", "akka-kernel", new AkkaKernelProject(_), akka_stm, akka_remote, akka_http, akka_slf4j, akka_camel)
+ //lazy val akka_kernel = project("akka-kernel", "akka-kernel", new AkkaKernelProject(_), akka_stm, akka_remote, akka_http, akka_slf4j, akka_camel)
lazy val akka_sbt_plugin = project("akka-sbt-plugin", "akka-sbt-plugin", new AkkaSbtPluginProject(_))
lazy val akka_tutorials = project("akka-tutorials", "akka-tutorials", new AkkaTutorialsParentProject(_), akka_actor)
@@ -614,16 +617,16 @@ class AkkaParentProject(info: ProjectInfo) extends ParentProject(info) with Exec
lazy val akka_sample_ants = project("akka-sample-ants", "akka-sample-ants",
new AkkaSampleAntsProject(_), akka_stm)
-// lazy val akka_sample_chat = project("akka-sample-chat", "akka-sample-chat",
-// new AkkaSampleChatProject(_), akka_remote)
+ // lazy val akka_sample_chat = project("akka-sample-chat", "akka-sample-chat",
+ // new AkkaSampleChatProject(_), akka_remote)
lazy val akka_sample_fsm = project("akka-sample-fsm", "akka-sample-fsm",
new AkkaSampleFSMProject(_), akka_actor)
- lazy val akka_sample_hello = project("akka-sample-hello", "akka-sample-hello",
- new AkkaSampleHelloProject(_), akka_kernel)
+ // lazy val akka_sample_hello = project("akka-sample-hello", "akka-sample-hello",
+ // new AkkaSampleHelloProject(_), akka_kernel)
lazy val akka_sample_osgi = project("akka-sample-osgi", "akka-sample-osgi",
new AkkaSampleOsgiProject(_), akka_actor)
-// lazy val akka_sample_remote = project("akka-sample-remote", "akka-sample-remote",
-// new AkkaSampleRemoteProject(_), akka_remote)
+ // lazy val akka_sample_remote = project("akka-sample-remote", "akka-sample-remote",
+ // new AkkaSampleRemoteProject(_), akka_remote)
lazy val publishRelease = {
val releaseConfiguration = new DefaultPublishConfiguration(localReleaseRepository, "release", false)
@@ -788,8 +791,8 @@ class AkkaParentProject(info: ProjectInfo) extends ParentProject(info) with Exec
lazy val akkaCoreDist = project("core", "akka-dist-core", new AkkaCoreDistProject(_),
akkaActorsDist, akka_remote, akka_http, akka_slf4j, akka_testkit, akka_actor_tests)
- lazy val akkaMicrokernelDist = project("microkernel", "akka-dist-microkernel", new AkkaMicrokernelDistProject(_),
- akkaCoreDist, akka_kernel, akka_samples)
+// lazy val akkaMicrokernelDist = project("microkernel", "akka-dist-microkernel", new AkkaMicrokernelDistProject(_),
+// akkaCoreDist, akka_kernel, akka_samples)
def doNothing = task { None }
override def publishLocalAction = doNothing
@@ -831,44 +834,44 @@ class AkkaParentProject(info: ProjectInfo) extends ParentProject(info) with Exec
override def distScriptSources = akkaParent.info.projectPath / "scripts" / "microkernel" * "*"
- override def distClasspath = akka_kernel.runClasspath
+// override def distClasspath = akka_kernel.runClasspath
- override def projectDependencies = akka_kernel.topologicalSort
+// override def projectDependencies = akka_kernel.topologicalSort
- override def distAction = super.distAction dependsOn (distSamples)
+// override def distAction = super.distAction dependsOn (distSamples)
- val distSamplesPath = distDocPath / "samples"
+// val distSamplesPath = distDocPath / "samples"
- lazy val distSamples = task {
- val demo = akka_samples.akka_sample_hello.jarPath
- val samples = Set(//akka_samples.akka_sample_camel
- akka_samples.akka_sample_hello)
- //akka_samples.akka_sample_security)
+ // lazy val distSamples = task {
+ // val demo = akka_samples.akka_sample_hello.jarPath
+ // val samples = Set(//akka_samples.akka_sample_camel
+ // akka_samples.akka_sample_hello)
+ // //akka_samples.akka_sample_security)
- def copySamples[P <: DefaultProject](samples: Set[P]) = {
- samples.map { sample =>
- val sampleOutputPath = distSamplesPath / sample.name
- val binPath = sampleOutputPath / "bin"
- val configPath = sampleOutputPath / "config"
- val deployPath = sampleOutputPath / "deploy"
- val libPath = sampleOutputPath / "lib"
- val srcPath = sampleOutputPath / "src"
- val confs = sample.info.projectPath / "config" ** "*.*"
- val scripts = akkaParent.info.projectPath / "scripts" / "samples" * "*"
- val libs = sample.managedClasspath(Configurations.Runtime)
- val deployed = sample.jarPath
- val sources = sample.packageSourcePaths
- copyFiles(confs, configPath) orElse
- copyScripts(scripts, binPath) orElse
- copyFiles(libs, libPath) orElse
- copyFiles(deployed, deployPath) orElse
- copyPaths(sources, srcPath)
- }.foldLeft(None: Option[String])(_ orElse _)
- }
+ // def copySamples[P <: DefaultProject](samples: Set[P]) = {
+ // samples.map { sample =>
+ // val sampleOutputPath = distSamplesPath / sample.name
+ // val binPath = sampleOutputPath / "bin"
+ // val configPath = sampleOutputPath / "config"
+ // val deployPath = sampleOutputPath / "deploy"
+ // val libPath = sampleOutputPath / "lib"
+ // val srcPath = sampleOutputPath / "src"
+ // val confs = sample.info.projectPath / "config" ** "*.*"
+ // val scripts = akkaParent.info.projectPath / "scripts" / "samples" * "*"
+ // val libs = sample.managedClasspath(Configurations.Runtime)
+ // val deployed = sample.jarPath
+ // val sources = sample.packageSourcePaths
+ // copyFiles(confs, configPath) orElse
+ // copyScripts(scripts, binPath) orElse
+ // copyFiles(libs, libPath) orElse
+ // copyFiles(deployed, deployPath) orElse
+ // copyPaths(sources, srcPath)
+ // }.foldLeft(None: Option[String])(_ orElse _)
+ // }
- copyFiles(demo, distDeployPath) orElse
- copySamples(samples)
- } dependsOn (distBase)
+ // copyFiles(demo, distDeployPath) orElse
+ // copySamples(samples)
+ // } dependsOn (distBase)
}
}
}
diff --git a/scripts/microkernel/akka.bat b/scripts/microkernel/akka.bat
index 4d2f912096..59d1a91a48 100644
--- a/scripts/microkernel/akka.bat
+++ b/scripts/microkernel/akka.bat
@@ -1,6 +1,6 @@
@echo off
set AKKA_HOME=%~dp0..
set JAVA_OPTS=-Xms1024M -Xmx1024M -Xss1M -XX:MaxPermSize=256M -XX:+UseParallelGC
-set AKKA_CLASSPATH=%AKKA_HOME%\lib\scala-library.jar;%AKKA_HOME%\lib\akka\*;%AKKA_HOME%\config
+set AKKA_CLASSPATH=%AKKA_HOME%\lib\scala-library.jar;%AKKA_HOME%\config;%AKKA_HOME%\lib\akka\*
java %JAVA_OPTS% -cp "%AKKA_CLASSPATH%" -Dakka.home="%AKKA_HOME%" akka.kernel.Main