diff --git a/akka-actor-tests/src/test/scala/akka/actor/actor/DeployerSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/actor/DeployerSpec.scala
index 974b2ea1c9..15316f727d 100644
--- a/akka-actor-tests/src/test/scala/akka/actor/actor/DeployerSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/actor/actor/DeployerSpec.scala
@@ -22,7 +22,7 @@ class DeployerSpec extends WordSpec with MustMatchers {
           Clustered(
             Node("node1"),
             Replicate(3),
-            Stateful(
+            Replication(
               TransactionLog,
               WriteThrough)))))
     }
diff --git a/akka-actor/src/main/scala/akka/actor/Actor.scala b/akka-actor/src/main/scala/akka/actor/Actor.scala
index a60a6437ca..d0d39d29c6 100644
--- a/akka-actor/src/main/scala/akka/actor/Actor.scala
+++ b/akka-actor/src/main/scala/akka/actor/Actor.scala
@@ -380,7 +380,13 @@ object Actor extends ListenerManagement {
   private def newClusterActorRef(factory: () ⇒ ActorRef, address: String, deploy: Deploy): ActorRef = {
     deploy match {
-      case Deploy(configAdress, router, serializerClassName, Clustered(home, replication: Replication, state: State)) ⇒
+      case Deploy(
+        configAdress, router, serializerClassName,
+        Clustered(
+          home,
+          replicas,
+          replication)) ⇒
+
         ClusterModule.ensureEnabled()

         if (configAdress != address) throw new IllegalStateException(
@@ -389,11 +395,12 @@ object Actor extends ListenerManagement {
           "Remote server is not running")

       val isHomeNode = DeploymentConfig.isHomeNode(home)
-      val replicas = DeploymentConfig.replicaValueFor(replication)
+      val nrOfReplicas = DeploymentConfig.replicaValueFor(replicas)

-      def storeActorAndGetClusterRef(replicationStrategy: ReplicationStrategy, serializer: Serializer): ActorRef = {
+      def storeActorAndGetClusterRef(replicationScheme: ReplicationScheme, serializer: Serializer): ActorRef = {
         // add actor to cluster registry (if not already added)
-        if (!cluster.isClustered(address)) cluster.store(factory().start(), replicas, replicationStrategy, false, serializer)
+        if (!cluster.isClustered(address))
+          cluster.store(factory().start(), nrOfReplicas, replicationScheme, false, serializer)

         // remote node (not home node), check out as ClusterActorRef
         cluster.ref(address, DeploymentConfig.routerTypeFor(router))
@@ -401,11 +408,11 @@ object Actor extends ListenerManagement {

       val serializer = serializerFor(address, serializerClassName)

-      state match {
-        case _: Stateless | Stateless ⇒
+      replication match {
+        case _: Transient | Transient ⇒
           storeActorAndGetClusterRef(Transient, serializer)

-        case Stateful(storage, strategy) ⇒
+        case replication: Replication ⇒
           if (isHomeNode) { // stateful actor's home node
             cluster
               .use(address, serializer)
@@ -413,7 +420,7 @@ object Actor extends ListenerManagement {
                 "Could not check out actor [" + address + "] from cluster registry as a \"local\" actor"))
           } else {
             // FIXME later manage different 'storage' (data grid) as well
-            storeActorAndGetClusterRef(strategy, serializer)
+            storeActorAndGetClusterRef(replication, serializer)
           }
       }
diff --git a/akka-actor/src/main/scala/akka/actor/ActorRef.scala b/akka-actor/src/main/scala/akka/actor/ActorRef.scala
index 9ad22c4c2d..a11a9a34c7 100644
--- a/akka-actor/src/main/scala/akka/actor/ActorRef.scala
+++ b/akka-actor/src/main/scala/akka/actor/ActorRef.scala
@@ -6,13 +6,13 @@ package akka.actor

 import akka.event.EventHandler
 import akka.dispatch._
-import akka.config.Config
+import akka.config._
 import akka.config.Supervision._
 import akka.util._
 import akka.serialization.{ Format, Serializer }
 import ReflectiveAccess._
 import ClusterModule._
-import DeploymentConfig.{ ReplicationStrategy, Transient, WriteThrough, WriteBehind }
+import DeploymentConfig.{ ReplicationScheme, Replication, Transient, WriteThrough, WriteBehind }

 import java.net.InetSocketAddress
 import java.util.concurrent.atomic.AtomicReference
@@ -515,7 +515,7 @@ trait ActorRef extends ActorRefShared with java.lang.Comparable[ActorRef] with S
 class LocalActorRef private[akka] (
   private[this] val actorFactory: () ⇒ Actor,
   val address: String,
-  replicationStrategy: ReplicationStrategy)
+  replicationScheme: ReplicationScheme)
   extends ActorRef with ScalaActorRef {

   protected[akka] val guard = new ReentrantGuard
@@ -543,24 +543,39 @@ class LocalActorRef private[akka] (

   protected[akka] val actorInstance = guard.withGuard { new AtomicReference[Actor](newActor) }

-  private val isReplicated: Boolean = replicationStrategy match {
-    case Transient ⇒ false
-    case _         ⇒ true
+  private val isReplicated: Boolean = replicationScheme match {
+    case _: Transient | Transient ⇒ false
+    case _                        ⇒ true
   }

   // FIXME how to get the matching serializerClassName? Now default is used. Needed for transaction log snapshot
   private val serializer = Actor.serializerFor(address, Format.defaultSerializerName)

-  private lazy val txLog: TransactionLog = {
-    val log = replicationStrategy match {
-      case Transient    ⇒ throw new IllegalStateException("Can not replicate 'transient' actor [" + toString + "]")
-      case WriteThrough ⇒ transactionLog.newLogFor(_uuid.toString, false, replicationStrategy, serializer)
-      case WriteBehind  ⇒ transactionLog.newLogFor(_uuid.toString, true, replicationStrategy, serializer)
+  private lazy val replicationStorage: Either[TransactionLog, AnyRef] = {
+    replicationScheme match {
+      case _: Transient | Transient ⇒
+        throw new IllegalStateException("Can not replicate 'transient' actor [" + toString + "]")
+
+      case Replication(storage, strategy) ⇒
+        val isWriteBehind = strategy match {
+          case _: WriteBehind | WriteBehind   ⇒ true
+          case _: WriteThrough | WriteThrough ⇒ false
+        }
+
+        storage match {
+          case _: DeploymentConfig.TransactionLog | DeploymentConfig.TransactionLog ⇒
+            EventHandler.debug(this,
+              "Creating a transaction log for Actor [%s] with replication strategy [%s]"
+                .format(address, replicationScheme))
+            Left(transactionLog.newLogFor(_uuid.toString, isWriteBehind, replicationScheme, serializer))
+
+          case _: DeploymentConfig.DataGrid | DeploymentConfig.DataGrid ⇒
+            throw new ConfigurationException("Replication storage type \"data-grid\" is not yet supported")
+
+          case unknown ⇒
+            throw new ConfigurationException("Unknown replication storage type [" + unknown + "]")
+        }
     }
-    EventHandler.debug(this,
-      "Creating a transaction log for Actor [%s] with replication strategy [%s]"
-        .format(address, replicationStrategy))
-    log
   }

   //If it was started inside "newActor", initialize it
@@ -576,7 +591,7 @@ class LocalActorRef private[akka] (
     __supervisor: Option[ActorRef],
     __hotswap: Stack[PartialFunction[Any, Unit]],
     __factory: () ⇒ Actor,
-    __replicationStrategy: ReplicationStrategy) = {
+    __replicationStrategy: ReplicationScheme) = {

     this(__factory, __address, __replicationStrategy)

@@ -652,7 +667,9 @@ class LocalActorRef private[akka] (
       }
     } //else if (isBeingRestarted) throw new ActorKilledException("Actor [" + toString + "] is being restarted.")
-    if (isReplicated) txLog.delete()
+    if (isReplicated) {
+      if (replicationStorage.isLeft) replicationStorage.left.get.delete()
+    }
   }
 }
@@ -774,7 +791,7 @@ class LocalActorRef private[akka] (
     } finally {
       guard.lock.unlock()
       if (isReplicated) {
-        txLog.recordEntry(messageHandle, this)
+        if (replicationStorage.isLeft) replicationStorage.left.get.recordEntry(messageHandle, this)
       }
     }
   }
diff --git a/akka-actor/src/main/scala/akka/actor/Deployer.scala b/akka-actor/src/main/scala/akka/actor/Deployer.scala
index 07b258cd88..bfbf1441ef 100644
--- a/akka-actor/src/main/scala/akka/actor/Deployer.scala
+++ b/akka-actor/src/main/scala/akka/actor/Deployer.scala
@@ -62,8 +62,8 @@ object DeploymentConfig {
   sealed trait Scope
   case class Clustered(
     home: Home = Host("localhost"),
-    replication: Replication = NoReplicas,
-    state: State = Stateless) extends Scope
+    replicas: Replicas = NoReplicas,
+    replication: ReplicationScheme = Transient) extends Scope

   // For Java API
   case class Local() extends Scope
@@ -80,34 +80,34 @@ object DeploymentConfig {
   case class IP(ipAddress: String) extends Home

   // --------------------------------
-  // --- Replication
+  // --- Replicas
   // --------------------------------
-  sealed trait Replication
-  case class Replicate(factor: Int) extends Replication {
-    if (factor < 1) throw new IllegalArgumentException("Replication factor can not be negative or zero")
+  sealed trait Replicas
+  case class Replicate(factor: Int) extends Replicas {
+    if (factor < 1) throw new IllegalArgumentException("Replicas factor can not be negative or zero")
   }

   // For Java API
-  case class AutoReplicate() extends Replication
-  case class NoReplicas() extends Replication
+  case class AutoReplicate() extends Replicas
+  case class NoReplicas() extends Replicas

   // For Scala API
-  case object AutoReplicate extends Replication
-  case object NoReplicas extends Replication
+  case object AutoReplicate extends Replicas
+  case object NoReplicas extends Replicas

   // --------------------------------
-  // --- State
+  // --- Replication
   // --------------------------------
-  sealed trait State
+  sealed trait ReplicationScheme

   // For Java API
-  case class Stateless() extends State
+  case class Transient() extends ReplicationScheme

   // For Scala API
-  case object Stateless extends State
-  case class Stateful(
+  case object Transient extends ReplicationScheme
+  case class Replication(
     storage: ReplicationStorage,
-    strategy: ReplicationStrategy) extends State
+    strategy: ReplicationStrategy) extends ReplicationScheme

   // --------------------------------
   // --- ReplicationStorage
@@ -130,12 +130,10 @@ object DeploymentConfig {

   // For Java API
   case class WriteBehind() extends ReplicationStrategy
   case class WriteThrough() extends ReplicationStrategy
-  case class Transient() extends ReplicationStrategy

   // For Scala API
   case object WriteBehind extends ReplicationStrategy
   case object WriteThrough extends ReplicationStrategy
-  case object Transient extends ReplicationStrategy

   // --------------------------------
   // --- Helper methods for parsing
@@ -147,7 +145,7 @@ object DeploymentConfig {
     case Node(nodename) ⇒ nodename == Config.nodename
   }

-  def replicaValueFor(replication: Replication): Int = replication match {
+  def replicaValueFor(replicas: Replicas): Int = replicas match {
     case Replicate(replicas) ⇒ replicas
     case AutoReplicate       ⇒ -1
     case AutoReplicate()     ⇒ -1
@@ -170,6 +168,11 @@ object DeploymentConfig {
     case LeastMessages() ⇒ RouterType.LeastMessages
     case c: CustomRouter ⇒ throw new UnsupportedOperationException("routerTypeFor: " + c)
   }
+
+  def isReplicationAsync(strategy: ReplicationStrategy): Boolean = strategy match {
+    case _: WriteBehind | WriteBehind   ⇒ true
+    case _: WriteThrough | WriteThrough ⇒ false
+  }
 }

 /**
@@ -375,27 +378,30 @@ object Deployer {
          }

          // --------------------------------
-         // akka.actor.deployment..clustered.stateful
+         // akka.actor.deployment..clustered.replication
          // --------------------------------
-         clusteredConfig.getSection("stateful") match {
+         clusteredConfig.getSection("replication") match {
            case None ⇒
-             Some(Deploy(address, router, format, Clustered(home, replicas, Stateless)))
+             Some(Deploy(address, router, format, Clustered(home, replicas, Transient)))

-           case Some(statefulConfig) ⇒
-             val storage = statefulConfig.getString("replication-storage", "transaction-log") match {
+           case Some(replicationConfig) ⇒
+             val storage = replicationConfig.getString("storage", "transaction-log") match {
                case "transaction-log" ⇒ TransactionLog
                case "data-grid"       ⇒ DataGrid
                case unknown ⇒ throw new ConfigurationException("Config option [" + addressPath +
-                 ".clustered.stateful.replication-storage] needs to be either [\"transaction-log\"] or [\"data-grid\"] - was [" +
+                 ".clustered.replication.storage] needs to be either [\"transaction-log\"] or [\"data-grid\"] - was [" +
                  unknown + "]")
              }

-             val strategy = statefulConfig.getString("replication-strategy", "write-through") match {
+             val strategy = replicationConfig.getString("strategy", "write-through") match {
                case "write-through" ⇒ WriteThrough
                case "write-behind"  ⇒ WriteBehind
-               case unknown         ⇒ Transient
+               case unknown ⇒
+                 throw new ConfigurationException("Config option [" + addressPath +
+                   ".clustered.replication.strategy] needs to be either [\"write-through\"] or [\"write-behind\"] - was [" +
+                   unknown + "]")
              }

-             Some(Deploy(address, router, format, Clustered(home, replicas, Stateful(storage, strategy))))
+             Some(Deploy(address, router, format, Clustered(home, replicas, Replication(storage, strategy))))
          }
        }
      }
@@ -459,7 +465,8 @@ object Address {
   def validate(address: String) {
     if (validAddressPattern.matcher(address).matches) true
     else {
-      val e = new IllegalArgumentException("Address [" + address + "] is not valid, need to follow pattern [0-9a-zA-Z\\-\\_\\$]+")
+      val e = new IllegalArgumentException(
+        "Address [" + address + "] is not valid, need to follow pattern [0-9a-zA-Z\\-\\_\\$]+")
       EventHandler.error(e, this, e.getMessage)
       throw e
     }
diff --git a/akka-actor/src/main/scala/akka/cluster/ClusterInterface.scala b/akka-actor/src/main/scala/akka/cluster/ClusterInterface.scala
index 7920121e95..82155ebc90 100644
--- a/akka-actor/src/main/scala/akka/cluster/ClusterInterface.scala
+++ b/akka-actor/src/main/scala/akka/cluster/ClusterInterface.scala
@@ -185,7 +185,7 @@ trait ClusterNode {
   * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly
   * available durable store.
   */
-  def store[T <: Actor](address: String, actorClass: Class[T], replicationStrategy: ReplicationStrategy, format: Serializer): ClusterNode
+  def store[T <: Actor](address: String, actorClass: Class[T], replicationScheme: ReplicationScheme, format: Serializer): ClusterNode

  /**
   * Clusters an actor of a specific type. If the actor is already clustered then the clustered version will be updated
@@ -199,7 +199,7 @@ trait ClusterNode {
   * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly
   * available durable store.
   */
-  def store[T <: Actor](address: String, actorClass: Class[T], replicationFactor: Int, replicationStrategy: ReplicationStrategy, format: Serializer): ClusterNode
+  def store[T <: Actor](address: String, actorClass: Class[T], replicationFactor: Int, replicationScheme: ReplicationScheme, format: Serializer): ClusterNode

  /**
   * Clusters an actor of a specific type. If the actor is already clustered then the clustered version will be updated
@@ -213,7 +213,7 @@ trait ClusterNode {
   * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly
   * available durable store.
   */
-  def store[T <: Actor](address: String, actorClass: Class[T], replicationStrategy: ReplicationStrategy, serializeMailbox: Boolean, format: Serializer): ClusterNode
+  def store[T <: Actor](address: String, actorClass: Class[T], replicationScheme: ReplicationScheme, serializeMailbox: Boolean, format: Serializer): ClusterNode

  /**
   * Clusters an actor of a specific type. If the actor is already clustered then the clustered version will be updated
@@ -227,7 +227,7 @@ trait ClusterNode {
   * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly
   * available durable store.
   */
-  def store[T <: Actor](address: String, actorClass: Class[T], replicationFactor: Int, replicationStrategy: ReplicationStrategy, serializeMailbox: Boolean, format: Serializer): ClusterNode
+  def store[T <: Actor](address: String, actorClass: Class[T], replicationFactor: Int, replicationScheme: ReplicationScheme, serializeMailbox: Boolean, format: Serializer): ClusterNode

  /**
   * Clusters an actor with UUID. If the actor is already clustered then the clustered version will be updated
@@ -241,7 +241,7 @@ trait ClusterNode {
   * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly
   * available durable store.
   */
-  def store(actorRef: ActorRef, replicationStrategy: ReplicationStrategy, format: Serializer): ClusterNode
+  def store(actorRef: ActorRef, replicationScheme: ReplicationScheme, format: Serializer): ClusterNode

  /**
   * Clusters an actor with UUID. If the actor is already clustered then the clustered version will be updated
@@ -255,7 +255,7 @@ trait ClusterNode {
   * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly
   * available durable store.
   */
-  def store(actorRef: ActorRef, replicationFactor: Int, replicationStrategy: ReplicationStrategy, format: Serializer): ClusterNode
+  def store(actorRef: ActorRef, replicationFactor: Int, replicationScheme: ReplicationScheme, format: Serializer): ClusterNode

  /**
   * Clusters an actor with UUID. If the actor is already clustered then the clustered version will be updated
@@ -269,7 +269,7 @@ trait ClusterNode {
   * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly
   * available durable store.
   */
-  def store(actorRef: ActorRef, replicationStrategy: ReplicationStrategy, serializeMailbox: Boolean, format: Serializer): ClusterNode
+  def store(actorRef: ActorRef, replicationScheme: ReplicationScheme, serializeMailbox: Boolean, format: Serializer): ClusterNode

  /**
   * Needed to have reflection through structural typing work.
@@ -279,7 +279,7 @@ trait ClusterNode {
  /**
   * Needed to have reflection through structural typing work.
   */
-  def store(actorRef: ActorRef, replicationFactor: Int, replicationStrategy: ReplicationStrategy, serializeMailbox: Boolean, format: AnyRef): ClusterNode
+  def store(actorRef: ActorRef, replicationFactor: Int, replicationScheme: ReplicationScheme, serializeMailbox: Boolean, format: AnyRef): ClusterNode

  /**
   * Clusters an actor with UUID. If the actor is already clustered then the clustered version will be updated
@@ -293,7 +293,7 @@ trait ClusterNode {
   * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly
   * available durable store.
   */
-  def store(actorRef: ActorRef, replicationFactor: Int, replicationStrategy: ReplicationStrategy, serializeMailbox: Boolean, format: Serializer): ClusterNode
+  def store(actorRef: ActorRef, replicationFactor: Int, replicationScheme: ReplicationScheme, serializeMailbox: Boolean, format: Serializer): ClusterNode

  /**
   * Removes actor with uuid from the cluster.
diff --git a/akka-actor/src/main/scala/akka/util/ReflectiveAccess.scala b/akka-actor/src/main/scala/akka/util/ReflectiveAccess.scala
index c7374a1b12..42fd88a78f 100644
--- a/akka-actor/src/main/scala/akka/util/ReflectiveAccess.scala
+++ b/akka-actor/src/main/scala/akka/util/ReflectiveAccess.scala
@@ -8,7 +8,7 @@ import akka.dispatch.{ Future, Promise, MessageInvocation }
 import akka.config.{ Config, ModuleNotAvailableException }
 import akka.remoteinterface.RemoteSupport
 import akka.actor._
-import DeploymentConfig.{ Deploy, ReplicationStrategy }
+import DeploymentConfig.{ Deploy, ReplicationScheme, ReplicationStrategy }
 import akka.event.EventHandler
 import akka.serialization.Format
 import akka.cluster.ClusterNode
@@ -111,13 +111,13 @@ object ReflectiveAccess {
     def newLogFor(
       id: String,
       isAsync: Boolean,
-      replicationStrategy: ReplicationStrategy,
+      replicationScheme: ReplicationScheme,
       format: Serializer): TransactionLog

     def logFor(
       id: String,
       isAsync: Boolean,
-      replicationStrategy: ReplicationStrategy,
+      replicationScheme: ReplicationScheme,
       format: Serializer): TransactionLog

     def shutdown()
diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala
index b516aa6e47..a5b76b646c 100644
--- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala
+++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala
@@ -29,7 +29,7 @@ import Helpers._
 import akka.actor._
 import Actor._
 import Status._
-import DeploymentConfig.{ ReplicationStrategy, Transient, WriteThrough, WriteBehind }
+import DeploymentConfig.{ ReplicationScheme, ReplicationStrategy, Transient, WriteThrough, WriteBehind }
 import akka.event.EventHandler
 import akka.dispatch.{ Dispatchers, Future }
 import akka.remoteinterface._
@@ -471,8 +471,8 @@ class DefaultClusterNode private[akka] (
   * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly
   * available durable store.
   */
-  def store[T <: Actor](address: String, actorClass: Class[T], replicationStrategy: ReplicationStrategy, format: Serializer): ClusterNode =
-    store(Actor.actorOf(actorClass, address).start, 0, replicationStrategy, false, format)
+  def store[T <: Actor](address: String, actorClass: Class[T], replicationScheme: ReplicationScheme, format: Serializer): ClusterNode =
+    store(Actor.actorOf(actorClass, address).start, 0, replicationScheme, false, format)

  /**
   * Clusters an actor of a specific type. If the actor is already clustered then the clustered version will be updated
@@ -487,8 +487,8 @@ class DefaultClusterNode private[akka] (
   * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly
   * available durable store.
   */
-  def store[T <: Actor](address: String, actorClass: Class[T], replicationFactor: Int, replicationStrategy: ReplicationStrategy, format: Serializer): ClusterNode =
-    store(Actor.actorOf(actorClass, address).start, replicationFactor, replicationStrategy, false, format)
+  def store[T <: Actor](address: String, actorClass: Class[T], replicationFactor: Int, replicationScheme: ReplicationScheme, format: Serializer): ClusterNode =
+    store(Actor.actorOf(actorClass, address).start, replicationFactor, replicationScheme, false, format)

  /**
   * Clusters an actor of a specific type. If the actor is already clustered then the clustered version will be updated
@@ -503,8 +503,8 @@ class DefaultClusterNode private[akka] (
   * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly
   * available durable store.
   */
-  def store[T <: Actor](address: String, actorClass: Class[T], replicationStrategy: ReplicationStrategy, serializeMailbox: Boolean, format: Serializer): ClusterNode =
-    store(Actor.actorOf(actorClass, address).start, 0, replicationStrategy, serializeMailbox, format)
+  def store[T <: Actor](address: String, actorClass: Class[T], replicationScheme: ReplicationScheme, serializeMailbox: Boolean, format: Serializer): ClusterNode =
+    store(Actor.actorOf(actorClass, address).start, 0, replicationScheme, serializeMailbox, format)

  /**
   * Clusters an actor of a specific type. If the actor is already clustered then the clustered version will be updated
@@ -519,8 +519,8 @@ class DefaultClusterNode private[akka] (
   * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly
   * available durable store.
   */
-  def store[T <: Actor](address: String, actorClass: Class[T], replicationFactor: Int, replicationStrategy: ReplicationStrategy, serializeMailbox: Boolean, format: Serializer): ClusterNode =
-    store(Actor.actorOf(actorClass, address).start, replicationFactor, replicationStrategy, serializeMailbox, format)
+  def store[T <: Actor](address: String, actorClass: Class[T], replicationFactor: Int, replicationScheme: ReplicationScheme, serializeMailbox: Boolean, format: Serializer): ClusterNode =
+    store(Actor.actorOf(actorClass, address).start, replicationFactor, replicationScheme, serializeMailbox, format)

  /**
   * Clusters an actor with UUID. If the actor is already clustered then the clustered version will be updated
@@ -535,8 +535,8 @@ class DefaultClusterNode private[akka] (
   * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly
   * available durable store.
   */
-  def store(actorRef: ActorRef, replicationStrategy: ReplicationStrategy, format: Serializer): ClusterNode =
-    store(actorRef, 0, replicationStrategy, false, format)
+  def store(actorRef: ActorRef, replicationScheme: ReplicationScheme, format: Serializer): ClusterNode =
+    store(actorRef, 0, replicationScheme, false, format)

  /**
   * Clusters an actor with UUID. If the actor is already clustered then the clustered version will be updated
@@ -551,8 +551,8 @@ class DefaultClusterNode private[akka] (
   * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly
   * available durable store.
   */
-  def store(actorRef: ActorRef, replicationFactor: Int, replicationStrategy: ReplicationStrategy, format: Serializer): ClusterNode =
-    store(actorRef, replicationFactor, replicationStrategy, false, format)
+  def store(actorRef: ActorRef, replicationFactor: Int, replicationScheme: ReplicationScheme, format: Serializer): ClusterNode =
+    store(actorRef, replicationFactor, replicationScheme, false, format)

  /**
   * Clusters an actor with UUID. If the actor is already clustered then the clustered version will be updated
@@ -575,14 +575,14 @@ class DefaultClusterNode private[akka] (
   * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly
   * available durable store.
   */
-  def store(actorRef: ActorRef, replicationStrategy: ReplicationStrategy, serializeMailbox: Boolean, format: Serializer): ClusterNode =
-    store(actorRef, 0, replicationStrategy, serializeMailbox, format)
+  def store(actorRef: ActorRef, replicationScheme: ReplicationScheme, serializeMailbox: Boolean, format: Serializer): ClusterNode =
+    store(actorRef, 0, replicationScheme, serializeMailbox, format)

  /**
   * Needed to have reflection through structural typing work.
   */
-  def store(actorRef: ActorRef, replicationFactor: Int, replicationStrategy: ReplicationStrategy, serializeMailbox: Boolean, format: AnyRef): ClusterNode =
-    store(actorRef, replicationFactor, replicationStrategy, serializeMailbox, format.asInstanceOf[Serializer])
+  def store(actorRef: ActorRef, replicationFactor: Int, replicationScheme: ReplicationScheme, serializeMailbox: Boolean, format: AnyRef): ClusterNode =
+    store(actorRef, replicationFactor, replicationScheme, serializeMailbox, format.asInstanceOf[Serializer])

  /**
   * Needed to have reflection through structural typing work.
@@ -598,7 +598,7 @@ class DefaultClusterNode private[akka] (
  def store(
    actorRef: ActorRef,
    replicationFactor: Int,
-   replicationStrategy: ReplicationStrategy,
+   replicationScheme: ReplicationScheme,
    serializeMailbox: Boolean,
    format: Serializer): ClusterNode =
    if (isConnected.isOn) {
@@ -612,8 +612,8 @@ class DefaultClusterNode private[akka] (
        "Storing actor [%s] with UUID [%s] in cluster".format(actorRef.address, uuid))

      val actorBytes =
-       if (shouldCompressData) LZF.compress(toBinary(actorRef, serializeMailbox, replicationStrategy)(format))
-       else toBinary(actorRef, serializeMailbox, replicationStrategy)(format)
+       if (shouldCompressData) LZF.compress(toBinary(actorRef, serializeMailbox, replicationScheme)(format))
+       else toBinary(actorRef, serializeMailbox, replicationScheme)(format)

      val actorRegistryPath = actorRegistryPathFor(uuid)
diff --git a/akka-cluster/src/main/scala/akka/cluster/TransactionLog.scala b/akka-cluster/src/main/scala/akka/cluster/TransactionLog.scala
index 7efec7df23..281d2f91e5 100644
--- a/akka-cluster/src/main/scala/akka/cluster/TransactionLog.scala
+++ b/akka-cluster/src/main/scala/akka/cluster/TransactionLog.scala
@@ -14,7 +14,7 @@ import akka.config._
 import Config._
 import akka.util._
 import akka.actor._
-import DeploymentConfig.{ ReplicationStrategy, Transient, WriteThrough, WriteBehind }
+import DeploymentConfig.{ ReplicationScheme, ReplicationStrategy, Transient, WriteThrough, WriteBehind }
 import akka.event.EventHandler
 import akka.dispatch.{ DefaultPromise, Promise, MessageInvocation }
 import akka.remote.MessageSerializer
@@ -52,7 +52,7 @@ class TransactionLog private (
   ledger: LedgerHandle,
   val id: String,
   val isAsync: Boolean,
-  replicationStrategy: ReplicationStrategy,
+  replicationScheme: ReplicationScheme,
   format: Serializer) {

   import TransactionLog._
@@ -71,8 +71,8 @@ class TransactionLog private (
       if (nrOfEntries.incrementAndGet % snapshotFrequency == 0) {
         val snapshot = // FIXME ReplicationStrategy Transient is always used
-          if (Cluster.shouldCompressData) LZF.compress(toBinary(actorRef, false, replicationStrategy)(format))
-          else toBinary(actorRef, false, replicationStrategy)(format)
+          if (Cluster.shouldCompressData) LZF.compress(toBinary(actorRef, false, replicationScheme)(format))
+          else toBinary(actorRef, false, replicationScheme)(format)
         recordSnapshot(snapshot)
       }
       recordEntry(MessageSerializer.serialize(messageHandle.message).toByteArray)
@@ -371,9 +371,9 @@ object TransactionLog {
     ledger: LedgerHandle,
     id: String,
     isAsync: Boolean,
-    replicationStrategy: ReplicationStrategy,
+    replicationScheme: ReplicationScheme,
     format: Serializer) =
-    new TransactionLog(ledger, id, isAsync, replicationStrategy, format)
+    new TransactionLog(ledger, id, isAsync, replicationScheme, format)

  /**
   * Shuts down the transaction log.
@@ -397,7 +397,7 @@ object TransactionLog {
   def newLogFor(
     id: String,
     isAsync: Boolean,
-    replicationStrategy: ReplicationStrategy,
+    replicationScheme: ReplicationScheme,
     format: Serializer): TransactionLog = {

     val txLogPath = transactionLogNode + "/" + id
@@ -443,7 +443,7 @@ object TransactionLog {
      }

    EventHandler.info(this, "Created new transaction log [%s] for UUID [%s]".format(logId, id))
-    TransactionLog(ledger, id, isAsync, replicationStrategy, format)
+    TransactionLog(ledger, id, isAsync, replicationScheme, format)
  }

  /**
@@ -452,7 +452,7 @@ object TransactionLog {
   def logFor(
     id: String,
     isAsync: Boolean,
-    replicationStrategy: ReplicationStrategy,
+    replicationScheme: ReplicationScheme,
     format: Serializer): TransactionLog = {

     val txLogPath = transactionLogNode + "/" + id
@@ -493,7 +493,7 @@ object TransactionLog {
      case e ⇒ handleError(e)
    }

-    TransactionLog(ledger, id, isAsync, replicationStrategy, format)
+    TransactionLog(ledger, id, isAsync, replicationScheme, format)
  }

  private[akka] def await[T](future: Promise[T]): T = {
diff --git a/akka-remote/src/main/java/akka/remote/protocol/RemoteProtocol.java b/akka-remote/src/main/java/akka/remote/protocol/RemoteProtocol.java
index 29e490103e..b643a9a750 100644
--- a/akka-remote/src/main/java/akka/remote/protocol/RemoteProtocol.java
+++ b/akka-remote/src/main/java/akka/remote/protocol/RemoteProtocol.java
@@ -77,25 +77,94 @@ public final class RemoteProtocol {
     // @@protoc_insertion_point(enum_scope:CommandType)
   }

-  public enum ReplicationStrategyType
+  public enum ReplicationStorageType
       implements com.google.protobuf.ProtocolMessageEnum {
     TRANSIENT(0, 1),
-    WRITE_THROUGH(1, 2),
-    WRITE_BEHIND(2, 3),
+    TRANSACTION_LOG(1, 2),
+    DATA_GRID(2, 3),
     ;

     public static final int TRANSIENT_VALUE = 1;
-    public static final int WRITE_THROUGH_VALUE = 2;
-    public static final int WRITE_BEHIND_VALUE = 3;
+    public static final int TRANSACTION_LOG_VALUE = 2;
+    public static final int DATA_GRID_VALUE = 3;
+
+
+    public final int getNumber() { return value; }
+
+    public static ReplicationStorageType valueOf(int value) {
+      switch (value) {
+        case 1: return TRANSIENT;
+        case 2: return TRANSACTION_LOG;
+        case 3: return DATA_GRID;
+        default: return null;
+      }
+    }
+
+    public static com.google.protobuf.Internal.EnumLiteMap
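
Note (not part of the patch): a minimal sketch of how the renamed deployment types compose after this change, mirroring the DeployerSpec expectation above. The node name and replica count are example values, and the wildcard import of DeploymentConfig is an assumption.

    // Illustrative only: builds the new Clustered scope with the renamed types,
    // replacing the old Clustered(home, replication: Replication, state: State) shape.
    import akka.actor.DeploymentConfig._

    val scope: Scope =
      Clustered(
        home = Node("node1"),                                     // Home of the clustered actor
        replicas = Replicate(3),                                  // Replicas: keep three copies
        replication = Replication(TransactionLog, WriteThrough))  // ReplicationScheme: durable log, write-through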