- */
-
-package akka.cluster
-
-import org.apache.zookeeper._
-import org.apache.zookeeper.Watcher.Event._
-import org.apache.zookeeper.data.Stat
-import org.apache.zookeeper.recipes.lock.{ WriteLock, LockListener }
-
-import org.I0Itec.zkclient._
-import org.I0Itec.zkclient.serialize._
-import org.I0Itec.zkclient.exception._
-
-import java.util.{ List ⇒ JList }
-import java.util.concurrent.atomic.{ AtomicBoolean, AtomicReference }
-import java.util.concurrent.{ CopyOnWriteArrayList, Callable, ConcurrentHashMap, TimeoutException }
-import javax.management.StandardMBean
-import java.net.InetSocketAddress
-
-import scala.collection.mutable.ConcurrentMap
-import scala.collection.JavaConversions._
-import scala.annotation.tailrec
-
-import akka.util._
-import duration._
-import Helpers._
-
-import akka.actor._
-import Actor._
-import Status._
-import DeploymentConfig._
-
-import akka.event.EventHandler
-import akka.config.Config
-import akka.config.Config._
-
-import akka.serialization.{ Serialization, Serializer, ActorSerialization, Compression }
-import ActorSerialization._
-import Compression.LZF
-
-import akka.routing._
-import akka.cluster._
-import akka.cluster.metrics._
-import akka.cluster.zookeeper._
-import ChangeListener._
-import RemoteProtocol._
-import RemoteSystemDaemonMessageType._
-
-import com.eaio.uuid.UUID
-
-import com.google.protobuf.ByteString
-import akka.dispatch.{ Await, Dispatchers, Future, PinnedDispatcher }
-
-// FIXME add watch for each node that when the entry for the node is removed then the node shuts itself down
-
-/**
- * JMX MBean for the cluster service.
- */
-trait ClusterNodeMBean {
-
- def stop()
-
- def disconnect()
-
- def reconnect()
-
- def resign()
-
- def getRemoteServerHostname: String
-
- def getRemoteServerPort: Int
-
- def getNodeName: String
-
- def getClusterName: String
-
- def getZooKeeperServerAddresses: String
-
- def getMemberNodes: Array[String]
-
- def getNodeAddress: NodeAddress
-
- def getLeaderLockName: String
-
- def isLeader: Boolean
-
- def getUuidsForClusteredActors: Array[String]
-
- def getAddressesForClusteredActors: Array[String]
-
- def getUuidsForActorsInUse: Array[String]
-
- def getAddressesForActorsInUse: Array[String]
-
- def getNodesForActorInUseWithAddress(address: String): Array[String]
-
- def getUuidsForActorsInUseOnNode(nodeName: String): Array[String]
-
- def getAddressesForActorsInUseOnNode(nodeName: String): Array[String]
-
- def setConfigElement(key: String, value: String)
-
- def getConfigElement(key: String): AnyRef
-
- def removeConfigElement(key: String)
-
- def getConfigElementKeys: Array[String]
-
- def getMembershipPathFor(node: String): String
-
- def getConfigurationPathFor(key: String): String
-
- def getActorAddressToNodesPathFor(actorAddress: String): String
-
- def getActorAddressToNodesPathForWithNodeName(actorAddress: String, nodeName: String): String
-
- def getNodeToUuidsPathFor(node: String): String
-
- // FIXME All MBean methods that take a UUID are useless, change to String
- def getNodeToUuidsPathFor(node: String, uuid: UUID): String
-
- def getActorAddressRegistryPathFor(actorAddress: String): String
-
- def getActorAddressRegistrySerializerPathFor(actorAddress: String): String
-
- def getActorAddressRegistryUuidPathFor(actorAddress: String): String
-
- def getActorUuidRegistryNodePathFor(uuid: UUID): String
-
- def getActorUuidRegistryRemoteAddressPathFor(uuid: UUID): String
-
- def getActorAddressToUuidsPathFor(actorAddress: String): String
-
- def getActorAddressToUuidsPathForWithUuid(actorAddress: String, uuid: UUID): String
-}
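-
-// A usage sketch (not part of the API): the MBean is registered under the ObjectName
-// produced by JMX.nameFor(hostname, "monitoring", "cluster"), see 'createMBean' in
-// DefaultClusterNode. Assuming 'objectName' is that name, the attributes map to the
-// getters above by standard JMX naming:
-//
-//   import java.lang.management.ManagementFactory
-//   val mbeanServer = ManagementFactory.getPlatformMBeanServer
-//   val nodeName = mbeanServer.getAttribute(objectName, "NodeName") // maps to getNodeName
-//   val isLeader = mbeanServer.getAttribute(objectName, "Leader")   // maps to isLeader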
-
-/**
- * Module for the Cluster. Also holds global state such as configuration data etc.
- */
-object Cluster {
- val EMPTY_STRING = "".intern
-
- // config options
- val name = Config.clusterName
- val zooKeeperServers = config.getString("akka.cluster.zookeeper-server-addresses", "localhost:2181")
- val remoteServerPort = config.getInt("akka.remote.server.port", 2552)
- val sessionTimeout = Duration(config.getInt("akka.cluster.session-timeout", 60), TIME_UNIT).toMillis.toInt
- val metricsRefreshInterval = Duration(config.getInt("akka.cluster.metrics-refresh-timeout", 2), TIME_UNIT)
- val connectionTimeout = Duration(config.getInt("akka.cluster.connection-timeout", 60), TIME_UNIT).toMillis.toInt
- val maxTimeToWaitUntilConnected = Duration(config.getInt("akka.cluster.max-time-to-wait-until-connected", 30), TIME_UNIT).toMillis.toInt
- val shouldCompressData = config.getBool("akka.remote.use-compression", false)
- val enableJMX = config.getBool("akka.enable-jmx", true)
- val remoteDaemonAckTimeout = Duration(config.getInt("akka.remote.remote-daemon-ack-timeout", 30), TIME_UNIT).toMillis.toInt
- val includeRefNodeInReplicaSet = config.getBool("akka.cluster.include-ref-node-in-replica-set", true)
-
- @volatile
- private var properties = Map.empty[String, String]
-
- /**
- * Used to override options normally passed as JVM properties, such as -Dakka.cluster.nodename=node1.
- * Currently supported options are:
- *
- * Cluster setProperty ("akka.cluster.nodename", "node1")
- * Cluster setProperty ("akka.remote.hostname", "darkstar.lan")
- * Cluster setProperty ("akka.remote.port", "1234")
- *
- */
- def setProperty(property: (String, String)) {
- properties = properties + property
- }
-
- private def nodename: String = properties.get("akka.cluster.nodename") match {
- case Some(uberride) ⇒ uberride
- case None ⇒ Config.nodename
- }
-
- private def hostname: String = properties.get("akka.remote.hostname") match {
- case Some(uberride) ⇒ uberride
- case None ⇒ Config.hostname
- }
-
- private def port: Int = properties.get("akka.remote.port") match {
- case Some(uberride) ⇒ uberride.toInt
- case None ⇒ Config.remoteServerPort
- }
-
- val defaultZooKeeperSerializer = new SerializableSerializer
-
- /**
- * The node address.
- */
- val nodeAddress = NodeAddress(name, nodename)
-
- /**
- * The reference to the running ClusterNode.
- */
- val node = {
- if (nodeAddress eq null) throw new IllegalArgumentException("NodeAddress can't be null")
- new DefaultClusterNode(nodeAddress, hostname, port, zooKeeperServers, defaultZooKeeperSerializer)
- }
-
- /**
- * Creates a new AkkaZkClient.
- */
- def newZkClient(): AkkaZkClient = new AkkaZkClient(zooKeeperServers, sessionTimeout, connectionTimeout, defaultZooKeeperSerializer)
-
- def uuidToString(uuid: UUID): String = uuid.toString
-
- def stringToUuid(uuid: String): UUID = {
- if (uuid eq null) throw new ClusterException("UUID is null")
- if (uuid == "") throw new ClusterException("UUID is an empty string")
- try {
- new UUID(uuid)
- } catch {
- case e: StringIndexOutOfBoundsException ⇒
- val error = new ClusterException("UUID not valid [" + uuid + "]")
- EventHandler.error(error, this, "")
- throw error
- }
- }
-
- def uuidProtocolToUuid(uuid: UuidProtocol): UUID = new UUID(uuid.getHigh, uuid.getLow)
-
- def uuidToUuidProtocol(uuid: UUID): UuidProtocol =
- UuidProtocol.newBuilder
- .setHigh(uuid.getTime)
- .setLow(uuid.getClockSeqAndNode)
- .build
-}
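-
-// A minimal boot sketch: 'node' is a val initialized from 'nodename', 'hostname'
-// and 'port' when the Cluster object is first accessed, so any property overrides
-// must be set before the first access to 'Cluster.node':
-//
-//   Cluster setProperty ("akka.cluster.nodename" -> "node1")
-//   Cluster setProperty ("akka.remote.port" -> "2552")
-//   val node = Cluster.node // creates the node, joins the cluster and the leader election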
-
-/**
- * A cluster is made up of a set of JVMs, each running a ClusterNode.
- *
- * This is the path tree holding the cluster meta-data in ZooKeeper.
- *
- * Syntax: foo denotes a variable string, 'foo' denotes a fixed symbol, and "data" in foo[data] denotes the value (in bytes) stored at the node "foo".
- *
- *
- * /clusterName/'members'/nodeName
- * /clusterName/'config'/key[bytes]
- *
- * /clusterName/'actor-address-to-nodes'/actorAddress/nodeName
- * /clusterName/'actors-node-to-uuids'/nodeName/actorUuid
- *
- * /clusterName/'actor-address-registry'/actorAddress/'serializer'[serializerName]
- * /clusterName/'actor-address-registry'/actorAddress/'uuid'[actorUuid]
- *
- * /clusterName/'actor-uuid-registry'/actorUuid/'node'[nodeName]
- * /clusterName/'actor-uuid-registry'/actorUuid/'node'/ip:port
- * /clusterName/'actor-uuid-registry'/actorUuid/'address'[actorAddress]
- *
- * /clusterName/'actor-address-to-uuids'/actorAddress/actorUuid
- *
- */
-class DefaultClusterNode private[akka] (
- val nodeAddress: NodeAddress,
- val hostname: String = Config.hostname,
- val port: Int = Config.remoteServerPort,
- val zkServerAddresses: String,
- val serializer: ZkSerializer) extends ErrorHandler with ClusterNode {
- self ⇒
-
- if ((hostname eq null) || hostname == "") throw new IllegalArgumentException("Host name must not be null or empty string")
- if (port < 1) throw new IllegalArgumentException("Port must be a positive number")
- if (nodeAddress eq null) throw new IllegalArgumentException("'nodeAddress' can not be 'null'")
-
- val clusterJmxObjectName = JMX.nameFor(hostname, "monitoring", "cluster")
-
- import Cluster._
-
- // private val connectToAllNewlyArrivedMembershipNodesInClusterLock = new AtomicBoolean(false)
-
- private[cluster] lazy val remoteClientLifeCycleHandler = actorOf(Props(new Actor {
- def receive = {
- case RemoteClientError(cause, client, address) ⇒ client.shutdownClientModule()
- case RemoteClientDisconnected(client, address) ⇒ client.shutdownClientModule()
- case _ ⇒ //ignore other
- }
- }), "akka.cluster.RemoteClientLifeCycleListener")
-
- private[cluster] lazy val remoteDaemon = new LocalActorRef(Props(new RemoteClusterDaemon(this)).copy(dispatcher = new PinnedDispatcher()), RemoteClusterDaemon.Address, systemService = true)
-
- private[cluster] lazy val remoteDaemonSupervisor = Supervisor(
- SupervisorConfig(
- OneForOneStrategy(List(classOf[Exception]), Int.MaxValue, Int.MaxValue), // is infinite restart what we want?
- Supervise(
- remoteDaemon,
- Permanent)
- :: Nil)).start()
-
- lazy val remoteService: RemoteSupport = {
- val remote = new akka.remote.netty.NettyRemoteSupport
- remote.start(hostname, port)
- remote.register(RemoteClusterDaemon.Address, remoteDaemon)
- remote.addListener(RemoteFailureDetector.sender)
- remote.addListener(remoteClientLifeCycleHandler)
- remote
- }
-
- lazy val remoteServerAddress: InetSocketAddress = remoteService.address
-
- lazy val metricsManager: NodeMetricsManager = new LocalNodeMetricsManager(zkClient, Cluster.metricsRefreshInterval).start()
-
- // static nodes
- val CLUSTER_PATH = "/" + nodeAddress.clusterName
- val MEMBERSHIP_PATH = CLUSTER_PATH + "/members"
- val CONFIGURATION_PATH = CLUSTER_PATH + "/config"
- val PROVISIONING_PATH = CLUSTER_PATH + "/provisioning"
- val ACTOR_ADDRESS_NODES_TO_PATH = CLUSTER_PATH + "/actor-address-to-nodes"
- val ACTOR_ADDRESS_REGISTRY_PATH = CLUSTER_PATH + "/actor-address-registry"
- val ACTOR_UUID_REGISTRY_PATH = CLUSTER_PATH + "/actor-uuid-registry"
- val ACTOR_ADDRESS_TO_UUIDS_PATH = CLUSTER_PATH + "/actor-address-to-uuids"
- val NODE_TO_ACTOR_UUIDS_PATH = CLUSTER_PATH + "/node-to-actors-uuids"
- val NODE_METRICS = CLUSTER_PATH + "/metrics"
-
- val basePaths = List(
- CLUSTER_PATH,
- MEMBERSHIP_PATH,
- ACTOR_ADDRESS_REGISTRY_PATH,
- ACTOR_UUID_REGISTRY_PATH,
- ACTOR_ADDRESS_NODES_TO_PATH,
- NODE_TO_ACTOR_UUIDS_PATH,
- ACTOR_ADDRESS_TO_UUIDS_PATH,
- CONFIGURATION_PATH,
- PROVISIONING_PATH,
- NODE_METRICS)
-
- val LEADER_ELECTION_PATH = CLUSTER_PATH + "/leader" // should NOT be part of 'basePaths' only used by 'leaderLock'
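-
- // For illustration, with clusterName = "test-cluster": an actor stored under address
- // "hello-service" and checked out on node "node1" is represented by paths like these
- // (see the class scaladoc above for the full tree):
- //
- //   /test-cluster/members/node1                                  (ephemeral, created by 'joinCluster')
- //   /test-cluster/actor-address-registry/hello-service/serializer
- //   /test-cluster/actor-address-to-nodes/hello-service/node1
- //   /test-cluster/node-to-actors-uuids/node1/<actor uuid>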
-
- private val membershipNodePath = membershipPathFor(nodeAddress.nodeName)
-
- def membershipNodes: Array[String] = locallyCachedMembershipNodes.toList.toArray.asInstanceOf[Array[String]]
-
- // zookeeper listeners
- private val stateListener = new StateListener(this)
- private val membershipListener = new MembershipChildListener(this)
-
- // cluster node listeners
- private val changeListeners = new CopyOnWriteArrayList[ChangeListener]()
-
- // Address -> ClusterActorRef
- private[akka] val clusterActorRefs = new Index[InetSocketAddress, ClusterActorRef]
-
- case class VersionedConnectionState(version: Long, connections: Map[String, Tuple2[InetSocketAddress, ActorRef]])
-
- // all the connections to other nodes
- private[akka] val nodeConnections = {
- var conns = Map.empty[String, Tuple2[InetSocketAddress, ActorRef]]
- // add the remote connection to 'this' node as well, but as a 'local' actor
- if (includeRefNodeInReplicaSet) conns += (nodeAddress.nodeName -> (remoteServerAddress, remoteDaemon))
- new AtomicReference[VersionedConnectionState](VersionedConnectionState(0, conns))
- }
-
- private val isShutdownFlag = new AtomicBoolean(false)
-
- // ZooKeeper client
- private[cluster] val zkClient = new AkkaZkClient(zkServerAddresses, sessionTimeout, connectionTimeout, serializer)
-
- // leader election listener, registered to the 'leaderLock' below
- private[cluster] val leaderElectionCallback = new LockListener {
- override def lockAcquired() {
- EventHandler.info(this, "Node [%s] is the new leader".format(self.nodeAddress.nodeName))
- self.publish(NewLeader(self.nodeAddress.nodeName))
- }
-
- override def lockReleased() {
- EventHandler.info(this, "Node [%s] is *NOT* the leader anymore".format(self.nodeAddress.nodeName))
- }
- }
-
- // leader election lock in ZooKeeper
- private[cluster] val leaderLock = new WriteLock(
- zkClient.connection.getZookeeper,
- LEADER_ELECTION_PATH, null,
- leaderElectionCallback)
-
- if (enableJMX) createMBean
-
- boot()
-
- // =======================================
- // Node
- // =======================================
-
- private[cluster] def boot() {
- EventHandler.info(this,
- ("\nCreating cluster node with" +
- "\n\tcluster name = [%s]" +
- "\n\tnode name = [%s]" +
- "\n\tport = [%s]" +
- "\n\tzookeeper server addresses = [%s]" +
- "\n\tserializer = [%s]")
- .format(nodeAddress.clusterName, nodeAddress.nodeName, port, zkServerAddresses, serializer))
- EventHandler.info(this, "Starting up remote server [%s]".format(remoteServerAddress.toString))
- createZooKeeperPathStructureIfNeeded()
- registerListeners()
- joinCluster()
- joinLeaderElection()
- fetchMembershipNodes()
- EventHandler.info(this, "Cluster node [%s] started successfully".format(nodeAddress))
- }
-
- def isShutdown = isShutdownFlag.get
-
- def start() {}
-
- def shutdown() {
- isShutdownFlag.set(true)
-
- def shutdownNode() {
- ignore[ZkNoNodeException](zkClient.deleteRecursive(membershipNodePath))
-
- locallyCachedMembershipNodes.clear()
-
- nodeConnections.get.connections.toList.foreach({
- case (_, (address, _)) ⇒
- Actor.remote.shutdownClientConnection(address) // shut down client connections
- })
-
- remoteService.shutdown() // shutdown server
-
- RemoteFailureDetector.sender.stop()
- remoteClientLifeCycleHandler.stop()
- remoteDaemon.stop()
-
- // for monitoring remote listener
- registry.local.actors.filter(remoteService.hasListener).foreach(_.stop())
-
- nodeConnections.set(VersionedConnectionState(0, Map.empty[String, Tuple2[InetSocketAddress, ActorRef]]))
-
- disconnect()
- EventHandler.info(this, "Cluster node shut down [%s]".format(nodeAddress))
- }
-
- shutdownNode()
- }
-
- def disconnect(): ClusterNode = {
- zkClient.unsubscribeAll()
- zkClient.close()
- this
- }
-
- def reconnect(): ClusterNode = {
- zkClient.reconnect()
- this
- }
-
- // =======================================
- // Change notification
- // =======================================
-
- /**
- * Registers a cluster change listener.
- */
- def register(listener: ChangeListener): ClusterNode = {
- changeListeners.add(listener)
- this
- }
-
- private[cluster] def publish(change: ChangeNotification) {
- changeListeners.iterator.foreach(_.notify(change, this))
- }
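-
- // A sketch of registering a listener, e.g. to react to this node winning the
- // leader election ('NewLeader' is published from 'leaderElectionCallback' below);
- // it assumes the 'notify(change, node)' signature used in 'publish' above:
- //
- //   Cluster.node.register(new ChangeListener {
- //     override def notify(change: ChangeNotification, node: ClusterNode) {
- //       change match {
- //         case NewLeader(nodeName) ⇒ EventHandler.info(this, "New leader: " + nodeName)
- //         case _ ⇒ // ignore other notifications
- //       }
- //     }
- //   })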
-
- // =======================================
- // Leader
- // =======================================
-
- /**
- * Returns the name of the current leader lock.
- */
- def leader: String = leaderLock.getId
-
- /**
- * Returns true if 'this' node is the current leader.
- */
- def isLeader: Boolean = leaderLock.isOwner
-
- /**
- * Explicitly resign from being a leader. If this node is not a leader then this operation is a no-op.
- */
- def resign() {
- if (isLeader) leaderLock.unlock()
- }
-
- // =======================================
- // Actor
- // =======================================
-
- /**
- * Clusters an actor of a specific type. If the actor is already clustered then the clustered version will be updated
- * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly
- * available durable store.
- */
- def store[T <: Actor](actorAddress: String, actorClass: Class[T], serializer: Serializer): ClusterNode =
- store(actorAddress, () ⇒ Actor.actorOf(actorClass, actorAddress), 0, Transient, false, serializer)
-
- /**
- * Clusters an actor of a specific type. If the actor is already clustered then the clustered version will be updated
- * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly
- * available durable store.
- */
- def store[T <: Actor](actorAddress: String, actorClass: Class[T], replicationScheme: ReplicationScheme, serializer: Serializer): ClusterNode =
- store(actorAddress, () ⇒ Actor.actorOf(actorClass, actorAddress), 0, replicationScheme, false, serializer)
-
- /**
- * Clusters an actor of a specific type. If the actor is already clustered then the clustered version will be updated
- * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly
- * available durable store.
- */
- def store[T <: Actor](actorAddress: String, actorClass: Class[T], nrOfInstances: Int, serializer: Serializer): ClusterNode =
- store(actorAddress, () ⇒ Actor.actorOf(actorClass, actorAddress), nrOfInstances, Transient, false, serializer)
-
- /**
- * Clusters an actor of a specific type. If the actor is already clustered then the clustered version will be updated
- * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly
- * available durable store.
- */
- def store[T <: Actor](actorAddress: String, actorClass: Class[T], nrOfInstances: Int, replicationScheme: ReplicationScheme, serializer: Serializer): ClusterNode =
- store(actorAddress, () ⇒ Actor.actorOf(actorClass, actorAddress), nrOfInstances, replicationScheme, false, serializer)
-
- /**
- * Clusters an actor of a specific type. If the actor is already clustered then the clustered version will be updated
- * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly
- * available durable store.
- */
- def store[T <: Actor](actorAddress: String, actorClass: Class[T], serializeMailbox: Boolean, serializer: Serializer): ClusterNode =
- store(actorAddress, () ⇒ Actor.actorOf(actorClass, actorAddress), 0, Transient, serializeMailbox, serializer)
-
- /**
- * Clusters an actor of a specific type. If the actor is already clustered then the clustered version will be updated
- * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly
- * available durable store.
- */
- def store[T <: Actor](actorAddress: String, actorClass: Class[T], replicationScheme: ReplicationScheme, serializeMailbox: Boolean, serializer: Serializer): ClusterNode =
- store(actorAddress, () ⇒ Actor.actorOf(actorClass, actorAddress), 0, replicationScheme, serializeMailbox, serializer)
-
- /**
- * Clusters an actor of a specific type. If the actor is already clustered then the clustered version will be updated
- * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly
- * available durable store.
- */
- def store[T <: Actor](actorAddress: String, actorClass: Class[T], nrOfInstances: Int, serializeMailbox: Boolean, serializer: Serializer): ClusterNode =
- store(actorAddress, () ⇒ Actor.actorOf(actorClass, actorAddress), nrOfInstances, Transient, serializeMailbox, serializer)
-
- /**
- * Clusters an actor of a specific type. If the actor is already clustered then the clustered version will be updated
- * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly
- * available durable store.
- */
- def store[T <: Actor](actorAddress: String, actorClass: Class[T], nrOfInstances: Int, replicationScheme: ReplicationScheme, serializeMailbox: Boolean, serializer: Serializer): ClusterNode =
- store(actorAddress, () ⇒ Actor.actorOf(actorClass, actorAddress), nrOfInstances, replicationScheme, serializeMailbox, serializer)
-
- /**
- * Clusters an actor, identified by its address. If the actor is already clustered then the clustered version will be updated
- * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly
- * available durable store.
- */
- def store(actorAddress: String, actorFactory: () ⇒ ActorRef, serializer: Serializer): ClusterNode =
- store(actorAddress, actorFactory, 0, Transient, false, serializer)
-
- /**
- * Clusters an actor, identified by its address. If the actor is already clustered then the clustered version will be updated
- * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly
- * available durable store.
- */
- def store(actorAddress: String, actorFactory: () ⇒ ActorRef, serializeMailbox: Boolean, serializer: Serializer): ClusterNode =
- store(actorAddress, actorFactory, 0, Transient, serializeMailbox, serializer)
-
- /**
- * Clusters an actor, identified by its address. If the actor is already clustered then the clustered version will be updated
- * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly
- * available durable store.
- */
- def store(actorAddress: String, actorFactory: () ⇒ ActorRef, replicationScheme: ReplicationScheme, serializer: Serializer): ClusterNode =
- store(actorAddress, actorFactory, 0, replicationScheme, false, serializer)
-
- /**
- * Clusters an actor, identified by its address. If the actor is already clustered then the clustered version will be updated
- * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly
- * available durable store.
- */
- def store(actorAddress: String, actorFactory: () ⇒ ActorRef, nrOfInstances: Int, serializer: Serializer): ClusterNode =
- store(actorAddress, actorFactory, nrOfInstances, Transient, false, serializer)
-
- /**
- * Clusters an actor, identified by its address. If the actor is already clustered then the clustered version will be updated
- * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly
- * available durable store.
- */
- def store(actorAddress: String, actorFactory: () ⇒ ActorRef, nrOfInstances: Int, replicationScheme: ReplicationScheme, serializer: Serializer): ClusterNode =
- store(actorAddress, actorFactory, nrOfInstances, replicationScheme, false, serializer)
-
- /**
- * Clusters an actor, identified by its address. If the actor is already clustered then the clustered version will be updated
- * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly
- * available durable store.
- */
- def store(actorAddress: String, actorFactory: () ⇒ ActorRef, nrOfInstances: Int, serializeMailbox: Boolean, serializer: Serializer): ClusterNode =
- store(actorAddress, actorFactory, nrOfInstances, Transient, serializeMailbox, serializer)
-
- /**
- * Clusters an actor, identified by its address. If the actor is already clustered then the clustered version will be updated
- * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly
- * available durable store.
- */
- def store(actorAddress: String, actorFactory: () ⇒ ActorRef, replicationScheme: ReplicationScheme, serializeMailbox: Boolean, serializer: Serializer): ClusterNode =
- store(actorAddress, actorFactory, 0, replicationScheme, serializeMailbox, serializer)
-
- /**
- * Needed to have reflection through structural typing work.
- */
- def store(actorAddress: String, actorFactory: () ⇒ ActorRef, nrOfInstances: Int, replicationScheme: ReplicationScheme, serializeMailbox: Boolean, serializer: AnyRef): ClusterNode =
- store(actorAddress, actorFactory, nrOfInstances, replicationScheme, serializeMailbox, serializer.asInstanceOf[Serializer])
-
- /**
- * Needed to have reflection through structural typing work.
- */
- def store(actorAddress: String, actorFactory: () ⇒ ActorRef, nrOfInstances: Int, serializeMailbox: Boolean, serializer: AnyRef): ClusterNode =
- store(actorAddress, actorFactory, nrOfInstances, Transient, serializeMailbox, serializer)
-
- /**
- * Clusters an actor. If the actor is already clustered then the clustered version will be updated
- * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly
- * available durable store.
- */
- def store(
- actorAddress: String,
- actorFactory: () ⇒ ActorRef,
- nrOfInstances: Int,
- replicationScheme: ReplicationScheme,
- serializeMailbox: Boolean,
- serializer: Serializer): ClusterNode = {
-
- EventHandler.debug(this,
- "Storing actor with address [%s] in cluster".format(actorAddress))
-
- val actorFactoryBytes =
- Serialization.serialize(actorFactory) match {
- case Left(error) ⇒ throw error
- case Right(bytes) ⇒
- if (shouldCompressData) LZF.compress(bytes)
- else bytes
- }
-
- val actorAddressRegistryPath = actorAddressRegistryPathFor(actorAddress)
-
- // create ADDRESS -> Array[Byte] for actor registry
- try {
- zkClient.writeData(actorAddressRegistryPath, actorFactoryBytes)
- } catch {
- case e: ZkNoNodeException ⇒ // if not stored yet, store the actor
- zkClient.retryUntilConnected(new Callable[Either[String, Exception]]() {
- def call: Either[String, Exception] = {
- try {
- Left(zkClient.connection.create(actorAddressRegistryPath, actorFactoryBytes, CreateMode.PERSISTENT))
- } catch {
- case e: KeeperException.NodeExistsException ⇒ Right(e)
- }
- }
- }) match {
- case Left(path) ⇒ path
- case Right(exception) ⇒ actorAddressRegistryPath
- }
- }
-
- // create ADDRESS -> SERIALIZER CLASS NAME mapping
- try {
- zkClient.createPersistent(actorAddressRegistrySerializerPathFor(actorAddress), serializer.identifier.toString)
- } catch {
- case e: ZkNodeExistsException ⇒ zkClient.writeData(actorAddressRegistrySerializerPathFor(actorAddress), serializer.identifier.toString)
- }
-
- // create ADDRESS -> NODE mapping
- ignore[ZkNodeExistsException](zkClient.createPersistent(actorAddressToNodesPathFor(actorAddress)))
-
- // create ADDRESS -> UUIDs mapping
- ignore[ZkNodeExistsException](zkClient.createPersistent(actorAddressToUuidsPathFor(actorAddress)))
-
- useActorOnNodes(nodesForNrOfInstances(nrOfInstances, Some(actorAddress)).toArray, actorAddress)
-
- this
- }
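-
- // A usage sketch: clustering an actor type and deploying it on three nodes.
- // 'HelloActor' and 'mySerializer' are illustrative placeholders; any configured
- // akka.serialization.Serializer instance will do:
- //
- //   class HelloActor extends Actor {
- //     def receive = { case msg ⇒ EventHandler.info(this, "got: " + msg) }
- //   }
- //   Cluster.node.store("hello-service", classOf[HelloActor], 3, mySerializer)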
-
- /**
- * Removes actor from the cluster.
- */
- // def remove(actorRef: ActorRef) {
- // remove(actorRef.address)
- // }
-
- /**
- * Removes actor with uuid from the cluster.
- */
- // def remove(actorAddress: String) {
- // releaseActorOnAllNodes(actorAddress)
- // // warning: ordering matters here
- // // FIXME remove ADDRESS to UUID mapping?
- // ignore[ZkNoNodeException](zkClient.deleteRecursive(actorAddressToUuidsPathFor(actorAddress)))
- // ignore[ZkNoNodeException](zkClient.deleteRecursive(actorAddressRegistryPathFor(actorAddress)))
- // ignore[ZkNoNodeException](zkClient.deleteRecursive(actorAddressToNodesPathFor(actorAddress)))
- // }
-
- /**
- * Is the actor with the specified address clustered or not?
- */
- def isClustered(actorAddress: String): Boolean = zkClient.exists(actorAddressRegistryPathFor(actorAddress))
-
- /**
- * Is the actor with the specified address in use on 'this' node or not?
- */
- def isInUseOnNode(actorAddress: String): Boolean = isInUseOnNode(actorAddress, nodeAddress)
-
- /**
- * Is the actor with the specified address in use on the given node or not?
- */
- def isInUseOnNode(actorAddress: String, node: NodeAddress): Boolean = zkClient.exists(actorAddressToNodesPathFor(actorAddress, node.nodeName))
-
- /**
- * Is the actor with the specified address in use on the given node or not?
- */
- def isInUseOnNode(actorAddress: String, nodeName: String): Boolean = zkClient.exists(actorAddressToNodesPathFor(actorAddress, nodeName))
-
- /**
- * Checks out an actor for use on this node, i.e. as a 'LocalActorRef', while also making it available
- * for remote access through lookup by its UUID.
- */
- def use[T <: Actor](actorAddress: String): Option[LocalActorRef] = {
- val nodeName = nodeAddress.nodeName
-
- val actorFactoryPath = actorAddressRegistryPathFor(actorAddress)
- zkClient.retryUntilConnected(new Callable[Either[Exception, () ⇒ LocalActorRef]]() {
- def call: Either[Exception, () ⇒ LocalActorRef] = {
- try {
-
- val actorFactoryBytes =
- if (shouldCompressData) LZF.uncompress(zkClient.connection.readData(actorFactoryPath, new Stat, false))
- else zkClient.connection.readData(actorFactoryPath, new Stat, false)
-
- val actorFactory =
- Serialization.deserialize(actorFactoryBytes, classOf[() ⇒ LocalActorRef], None) match {
- case Left(error) ⇒ throw error
- case Right(instance) ⇒ instance.asInstanceOf[() ⇒ LocalActorRef]
- }
-
- Right(actorFactory)
- } catch {
- case e: KeeperException.NoNodeException ⇒ Left(e)
- }
- }
- }) match {
- case Left(exception) ⇒ throw exception
- case Right(actorFactory) ⇒
- val actorRef = actorFactory()
-
- EventHandler.debug(this,
- "Checking out actor [%s] to be used on node [%s] as local actor"
- .format(actorAddress, nodeName))
-
- val uuid = actorRef.uuid
-
- // create UUID registry
- ignore[ZkNodeExistsException](zkClient.createPersistent(actorUuidRegistryPathFor(uuid)))
-
- // create UUID -> NODE mapping
- try {
- zkClient.createPersistent(actorUuidRegistryNodePathFor(uuid), nodeName)
- } catch {
- case e: ZkNodeExistsException ⇒ zkClient.writeData(actorUuidRegistryNodePathFor(uuid), nodeName)
- }
-
- // create UUID -> ADDRESS
- try {
- zkClient.createPersistent(actorUuidRegistryAddressPathFor(uuid), actorAddress)
- } catch {
- case e: ZkNodeExistsException ⇒ zkClient.writeData(actorUuidRegistryAddressPathFor(uuid), actorAddress)
- }
-
- // create UUID -> REMOTE ADDRESS (InetSocketAddress) mapping
- try {
- zkClient.createPersistent(actorUuidRegistryRemoteAddressPathFor(uuid), remoteServerAddress)
- } catch {
- case e: ZkNodeExistsException ⇒ zkClient.writeData(actorUuidRegistryRemoteAddressPathFor(uuid), remoteServerAddress)
- }
-
- // create ADDRESS -> UUID mapping
- try {
- zkClient.createPersistent(actorAddressRegistryUuidPathFor(actorAddress), uuid)
- } catch {
- case e: ZkNodeExistsException ⇒ zkClient.writeData(actorAddressRegistryUuidPathFor(actorAddress), uuid)
- }
-
- // create NODE -> UUID mapping
- ignore[ZkNodeExistsException](zkClient.createPersistent(nodeToUuidsPathFor(nodeName, uuid), true))
-
- // create ADDRESS -> UUIDs mapping
- ignore[ZkNodeExistsException](zkClient.createPersistent(actorAddressToUuidsPathFor(actorAddress, uuid)))
-
- // create ADDRESS -> NODE mapping
- ignore[ZkNodeExistsException](zkClient.createPersistent(actorAddressToNodesPathFor(actorAddress, nodeName)))
-
- Some(actorRef) // wrap in Option to match the declared return type
- }
- }
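-
- // A sketch of checking the stored actor out for local use on this node; 'use'
- // re-creates the actor from its serialized factory and registers the
- // UUID/address/node mappings listed above:
- //
- //   val localRef: Option[LocalActorRef] = Cluster.node.use("hello-service")
- //   localRef foreach (_ ! "ping")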
-
- /**
- * Using (checking out) actor on a specific set of nodes.
- */
- def useActorOnNodes(nodes: Array[String], actorAddress: String, replicateFromUuid: Option[UUID] = None) {
- EventHandler.debug(this,
- "Sending command to nodes [%s] for checking out actor [%s]".format(nodes.mkString(", "), actorAddress))
-
- val builder = RemoteSystemDaemonMessageProtocol.newBuilder
- .setMessageType(USE)
- .setActorAddress(actorAddress)
-
- // set the UUID to replicated from - if available
- replicateFromUuid foreach (uuid ⇒ builder.setReplicateActorFromUuid(uuidToUuidProtocol(uuid)))
-
- val command = builder.build
-
- nodes foreach { node ⇒
- nodeConnections.get.connections.get(node) foreach { // Option lookup: skip nodes we have no connection for
- case (_, connection) ⇒
- sendCommandToNode(connection, command, async = false)
- }
- }
- }
-
- /**
- * Using (checking out) actor on all nodes in the cluster.
- */
- def useActorOnAllNodes(actorAddress: String, replicateFromUuid: Option[UUID] = None) {
- useActorOnNodes(membershipNodes, actorAddress, replicateFromUuid)
- }
-
- /**
- * Using (checking out) actor on a specific node.
- */
- def useActorOnNode(node: String, actorAddress: String, replicateFromUuid: Option[UUID] = None) {
- useActorOnNodes(Array(node), actorAddress, replicateFromUuid)
- }
-
- /**
- * Checks in an actor after done using it on this node.
- */
- def release(actorRef: ActorRef) {
- release(actorRef.address)
- }
-
- /**
- * Checks in an actor after done using it on this node.
- */
- def release(actorAddress: String) {
-
- // FIXME 'Cluster.release' needs to notify all existing ClusterActorRef's that are using the instance that it is no
- // longer available. Then what to do? Should we even remove this method?
-
- ignore[ZkNoNodeException](zkClient.delete(actorAddressToNodesPathFor(actorAddress, nodeAddress.nodeName)))
-
- uuidsForActorAddress(actorAddress) foreach { uuid ⇒
- EventHandler.debug(this,
- "Releasing actor [%s] with UUID [%s] after usage".format(actorAddress, uuid))
-
- ignore[ZkNoNodeException](zkClient.deleteRecursive(nodeToUuidsPathFor(nodeAddress.nodeName, uuid)))
- ignore[ZkNoNodeException](zkClient.delete(actorUuidRegistryRemoteAddressPathFor(uuid)))
- }
- }
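-
- // The counterpart sketch to 'use': checking the actor back in when this node
- // is done with it removes this node's 'in use' mappings from ZooKeeper:
- //
- //   Cluster.node.release("hello-service")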
-
- /**
- * Releases (checking in) all actors with a specific address on all nodes in the cluster where the actor is in 'use'.
- */
- private[akka] def releaseActorOnAllNodes(actorAddress: String) {
- EventHandler.debug(this,
- "Releasing (checking in) all actors with address [%s] on all nodes in cluster".format(actorAddress))
-
- val command = RemoteSystemDaemonMessageProtocol.newBuilder
- .setMessageType(RELEASE)
- .setActorAddress(actorAddress)
- .build
-
- nodesForActorsInUseWithAddress(actorAddress) foreach { node ⇒
- nodeConnections.get.connections.get(node) foreach { // Option lookup: skip nodes we have no connection for
- case (_, connection) ⇒ sendCommandToNode(connection, command, async = true)
- }
- }
- }
-
- /**
- * Creates an ActorRef with a Router to a set of clustered actors.
- */
- def ref(actorAddress: String, router: RouterType, failureDetector: FailureDetectorType): ActorRef =
- ClusterActorRef.newRef(actorAddress, router, failureDetector, Actor.TIMEOUT)
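-
- // A sketch of obtaining a cluster-aware, router-backed reference, assuming
- // 'RouterType.RoundRobin' for the router and 'myFailureDetector' as a placeholder
- // for whichever FailureDetectorType value the deployment uses:
- //
- //   val routedRef: ActorRef = Cluster.node.ref("hello-service", RouterType.RoundRobin, myFailureDetector)
- //   routedRef ! "ping" // routed to one of the nodes the actor is in use on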
-
- /**
- * Returns the UUIDs of all actors checked out on this node.
- */
- private[akka] def uuidsForActorsInUse: Array[UUID] = uuidsForActorsInUseOnNode(nodeAddress.nodeName)
-
- /**
- * Returns the addresses of all actors checked out on this node.
- */
- def addressesForActorsInUse: Array[String] = actorAddressForUuids(uuidsForActorsInUse)
-
- /**
- * Returns the UUIDs of all actors registered in this cluster.
- */
- private[akka] def uuidsForClusteredActors: Array[UUID] =
- zkClient.getChildren(ACTOR_UUID_REGISTRY_PATH).toList.map(new UUID(_)).toArray.asInstanceOf[Array[UUID]]
-
- /**
- * Returns the addresses of all actors registered in this cluster.
- */
- def addressesForClusteredActors: Array[String] = actorAddressForUuids(uuidsForClusteredActors)
-
- /**
- * Returns the actor address for the actor with a specific UUID.
- */
- private[akka] def actorAddressForUuid(uuid: UUID): Option[String] = {
- try {
- Some(zkClient.readData(actorUuidRegistryAddressPathFor(uuid)).asInstanceOf[String])
- } catch {
- case e: ZkNoNodeException ⇒ None
- }
- }
-
- /**
- * Returns the actor addresses for all the specified UUIDs.
- */
- private[akka] def actorAddressForUuids(uuids: Array[UUID]): Array[String] =
- uuids map (actorAddressForUuid(_)) filter (_.isDefined) map (_.get)
-
- /**
- * Returns the UUIDs for the actor with the specified address.
- */
- private[akka] def uuidsForActorAddress(actorAddress: String): Array[UUID] = {
- try {
- zkClient.getChildren(actorAddressToUuidsPathFor(actorAddress)).toList.toArray map {
- case c: CharSequence ⇒ new UUID(c)
- } filter (_ ne null)
- } catch {
- case e: ZkNoNodeException ⇒ Array[UUID]()
- }
- }
-
- /**
- * Returns the names of all nodes where the actor with the specified address is in use.
- */
- private[akka] def nodesForActorsInUseWithAddress(actorAddress: String): Array[String] = {
- try {
- zkClient.getChildren(actorAddressToNodesPathFor(actorAddress)).toList.toArray.asInstanceOf[Array[String]]
- } catch {
- case e: ZkNoNodeException ⇒ Array[String]()
- }
- }
-
- /**
- * Returns the UUIDs of all actors in use registered on a specific node.
- */
- private[akka] def uuidsForActorsInUseOnNode(nodeName: String): Array[UUID] = {
- try {
- zkClient.getChildren(nodeToUuidsPathFor(nodeName)).toList.toArray map {
- case c: CharSequence ⇒ new UUID(c)
- } filter (_ ne null)
- } catch {
- case e: ZkNoNodeException ⇒ Array[UUID]()
- }
- }
-
- /**
- * Returns the addresses of all actors in use registered on a specific node.
- */
- def addressesForActorsInUseOnNode(nodeName: String): Array[String] =
- actorAddressForUuids(uuidsForActorsInUseOnNode(nodeName)) // same lookup as above, reusing the helper
-
- /**
- * Returns Serializer for actor with specific address.
- */
- def serializerForActor(actorAddress: String): Serializer = try {
- Serialization.serializerByIdentity(zkClient.readData(actorAddressRegistrySerializerPathFor(actorAddress), new Stat).asInstanceOf[String].toByte)
- } catch {
- case e: ZkNoNodeException ⇒ throw new IllegalStateException("No serializer found for actor with address [%s]".format(actorAddress))
- }
-
- /**
- * Returns the remote socket addresses, paired with the actor's UUID, for the nodes that the clustered actor is in use on.
- */
- def inetSocketAddressesForActor(actorAddress: String): Array[(UUID, InetSocketAddress)] = {
- try {
- for {
- uuid ← uuidsForActorAddress(actorAddress)
- } yield {
- val remoteAddress = zkClient.readData(actorUuidRegistryRemoteAddressPathFor(uuid)).asInstanceOf[InetSocketAddress]
- (uuid, remoteAddress)
- }
- } catch {
- case e: ZkNoNodeException ⇒
- EventHandler.warning(this,
- "Could not retrieve remote socket address for node hosting actor [%s] due to: %s"
- .format(actorAddress, e.toString))
- Array[(UUID, InetSocketAddress)]()
- }
- }
-
- // =======================================
- // Compute Grid
- // =======================================
-
- /**
- * Send a function 'Function0[Unit]' to be invoked on a number of randomly chosen nodes (defined by the 'nrOfInstances' argument).
- */
- def send(f: Function0[Unit], nrOfInstances: Int) {
- Serialization.serialize(f) match {
- case Left(error) ⇒ throw error
- case Right(bytes) ⇒
- val message = RemoteSystemDaemonMessageProtocol.newBuilder
- .setMessageType(FUNCTION_FUN0_UNIT)
- .setPayload(ByteString.copyFrom(bytes))
- .build
- nodeConnectionsForNrOfInstances(nrOfInstances) foreach (_ ! message)
- }
- }
-
- /**
- * Send a function 'Function0[Any]' to be invoked on a number of randomly chosen nodes (defined by the 'nrOfInstances' argument).
- * Returns a 'List' with all the 'Future's from the computation.
- */
- def send(f: Function0[Any], nrOfInstances: Int): List[Future[Any]] = {
- Serialization.serialize(f) match {
- case Left(error) ⇒ throw error
- case Right(bytes) ⇒
- val message = RemoteSystemDaemonMessageProtocol.newBuilder
- .setMessageType(FUNCTION_FUN0_ANY)
- .setPayload(ByteString.copyFrom(bytes))
- .build
- val results = nodeConnectionsForNrOfInstances(nrOfInstances) map (_ ? message)
- results.toList.asInstanceOf[List[Future[Any]]]
- }
- }
-
- /**
- * Send a function 'Function1[Any, Unit]' to be invoked on a number of randomly chosen nodes (defined by the 'nrOfInstances' argument)
- * with the argument specified.
- */
- def send(f: Function1[Any, Unit], arg: Any, nrOfInstances: Int) {
- Serialization.serialize((f, arg)) match {
- case Left(error) ⇒ throw error
- case Right(bytes) ⇒
- val message = RemoteSystemDaemonMessageProtocol.newBuilder
- .setMessageType(FUNCTION_FUN1_ARG_UNIT)
- .setPayload(ByteString.copyFrom(bytes))
- .build
- nodeConnectionsForNrOfInstances(nrOfInstances) foreach (_ ! message)
- }
- }
-
- /**
- * Send a function 'Function1[Any, Any]' to be invoked on a number of randomly chosen nodes (defined by the 'nrOfInstances' argument)
- * with the argument specified.
- * Returns a 'List' with all the 'Future's from the computation.
- */
- def send(f: Function1[Any, Any], arg: Any, nrOfInstances: Int): List[Future[Any]] = {
- Serialization.serialize((f, arg)) match {
- case Left(error) ⇒ throw error
- case Right(bytes) ⇒
- val message = RemoteSystemDaemonMessageProtocol.newBuilder
- .setMessageType(FUNCTION_FUN1_ARG_ANY)
- .setPayload(ByteString.copyFrom(bytes))
- .build
- val results = nodeConnectionsForNrOfInstances(nrOfInstances) map (_ ? message)
- results.toList.asInstanceOf[List[Future[Any]]]
- }
- }
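-
- // A compute-grid usage sketch: the closures are serialized, so they must not
- // capture non-serializable state. Fire-and-forget and result-returning variants:
- //
- //   Cluster.node.send(() ⇒ println("side effect on two nodes"), nrOfInstances = 2)
- //
- //   val futures = Cluster.node.send(() ⇒ "hello from " + Config.nodename, 2)
- //   futures map (Await.result(_, 10 seconds)) foreach println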
-
- // =======================================
- // Config
- // =======================================
-
- /**
- * Stores a configuration element under a specific key.
- * If the key already exists then it will be overwritten.
- */
- def setConfigElement(key: String, bytes: Array[Byte]) {
- val compressedBytes = if (shouldCompressData) LZF.compress(bytes) else bytes
- EventHandler.debug(this,
- "Storing config element under key [%s] in cluster registry".format(key))
- zkClient.retryUntilConnected(new Callable[Either[Unit, Exception]]() {
- def call: Either[Unit, Exception] = {
- try {
- Left(zkClient.connection.create(configurationPathFor(key), compressedBytes, CreateMode.PERSISTENT))
- } catch {
- case e: KeeperException.NodeExistsException ⇒
- try {
- Left(zkClient.connection.writeData(configurationPathFor(key), compressedBytes))
- } catch {
- case e: Exception ⇒ Right(e)
- }
- }
- }
- }) match {
- case Left(_) ⇒ /* do nothing */
- case Right(exception) ⇒ throw exception
- }
- }
-
- /**
- * Returns the config element stored under the key.
- * Returns Some(bytes) if the element exists, else None.
- */
- def getConfigElement(key: String): Option[Array[Byte]] = try {
- val bytes = zkClient.connection.readData(configurationPathFor(key), new Stat, true)
- Some(if (shouldCompressData) LZF.uncompress(bytes) else bytes) // mirror the compression in 'setConfigElement'
- } catch {
- case e: KeeperException.NoNodeException ⇒ None
- }
-
- /**
- * Removes configuration element for a specific key.
- * Does nothing if the key does not exist.
- */
- def removeConfigElement(key: String) {
- ignore[ZkNoNodeException] {
- EventHandler.debug(this,
- "Removing config element with key [%s] from cluster registry".format(key))
- zkClient.deleteRecursive(configurationPathFor(key))
- }
- }
-
- /**
- * Returns a list with all config element keys.
- */
- def getConfigElementKeys: Array[String] = zkClient.getChildren(CONFIGURATION_PATH).toList.toArray.asInstanceOf[Array[String]]
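-
- // A config registry round-trip sketch; values are raw bytes, compressed with
- // LZF transparently when 'akka.remote.use-compression' is enabled:
- //
- //   Cluster.node.setConfigElement("service-timeout", "5000".getBytes("UTF-8"))
- //   val timeout = Cluster.node.getConfigElement("service-timeout") map (new String(_, "UTF-8"))
- //   Cluster.node.removeConfigElement("service-timeout")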
-
- // =======================================
- // Private
- // =======================================
-
- private def sendCommandToNode(connection: ActorRef, command: RemoteSystemDaemonMessageProtocol, async: Boolean = true) {
- if (async) {
- connection ! command
- } else {
- try {
- Await.result(connection ? (command, remoteDaemonAckTimeout), remoteDaemonAckTimeout millis).asInstanceOf[Status] match { // await as long as the ask timeout
- case Success(status) ⇒
- EventHandler.debug(this, "Remote command sent to [%s] successfully received".format(status))
- case Failure(cause) ⇒
- EventHandler.error(cause, this, cause.toString)
- throw cause
- }
- } catch {
- case e: TimeoutException ⇒
- EventHandler.error(e, this, "Remote command to [%s] timed out".format(connection.address))
- throw e
- case e: Exception ⇒
- EventHandler.error(e, this, "Could not send remote command to [%s] due to: %s".format(connection.address, e.toString))
- throw e
- }
- }
- }
-
- private[cluster] def membershipPathFor(node: String): String = "%s/%s".format(MEMBERSHIP_PATH, node)
-
- private[cluster] def configurationPathFor(key: String): String = "%s/%s".format(CONFIGURATION_PATH, key)
-
- private[cluster] def actorAddressToNodesPathFor(actorAddress: String): String = "%s/%s".format(ACTOR_ADDRESS_NODES_TO_PATH, actorAddress)
-
- private[cluster] def actorAddressToNodesPathFor(actorAddress: String, nodeName: String): String = "%s/%s".format(actorAddressToNodesPathFor(actorAddress), nodeName)
-
- private[cluster] def nodeToUuidsPathFor(node: String): String = "%s/%s".format(NODE_TO_ACTOR_UUIDS_PATH, node)
-
- private[cluster] def nodeToUuidsPathFor(node: String, uuid: UUID): String = "%s/%s/%s".format(NODE_TO_ACTOR_UUIDS_PATH, node, uuid)
-
- private[cluster] def actorAddressRegistryPathFor(actorAddress: String): String = "%s/%s".format(ACTOR_ADDRESS_REGISTRY_PATH, actorAddress)
-
- private[cluster] def actorAddressRegistrySerializerPathFor(actorAddress: String): String = "%s/%s".format(actorAddressRegistryPathFor(actorAddress), "serializer")
-
- private[cluster] def actorAddressRegistryUuidPathFor(actorAddress: String): String = "%s/%s".format(actorAddressRegistryPathFor(actorAddress), "uuid")
-
- private[cluster] def actorUuidRegistryPathFor(uuid: UUID): String = "%s/%s".format(ACTOR_UUID_REGISTRY_PATH, uuid)
-
- private[cluster] def actorUuidRegistryNodePathFor(uuid: UUID): String = "%s/%s".format(actorUuidRegistryPathFor(uuid), "node")
-
- private[cluster] def actorUuidRegistryAddressPathFor(uuid: UUID): String = "%s/%s".format(actorUuidRegistryPathFor(uuid), "address")
-
- private[cluster] def actorUuidRegistryRemoteAddressPathFor(uuid: UUID): String = "%s/%s".format(actorUuidRegistryPathFor(uuid), "remote-address")
-
- private[cluster] def actorAddressToUuidsPathFor(actorAddress: String): String = "%s/%s".format(ACTOR_ADDRESS_TO_UUIDS_PATH, actorAddress.replace('.', '_'))
-
- private[cluster] def actorAddressToUuidsPathFor(actorAddress: String, uuid: UUID): String = "%s/%s".format(actorAddressToUuidsPathFor(actorAddress), uuid)
-
- /**
- * Returns a random set with node names of size 'nrOfInstances'.
- * Default nrOfInstances is 0, which returns the empty Set.
- */
- private def nodesForNrOfInstances(nrOfInstances: Int = 0, actorAddress: Option[String] = None): Set[String] = {
- var replicaNames = Set.empty[String]
- val nrOfClusterNodes = nodeConnections.get.connections.size
-
- if (nrOfInstances < 1) return replicaNames
- if (nrOfClusterNodes < nrOfInstances) throw new IllegalArgumentException(
- "Replication factor [" + nrOfInstances +
- "] is greater than the number of available nodeNames [" + nrOfClusterNodes + "]")
-
- val preferredNodes =
- if (actorAddress.isDefined) {
- // use 'preferred-nodes' in deployment config for the actor
- Deployer.deploymentFor(actorAddress.get) match {
- case Deploy(_, _, _, _, Cluster(nodes, _, _)) ⇒
- nodes map (node ⇒ DeploymentConfig.nodeNameFor(node)) take nrOfInstances
- case _ ⇒
- throw new ClusterException("Actor [" + actorAddress.get + "] is not configured as clustered")
- }
- } else Vector.empty[String]
-
- for {
- nodeName ← preferredNodes
- key ← nodeConnections.get.connections.keys
- if key == nodeName
- } replicaNames = replicaNames + nodeName
-
- val nrOfCurrentReplicaNames = replicaNames.size
-
- val replicaSet =
- if (nrOfCurrentReplicaNames > nrOfInstances) throw new IllegalStateException("Replica set is larger than replication factor")
- else if (nrOfCurrentReplicaNames == nrOfInstances) replicaNames
- else {
- val random = new java.util.Random(System.currentTimeMillis)
- while (replicaNames.size < nrOfInstances) {
- replicaNames = replicaNames + membershipNodes(random.nextInt(membershipNodes.length)) // bound the index by the array we pick from
- }
- replicaNames
- }
-
- EventHandler.debug(this,
- "Picked out replica set [%s] for actor [%s]".format(replicaSet.mkString(", "), actorAddress))
-
- replicaSet
- }
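-
- // For illustration: with nrOfInstances = 3 and a deployment whose 'preferred-nodes'
- // lists node1 and node2, the replica set becomes node1 and node2 (if connected)
- // plus one more node picked at random from the membership list.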
-
- /**
- * Returns a random set with replica connections of size 'nrOfInstances'.
- * Default nrOfInstances is 0, which returns the empty Set.
- */
- private def nodeConnectionsForNrOfInstances(nrOfInstances: Int = 0, actorAddress: Option[String] = None): Set[ActorRef] = {
- for {
- node ← nodesForNrOfInstances(nrOfInstances, actorAddress)
- connection ← nodeConnections.get.connections.get(node) // Option lookup: skip nodes without a connection
- } yield connection._2
- }
-
- /**
- * Update the list of connections to other nodes in the cluster.
- * Tail recursive, using lockless optimistic concurrency.
- *
- * @return a Map with the remote socket addresses of the disconnected node connections
- */
- @tailrec
- final private[cluster] def connectToAllNewlyArrivedMembershipNodesInCluster(
- newlyConnectedMembershipNodes: Traversable[String],
- newlyDisconnectedMembershipNodes: Traversable[String]): Map[String, InetSocketAddress] = {
-
- var change = false
- val oldState = nodeConnections.get
-
- var newConnections = oldState.connections
-
- // cache the disconnected connections in a map, needed for fail-over of these connections later
- var disconnectedConnections = Map.empty[String, InetSocketAddress]
- newlyDisconnectedMembershipNodes foreach { node ⇒
- disconnectedConnections = disconnectedConnections + (node -> (oldState.connections(node) match {
- case (address, _) ⇒ address
- }))
- }
-
- // remove connections to failed nodes
- newlyDisconnectedMembershipNodes foreach { node ⇒
- newConnections = newConnections - node
- change = true
- }
-
- // add connections newly arrived nodes
- newlyConnectedMembershipNodes foreach { node ⇒
- if (!newConnections.contains(node)) {
-
- // only connect to each replica once
- remoteSocketAddressForNode(node) foreach { address ⇒
- EventHandler.debug(this, "Setting up connection to node with nodename [%s] and address [%s]".format(node, address))
-
- val clusterDaemon = remoteService.actorFor(
- RemoteClusterDaemon.Address, address.getHostName, address.getPort)
- newConnections = newConnections + (node -> (address, clusterDaemon))
- change = true
- }
- }
- }
-
- // add the remote connection to 'this' node as well, but as a 'local' actor
- if (includeRefNodeInReplicaSet)
- newConnections = newConnections + (nodeAddress.nodeName -> (remoteServerAddress, remoteDaemon))
-
- if (!change) disconnectedConnections // nothing changed, no state update needed
- else {
- val newState = new VersionedConnectionState(oldState.version + 1, newConnections)
-
- if (!nodeConnections.compareAndSet(oldState, newState)) {
- // we failed to set the state (concurrent update), try again
- connectToAllNewlyArrivedMembershipNodesInCluster(
- newlyConnectedMembershipNodes, newlyDisconnectedMembershipNodes)
- } else {
- // we succeeded in setting the state, return the disconnected connections
- EventHandler.info(this, "Connected to nodes [\n\t%s]".format(newConnections.mkString("\n\t")))
- disconnectedConnections
- }
- }
- }
-
- private[cluster] def joinCluster() {
- try {
- EventHandler.info(this,
- "Joining cluster as membership node [%s] on [%s]".format(nodeAddress, membershipNodePath))
- zkClient.createEphemeral(membershipNodePath, remoteServerAddress)
- } catch {
- case e: ZkNodeExistsException ⇒
- val error = new ClusterException(
- "Can't join the cluster. The node name [" + nodeAddress.nodeName + "] is already in use by another node.")
- EventHandler.error(error, this, error.toString)
- throw error
- }
- ignore[ZkNodeExistsException](zkClient.createPersistent(nodeToUuidsPathFor(nodeAddress.nodeName)))
- }
-
- private[cluster] def joinLeaderElection(): Boolean = {
- EventHandler.info(this, "Node [%s] is joining leader election".format(nodeAddress.nodeName))
- try {
- leaderLock.lock() // returns true if the lock (leadership) was acquired
- } catch {
- case e: KeeperException.NodeExistsException ⇒ false
- }
- }
-
- private[cluster] def remoteSocketAddressForNode(node: String): Option[InetSocketAddress] = {
- try {
- Some(zkClient.readData(membershipPathFor(node), new Stat).asInstanceOf[InetSocketAddress])
- } catch {
- case e: ZkNoNodeException ⇒ None
- }
- }
-
- private[cluster] def failOverClusterActorRefConnections(from: InetSocketAddress, to: InetSocketAddress) {
- EventHandler.info(this, "Failing over ClusterActorRef from %s to %s".format(from, to))
- clusterActorRefs.valueIterator(from) foreach (_.failOver(from, to))
- }
-
- private[cluster] def migrateActorsOnFailedNodes(
- failedNodes: List[String],
- currentClusterNodes: List[String],
- oldClusterNodes: List[String],
- disconnectedConnections: Map[String, InetSocketAddress]) {
-
- failedNodes.foreach { failedNodeName ⇒
-
- val failedNodeAddress = NodeAddress(nodeAddress.clusterName, failedNodeName)
-
- val myIndex = oldClusterNodes.indexWhere(_.endsWith(nodeAddress.nodeName))
- val failedNodeIndex = oldClusterNodes.indexWhere(_ == failedNodeName)
-
- // Migrate to the successor of the failed node (using a sorted circular list of the node names)
- if ((failedNodeIndex == 0 && myIndex == oldClusterNodes.size - 1) || // No leftmost successor exists, check the tail
- (failedNodeIndex == myIndex + 1)) {
- // Am I the leftmost successor?
-
- // This node takes the lead in migrating the actors: each actor is migrated to this node,
- // unless it already resides here, in which case another node (one it is not already on) is picked.
-
- // Yes I am the node to migrate the actor to (can only be one in the cluster)
- val actorUuidsForFailedNode = zkClient.getChildren(nodeToUuidsPathFor(failedNodeName)).toList
-
- actorUuidsForFailedNode.foreach { uuidAsString ⇒
- EventHandler.debug(this,
- "Cluster node [%s] has failed, migrating actor with UUID [%s] to [%s]"
- .format(failedNodeName, uuidAsString, nodeAddress.nodeName))
-
- val uuid = uuidFrom(uuidAsString)
- val actorAddress = actorAddressForUuid(uuid).getOrElse(
- throw new IllegalStateException("No actor address found for UUID [" + uuidAsString + "]"))
-
- val migrateToNodeAddress =
- if (!isShutdown && isInUseOnNode(actorAddress)) {
- // already in use on this node, pick another node to instantiate the actor on
- val replicaNodesForActor = nodesForActorsInUseWithAddress(actorAddress)
- val nodesAvailableForMigration = (currentClusterNodes.toSet diff failedNodes.toSet) diff replicaNodesForActor.toSet
-
- if (nodesAvailableForMigration.isEmpty) throw new ClusterException(
- "Can not migrate actor to new node since there are not any available nodes left. " +
- "(However, the actor already has >1 replica in cluster, so we are ok)")
-
- NodeAddress(nodeAddress.clusterName, nodesAvailableForMigration.head)
- } else {
- // actor is not in use on this node, migrate it here
- nodeAddress
- }
-
- // if actor is replicated => pass along the UUID for the actor to replicate from (replay transaction log etc.)
- val replicateFromUuid =
- if (isReplicated(actorAddress)) Some(uuid)
- else None
-
- migrateWithoutCheckingThatActorResidesOnItsHomeNode(
- failedNodeAddress,
- migrateToNodeAddress,
- actorAddress,
- replicateFromUuid)
- }
-
- // notify all available nodes that they should fail-over all connections from 'from' to 'to'
- val from = disconnectedConnections(failedNodeName)
- val to = remoteServerAddress
-
- Serialization.serialize((from, to)) match {
- case Left(error) ⇒ throw error
- case Right(bytes) ⇒
-
- val command = RemoteSystemDaemonMessageProtocol.newBuilder
- .setMessageType(FAIL_OVER_CONNECTIONS)
- .setPayload(ByteString.copyFrom(bytes))
- .build
-
- // FIXME now we are broadcasting to ALL nodes in the cluster even though a fraction might have a reference to the actors - should that be fixed?
- nodeConnections.get.connections.values foreach {
- case (_, connection) ⇒ sendCommandToNode(connection, command, async = true)
- }
- }
- }
- }
- }
-
- /**
- * Used when the ephemeral "home" node is already gone, so we can't check if it is available.
- */
- private def migrateWithoutCheckingThatActorResidesOnItsHomeNode(
- from: NodeAddress, to: NodeAddress, actorAddress: String, replicateFromUuid: Option[UUID]) {
-
- EventHandler.debug(this, "Migrating actor [%s] from node [%s] to node [%s]".format(actorAddress, from, to))
- if (!isInUseOnNode(actorAddress, to) && !isShutdown) {
- release(actorAddress)
-
- val remoteAddress = remoteSocketAddressForNode(to.nodeName).getOrElse(throw new ClusterException("No remote address registered for [" + to.nodeName + "]"))
-
- ignore[ZkNoNodeException](zkClient.delete(actorAddressToNodesPathFor(actorAddress, from.nodeName)))
-
- // FIXME who takes care of this line?
- //ignore[ZkNoNodeException](zkClient.delete(nodeToUuidsPathFor(from.nodeName, uuid)))
-
- // 'use' (check out) actor on the remote 'to' node
- useActorOnNode(to.nodeName, actorAddress, replicateFromUuid)
- }
- }
-
- private def createZooKeeperPathStructureIfNeeded() {
- ignore[ZkNodeExistsException] {
- zkClient.create(CLUSTER_PATH, null, CreateMode.PERSISTENT)
- EventHandler.info(this, "Created node [%s]".format(CLUSTER_PATH))
- }
-
- basePaths.foreach { path ⇒
- try {
- ignore[ZkNodeExistsException](zkClient.create(path, null, CreateMode.PERSISTENT))
- EventHandler.debug(this, "Created node [%s]".format(path))
- } catch {
- case e ⇒
- val error = new ClusterException(e.toString)
- EventHandler.error(error, this)
- throw error
- }
- }
- }
-
- private def registerListeners() = {
- zkClient.subscribeStateChanges(stateListener)
- zkClient.subscribeChildChanges(MEMBERSHIP_PATH, membershipListener)
- }
-
- private def unregisterListeners() = {
- zkClient.unsubscribeStateChanges(stateListener)
- zkClient.unsubscribeChildChanges(MEMBERSHIP_PATH, membershipListener)
- }
-
- private def fetchMembershipNodes() {
- val membershipChildren = zkClient.getChildren(MEMBERSHIP_PATH)
- locallyCachedMembershipNodes.clear()
- membershipChildren.iterator.foreach(locallyCachedMembershipNodes.add)
- connectToAllNewlyArrivedMembershipNodesInCluster(membershipNodes, Nil)
- }
-
- private def isReplicated(actorAddress: String): Boolean = DeploymentConfig.isReplicated(Deployer.deploymentFor(actorAddress))
-
- private def createMBean = {
- val clusterMBean = new StandardMBean(classOf[ClusterNodeMBean]) with ClusterNodeMBean {
-
- override def stop() = self.shutdown()
-
- override def disconnect() = self.disconnect()
-
- override def reconnect() = self.reconnect()
-
- override def resign() = self.resign()
-
- override def getNodeAddress = self.nodeAddress
-
- override def getRemoteServerHostname = self.hostname
-
- override def getRemoteServerPort = self.port
-
- override def getNodeName = self.nodeAddress.nodeName
-
- override def getClusterName = self.nodeAddress.clusterName
-
- override def getZooKeeperServerAddresses = self.zkServerAddresses
-
- override def getMemberNodes = self.locallyCachedMembershipNodes.iterator.map(_.toString).toArray
-
- override def getLeaderLockName = self.leader.toString
-
- override def isLeader = self.isLeader
-
- override def getUuidsForActorsInUse = self.uuidsForActorsInUse.map(_.toString).toArray
-
- override def getAddressesForActorsInUse = self.addressesForActorsInUse.map(_.toString).toArray
-
- override def getUuidsForClusteredActors = self.uuidsForClusteredActors.map(_.toString).toArray
-
- override def getAddressesForClusteredActors = self.addressesForClusteredActors.map(_.toString).toArray
-
- override def getNodesForActorInUseWithAddress(address: String) = self.nodesForActorsInUseWithAddress(address)
-
- override def getUuidsForActorsInUseOnNode(nodeName: String) = self.uuidsForActorsInUseOnNode(nodeName).map(_.toString).toArray
-
- override def getAddressesForActorsInUseOnNode(nodeName: String) = self.addressesForActorsInUseOnNode(nodeName).map(_.toString).toArray
-
- override def setConfigElement(key: String, value: String): Unit = self.setConfigElement(key, value.getBytes("UTF-8"))
-
- override def getConfigElement(key: String) = new String(self.getConfigElement(key).getOrElse(Array[Byte]()), "UTF-8")
-
- override def removeConfigElement(key: String): Unit = self.removeConfigElement(key)
-
- override def getConfigElementKeys = self.getConfigElementKeys.toArray
-
- override def getMembershipPathFor(node: String) = self.membershipPathFor(node)
-
- override def getConfigurationPathFor(key: String) = self.configurationPathFor(key)
-
- override def getActorAddresstoNodesPathFor(actorAddress: String) = self.actorAddressToNodesPathFor(actorAddress)
-
- override def getActorAddressToNodesPathForWithNodeName(actorAddress: String, nodeName: String) = self.actorAddressToNodesPathFor(actorAddress, nodeName)
-
- override def getNodeToUuidsPathFor(node: String) = self.nodeToUuidsPathFor(node)
-
- override def getNodeToUuidsPathFor(node: String, uuid: UUID) = self.nodeToUuidsPathFor(node, uuid)
-
- override def getActorAddressRegistryPathFor(actorAddress: String) = self.actorAddressRegistryPathFor(actorAddress)
-
- override def getActorAddressRegistrySerializerPathFor(actorAddress: String) = self.actorAddressRegistrySerializerPathFor(actorAddress)
-
- override def getActorAddressRegistryUuidPathFor(actorAddress: String) = self.actorAddressRegistryUuidPathFor(actorAddress)
-
- override def getActorUuidRegistryNodePathFor(uuid: UUID) = self.actorUuidRegistryNodePathFor(uuid)
-
- override def getActorUuidRegistryRemoteAddressPathFor(uuid: UUID) = self.actorUuidRegistryRemoteAddressPathFor(uuid)
-
- override def getActorAddressToUuidsPathFor(actorAddress: String) = self.actorAddressToUuidsPathFor(actorAddress)
-
- override def getActorAddressToUuidsPathForWithNodeName(actorAddress: String, uuid: UUID) = self.actorAddressToUuidsPathFor(actorAddress, uuid)
- }
-
- JMX.register(clusterJmxObjectName, clusterMBean)
-
- // FIXME need monitoring to lookup the cluster MBean dynamically
- // Monitoring.registerLocalMBean(clusterJmxObjectName, clusterMBean)
- }
-}
-
-class MembershipChildListener(self: ClusterNode) extends IZkChildListener with ErrorHandler {
- def handleChildChange(parentPath: String, currentChilds: JList[String]) {
- withErrorHandler {
- if (!self.isShutdown) {
- if (currentChilds ne null) {
- val currentClusterNodes = currentChilds.toList
- if (!currentClusterNodes.isEmpty) EventHandler.debug(this,
- "MembershipChildListener at [%s] has children [%s]"
- .format(self.nodeAddress.nodeName, currentClusterNodes.mkString(" ")))
-
- // take a snapshot of the old cluster nodes and then update the list with the current connected nodes in the cluster
- val oldClusterNodes = self.locallyCachedMembershipNodes.toArray.toSet.asInstanceOf[Set[String]]
- self.locallyCachedMembershipNodes.clear()
- currentClusterNodes foreach (self.locallyCachedMembershipNodes.add)
-
- val newlyConnectedMembershipNodes = (Set(currentClusterNodes: _*) diff oldClusterNodes).toList
- val newlyDisconnectedMembershipNodes = (oldClusterNodes diff Set(currentClusterNodes: _*)).toList
-
- // update the connections with the new set of cluster nodes
- val disconnectedConnections = self.connectToAllNewlyArrivedMembershipNodesInCluster(newlyConnectedMembershipNodes, newlyDisconnectedMembershipNodes)
-
- // if node(s) left cluster then migrate actors residing on the failed node
- if (!newlyDisconnectedMembershipNodes.isEmpty) {
- self.migrateActorsOnFailedNodes(newlyDisconnectedMembershipNodes, currentClusterNodes, oldClusterNodes.toList, disconnectedConnections)
- }
-
- // publish NodeConnected and NodeDisconnect events to the listeners
- newlyConnectedMembershipNodes foreach (node ⇒ self.publish(NodeConnected(node)))
- newlyDisconnectedMembershipNodes foreach { node ⇒
- self.publish(NodeDisconnected(node))
- // remove metrics of a disconnected node from ZK and local cache
- self.metricsManager.removeNodeMetrics(node)
- }
- }
- }
- }
- }
-}
-
-class StateListener(self: ClusterNode) extends IZkStateListener {
- def handleStateChanged(state: KeeperState) {
- state match {
- case KeeperState.SyncConnected ⇒
- EventHandler.debug(this, "Cluster node [%s] - Connected".format(self.nodeAddress))
- self.publish(ThisNode.Connected)
- case KeeperState.Disconnected ⇒
- EventHandler.debug(this, "Cluster node [%s] - Disconnected".format(self.nodeAddress))
- self.publish(ThisNode.Disconnected)
- case KeeperState.Expired ⇒
- EventHandler.debug(this, "Cluster node [%s] - Expired".format(self.nodeAddress))
- self.publish(ThisNode.Expired)
- }
- }
-
- /**
- * Re-initialize after the zookeeper session has expired and a new session has been created.
- */
- def handleNewSession() {
- EventHandler.debug(this, "Session expired re-initializing node [%s]".format(self.nodeAddress))
- self.boot()
- self.publish(NewSession)
- }
-}
-
-trait ErrorHandler {
- def withErrorHandler[T](body: ⇒ T) = {
- try {
- ignore[ZkInterruptedException](body) // FIXME Is it good to ignore ZkInterruptedException? If not, how should we handle it?
- } catch {
- case e: Throwable ⇒
- EventHandler.error(e, this, e.toString)
- throw e
- }
- }
-}
-
-object RemoteClusterDaemon {
- val Address = "akka-cluster-daemon".intern
-
- // FIXME configure computeGridDispatcher to what?
- val computeGridDispatcher = Dispatchers.newDispatcher("akka:compute-grid").build
-}
-
-/**
- * Internal "daemon" actor for cluster internal communication.
- *
- * It acts as the brain of the cluster: it responds to cluster events (messages) and takes action.
- */
-class RemoteClusterDaemon(cluster: ClusterNode) extends Actor {
-
- import RemoteClusterDaemon._
- import Cluster._
-
- override def preRestart(reason: Throwable, msg: Option[Any]) {
- EventHandler.debug(this, "RemoteClusterDaemon failed due to [%s] restarting...".format(reason))
- }
-
- def receive: Receive = {
- case message: RemoteSystemDaemonMessageProtocol ⇒
- EventHandler.debug(this,
- "Received command [\n%s] to RemoteClusterDaemon on node [%s]".format(message, cluster.nodeAddress.nodeName))
-
- message.getMessageType match {
- case USE ⇒ handleUse(message)
- case RELEASE ⇒ handleRelease(message)
- case STOP ⇒ cluster.shutdown()
- case DISCONNECT ⇒ cluster.disconnect()
- case RECONNECT ⇒ cluster.reconnect()
- case RESIGN ⇒ cluster.resign()
- case FAIL_OVER_CONNECTIONS ⇒ handleFailover(message)
- case FUNCTION_FUN0_UNIT ⇒ handle_fun0_unit(message)
- case FUNCTION_FUN0_ANY ⇒ handle_fun0_any(message)
- case FUNCTION_FUN1_ARG_UNIT ⇒ handle_fun1_arg_unit(message)
- case FUNCTION_FUN1_ARG_ANY ⇒ handle_fun1_arg_any(message)
- //TODO: should we not deal with unrecognized message types?
- }
-
- case unknown ⇒ EventHandler.warning(this, "Unknown message [%s]".format(unknown))
- }
-
- def handleRelease(message: RemoteProtocol.RemoteSystemDaemonMessageProtocol) {
- if (message.hasActorUuid) {
- cluster.actorAddressForUuid(uuidProtocolToUuid(message.getActorUuid)) foreach { address ⇒
- cluster.release(address)
- }
- } else if (message.hasActorAddress) {
- cluster release message.getActorAddress
- } else {
- EventHandler.warning(this,
- "None of 'uuid' or 'actorAddress'' is specified, ignoring remote cluster daemon command [%s]".format(message))
- }
- }
-
- def handleUse(message: RemoteProtocol.RemoteSystemDaemonMessageProtocol) {
- def deserializeMessages(entriesAsBytes: Vector[Array[Byte]]): Vector[AnyRef] = {
- import akka.cluster.RemoteProtocol._
- import akka.cluster.MessageSerializer
-
- entriesAsBytes map { bytes ⇒
- val messageBytes =
- if (Cluster.shouldCompressData) LZF.uncompress(bytes)
- else bytes
- MessageSerializer.deserialize(MessageProtocol.parseFrom(messageBytes), None)
- }
- }
-
- def actorOfRefToUseForReplay(snapshotAsBytes: Option[Array[Byte]], actorAddress: String, newActorRef: LocalActorRef): ActorRef = {
- snapshotAsBytes match {
-
- // we have a new actor ref - the snapshot
- case Some(bytes) ⇒
- // stop the new actor ref and use the snapshot instead
- //TODO: What if that actor has already been retrieved and is being used?
- //So do we have a race here?
- cluster.remoteService.unregister(actorAddress)
-
- // deserialize the snapshot actor ref and register it as remote actor
- val uncompressedBytes =
- if (Cluster.shouldCompressData) LZF.uncompress(bytes)
- else bytes
-
- val snapshotActorRef = fromBinary(uncompressedBytes, newActorRef.uuid)
- cluster.remoteService.register(actorAddress, snapshotActorRef)
-
- // FIXME we should call 'stop()' here (to GC the actor), but can't since that will currently
- //shut down the TransactionLog for this UUID - since both this actor and the new snapshotActorRef
- //have the same UUID (which they should)
- //newActorRef.stop()
-
- snapshotActorRef
-
- // we have no snapshot - use the new actor ref
- case None ⇒
- newActorRef
- }
- }
-
- try {
- if (message.hasActorAddress) {
- val actorAddress = message.getActorAddress
- cluster.serializerForActor(actorAddress) foreach { serializer ⇒
- cluster.use(actorAddress, serializer) foreach { newActorRef ⇒
- cluster.remoteService.register(actorAddress, newActorRef)
-
- if (message.hasReplicateActorFromUuid) {
- // replication is used - fetch the messages and replay them
- val replicateFromUuid = uuidProtocolToUuid(message.getReplicateActorFromUuid)
- val deployment = Deployer.deploymentFor(actorAddress)
- val replicationScheme = DeploymentConfig.replicationSchemeFor(deployment).getOrElse(
- throw new IllegalStateException(
- "Actor [" + actorAddress + "] should have been configured as a replicated actor but could not find its ReplicationScheme"))
- val isWriteBehind = DeploymentConfig.isWriteBehindReplication(replicationScheme)
-
- try {
- // get the transaction log for the actor UUID
- val readonlyTxLog = TransactionLog.logFor(replicateFromUuid.toString, isWriteBehind, replicationScheme)
-
- // get the latest snapshot (Option[Array[Byte]]) and all the subsequent messages (Array[Byte])
- val (snapshotAsBytes, entriesAsBytes) = readonlyTxLog.latestSnapshotAndSubsequentEntries
-
- // deserialize and restore actor snapshot. This call will automatically recreate a transaction log.
- val actorRef = actorOfRefToUseForReplay(snapshotAsBytes, actorAddress, newActorRef)
-
- // deserialize the messages
- val messages: Vector[AnyRef] = deserializeMessages(entriesAsBytes)
-
- EventHandler.info(this, "Replaying [%s] messages to actor [%s]".format(messages.size, actorAddress))
-
- // replay all messages
- messages foreach { message ⇒
- EventHandler.debug(this, "Replaying message [%s] to actor [%s]".format(message, actorAddress))
-
- // FIXME how to handle '?' messages?
- // We can *not* replay them with the correct semantics. Should we:
- // 1. Ignore/drop them and log warning?
- // 2. Throw exception when about to log them?
- // 3. Other?
- actorRef ! message
- }
-
- } catch {
- case e: Throwable ⇒
- EventHandler.error(e, this, e.toString)
- throw e
- }
- }
- }
- }
- } else {
- EventHandler.error(this, "Actor 'address' is not defined, ignoring remote cluster daemon command [%s]".format(message))
- }
-
- self.reply(Success(cluster.remoteServerAddress.toString))
- } catch {
- case error: Throwable ⇒
- self.reply(Failure(error))
- throw error
- }
- }
-
- def handle_fun0_unit(message: RemoteProtocol.RemoteSystemDaemonMessageProtocol) {
- new LocalActorRef(
- Props(
- self ⇒ {
- case f: Function0[_] ⇒ try { f() } finally { self.stop() }
- }).copy(dispatcher = computeGridDispatcher), Props.randomName, systemService = true) ! payloadFor(message, classOf[Function0[Unit]])
- }
-
- def handle_fun0_any(message: RemoteProtocol.RemoteSystemDaemonMessageProtocol) {
- new LocalActorRef(
- Props(
- self ⇒ {
- case f: Function0[_] ⇒ try { self.reply(f()) } finally { self.stop() }
- }).copy(dispatcher = computeGridDispatcher), Props.randomName, systemService = true) forward payloadFor(message, classOf[Function0[Any]])
- }
-
- def handle_fun1_arg_unit(message: RemoteProtocol.RemoteSystemDaemonMessageProtocol) {
- new LocalActorRef(
- Props(
- self ⇒ {
- case (fun: Function[_, _], param: Any) ⇒ try { fun.asInstanceOf[Any ⇒ Unit].apply(param) } finally { self.stop() }
- }).copy(dispatcher = computeGridDispatcher), Props.randomName, systemService = true) ! payloadFor(message, classOf[Tuple2[Function1[Any, Unit], Any]])
- }
-
- def handle_fun1_arg_any(message: RemoteProtocol.RemoteSystemDaemonMessageProtocol) {
- new LocalActorRef(
- Props(
- self ⇒ {
- case (fun: Function[_, _], param: Any) ⇒ try { self.reply(fun.asInstanceOf[Any ⇒ Any](param)) } finally { self.stop() }
- }).copy(dispatcher = computeGridDispatcher), Props.randomName, systemService = true) forward payloadFor(message, classOf[Tuple2[Function1[Any, Any], Any]])
- }
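-
- // For reference, the payload types the four handlers above deserialize via
- // payloadFor (mirroring its classOf arguments):
- // FUNCTION_FUN0_UNIT -> Function0[Unit]
- // FUNCTION_FUN0_ANY -> Function0[Any]
- // FUNCTION_FUN1_ARG_UNIT -> (Function1[Any, Unit], Any)
- // FUNCTION_FUN1_ARG_ANY -> (Function1[Any, Any], Any)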
-
- def handleFailover(message: RemoteProtocol.RemoteSystemDaemonMessageProtocol) {
- val (from, to) = payloadFor(message, classOf[(InetSocketAddress, InetSocketAddress)])
- cluster.failOverClusterActorRefConnections(from, to)
- }
-
- private def payloadFor[T](message: RemoteSystemDaemonMessageProtocol, clazz: Class[T]): T = {
- Serialization.deserialize(message.getPayload.toByteArray, clazz, None) match {
- case Left(error) ⇒ throw error
- case Right(instance) ⇒ instance.asInstanceOf[T]
- }
- }
-}
diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterActorRef.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterActorRef.scala
deleted file mode 100644
index 29f56a5966..0000000000
--- a/akka-cluster/src/main/scala/akka/cluster/ClusterActorRef.scala
+++ /dev/null
@@ -1,129 +0,0 @@
-/**
- * Copyright (C) 2009-2012 Typesafe Inc.
- */
-
-package akka.cluster
-
-import akka.actor._
-import akka.util._
-import ReflectiveAccess._
-import akka.routing._
-import akka.cluster._
-import FailureDetector._
-import akka.event.EventHandler
-import akka.config.ConfigurationException
-
-import java.net.InetSocketAddress
-import java.util.concurrent.atomic.AtomicReference
-
-import collection.immutable.Map
-import annotation.tailrec
-
-/**
- * ClusterActorRef factory and locator.
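- *
- * A minimal checkout sketch (the actor address and timeout are hypothetical;
- * the router and failure detector types are ones handled by newRef below):
- * {{{
- * val ref: ClusterActorRef = ClusterActorRef.newRef(
- *   actorAddress = "my-service",
- *   routerType = RouterType.RoundRobin,
- *   failureDetectorType = FailureDetectorType.RemoveConnectionOnFirstFailureLocalFailureDetector,
- *   timeout = 5000L)
- * }}}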
- */
-object ClusterActorRef {
- import FailureDetectorType._
- import RouterType._
-
- def newRef(
- actorAddress: String,
- routerType: RouterType,
- failureDetectorType: FailureDetectorType,
- timeout: Long): ClusterActorRef = {
-
- val routerFactory: () ⇒ Router = routerType match {
- case Direct ⇒ () ⇒ new DirectRouter
- case Random ⇒ () ⇒ new RandomRouter
- case RoundRobin ⇒ () ⇒ new RoundRobinRouter
- case LeastCPU ⇒ sys.error("Router LeastCPU not supported yet")
- case LeastRAM ⇒ sys.error("Router LeastRAM not supported yet")
- case LeastMessages ⇒ sys.error("Router LeastMessages not supported yet")
- case Custom ⇒ sys.error("Router Custom not supported yet")
- }
-
- val failureDetectorFactory: (Map[InetSocketAddress, ActorRef]) ⇒ FailureDetector = failureDetectorType match {
- case RemoveConnectionOnFirstFailureLocalFailureDetector ⇒
- (connections: Map[InetSocketAddress, ActorRef]) ⇒ new RemoveConnectionOnFirstFailureLocalFailureDetector(connections.values)
-
- case RemoveConnectionOnFirstFailureRemoteFailureDetector ⇒
- (connections: Map[InetSocketAddress, ActorRef]) ⇒ new RemoveConnectionOnFirstFailureRemoteFailureDetector(connections)
-
- case CustomFailureDetector(implClass) ⇒
- (connections: Map[InetSocketAddress, ActorRef]) ⇒ FailureDetector.createCustomFailureDetector(implClass, connections)
- }
-
- new ClusterActorRef(
- RoutedProps()
- .withTimeout(timeout)
- .withRouter(routerFactory)
- .withFailureDetector(failureDetectorFactory),
- actorAddress)
- }
-
- /**
- * Finds the cluster actor reference that has a specific address.
- */
- def actorFor(address: String): Option[ActorRef] =
- Actor.registry.local.actorFor(Address.clusterActorRefPrefix + address)
-
- private[cluster] def createRemoteActorRef(actorAddress: String, inetSocketAddress: InetSocketAddress) = {
- RemoteActorRef(inetSocketAddress, actorAddress, Actor.TIMEOUT, None)
- }
-}
-
-/**
- * ActorRef representing one or many instances of a clustered, load-balanced and sometimes replicated actor
- * where the instances can reside on other nodes in the cluster.
- */
-private[akka] class ClusterActorRef(props: RoutedProps, val address: String) extends AbstractRoutedActorRef(props) {
-
- import ClusterActorRef._
-
- ClusterModule.ensureEnabled()
-
- val addresses = Cluster.node.inetSocketAddressesForActor(address)
-
- EventHandler.debug(this,
- "Checking out cluster actor ref with address [%s] and router [%s] on [%s] connected to [\n\t%s]"
- .format(address, router, Cluster.node.remoteServerAddress, addresses.map(_._2).mkString("\n\t")))
-
- addresses foreach {
- case (_, address) ⇒ Cluster.node.clusterActorRefs.put(address, this)
- }
-
- val connections: FailureDetector = {
- val remoteConnections = (Map[InetSocketAddress, ActorRef]() /: addresses) {
- case (map, (uuid, inetSocketAddress)) ⇒
- map + (inetSocketAddress -> createRemoteActorRef(address, inetSocketAddress))
- }
- props.failureDetectorFactory(remoteConnections)
- }
-
- router.init(connections)
-
- def nrOfConnections: Int = connections.size
-
- private[akka] def failOver(from: InetSocketAddress, to: InetSocketAddress) {
- connections.failOver(from, to)
- }
-
- def stop() {
- synchronized {
- if (_status == ActorRefInternals.RUNNING) {
- Actor.registry.local.unregisterClusterActorRef(this)
- _status = ActorRefInternals.SHUTDOWN
- postMessageToMailbox(Terminate, None)
-
- // FIXME here we need to fire off Actor.cluster.remove(address) (which needs to be properly implemented first, see ticket)
- connections.stopAll()
- }
- }
- }
-
- /* If you start me up */
- if (_status == ActorRefInternals.UNSTARTED) {
- _status = ActorRefInternals.RUNNING
- Actor.registry.local.registerClusterActorRef(this)
- }
-}
diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterDeployer.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterDeployer.scala
deleted file mode 100644
index 61a393360c..0000000000
--- a/akka-cluster/src/main/scala/akka/cluster/ClusterDeployer.scala
+++ /dev/null
@@ -1,205 +0,0 @@
-/**
- * Copyright (C) 2009-2012 Typesafe Inc.
- */
-
-package akka.cluster
-
-import akka.actor.DeploymentConfig._
-import akka.actor._
-import akka.event.EventHandler
-import akka.config.Config
-import akka.util.Switch
-import akka.util.Helpers._
-import akka.cluster.zookeeper.AkkaZkClient
-
-import org.apache.zookeeper.CreateMode
-import org.apache.zookeeper.recipes.lock.{ WriteLock, LockListener }
-
-import org.I0Itec.zkclient.exception.{ ZkNoNodeException, ZkNodeExistsException }
-
-import scala.collection.immutable.Seq
-import scala.collection.JavaConversions.collectionAsScalaIterable
-
-import java.util.concurrent.{ CountDownLatch, TimeUnit }
-
-/**
- * A ClusterDeployer is responsible for deploying a Deploy: deployment plans are
- * pushed to ZooKeeper once, cluster-wide, then fetched and cached locally on each node.
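- *
- * A lookup sketch (the actor address is hypothetical):
- * {{{
- * val plan: Option[Deploy] = ClusterDeployer.lookupDeploymentFor("my-actor")
- * }}}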
- */
-object ClusterDeployer extends ActorDeployer {
- val clusterName = Cluster.name
- val nodeName = Config.nodename
- val clusterPath = "/%s" format clusterName
-
- val deploymentPath = clusterPath + "/deployment"
- val deploymentAddressPath = deploymentPath + "/%s"
-
- val deploymentCoordinationPath = clusterPath + "/deployment-coordination"
- val deploymentInProgressLockPath = deploymentCoordinationPath + "/in-progress"
- val isDeploymentCompletedInClusterLockPath = deploymentCoordinationPath + "/completed" // should not be part of basePaths
-
- val basePaths = List(clusterPath, deploymentPath, deploymentCoordinationPath, deploymentInProgressLockPath)
-
- private val isConnected = new Switch(false)
- private val deploymentCompleted = new CountDownLatch(1)
-
- private val zkClient = new AkkaZkClient(
- Cluster.zooKeeperServers,
- Cluster.sessionTimeout,
- Cluster.connectionTimeout,
- Cluster.defaultZooKeeperSerializer)
-
- private val deploymentInProgressLockListener = new LockListener {
- def lockAcquired() {
- EventHandler.info(this, "Clustered deployment started")
- }
-
- def lockReleased() {
- EventHandler.info(this, "Clustered deployment completed")
- deploymentCompleted.countDown()
- }
- }
-
- private val deploymentInProgressLock = new WriteLock(
- zkClient.connection.getZookeeper,
- deploymentInProgressLockPath,
- null,
- deploymentInProgressLockListener)
-
- private val systemDeployments: List[Deploy] = Nil
-
- def shutdown() {
- isConnected switchOff {
- // undeploy all
- try {
- for {
- child ← collectionAsScalaIterable(zkClient.getChildren(deploymentPath))
- deployment = zkClient.readData(deploymentAddressPath.format(child)).asInstanceOf[Deploy]
- } zkClient.delete(deploymentAddressPath.format(deployment.address))
-
- invalidateDeploymentInCluster()
- } catch {
- case e: Exception ⇒
- handleError(new DeploymentException("Could not undeploy all deployment data in ZooKeeper due to: " + e))
- }
-
- // shut down ZooKeeper client
- zkClient.close()
- EventHandler.info(this, "ClusterDeployer shut down successfully")
- }
- }
-
- def lookupDeploymentFor(address: String): Option[Deploy] = ensureRunning {
- LocalDeployer.lookupDeploymentFor(address) match { // try local cache
- case Some(deployment) ⇒ // in local cache
- Some(deployment)
- case None ⇒ // not in cache, check cluster
- val deployment =
- try {
- Some(zkClient.readData(deploymentAddressPath.format(address)).asInstanceOf[Deploy])
- } catch {
- case e: ZkNoNodeException ⇒ None
- case e: Exception ⇒
- EventHandler.warning(this, e.toString)
- None
- }
- deployment foreach (LocalDeployer.deploy(_)) // cache it in local cache
- deployment
- }
- }
-
- def fetchDeploymentsFromCluster: List[Deploy] = ensureRunning {
- val addresses =
- try {
- zkClient.getChildren(deploymentPath).toList
- } catch {
- case e: ZkNoNodeException ⇒ List[String]()
- }
- val deployments = addresses map { address ⇒
- zkClient.readData(deploymentAddressPath.format(address)).asInstanceOf[Deploy]
- }
- EventHandler.info(this, "Fetched deployment plans from cluster [\n\t%s\n]" format deployments.mkString("\n\t"))
- deployments
- }
-
- private[akka] def init(deployments: Seq[Deploy]) {
- isConnected switchOn {
- EventHandler.info(this, "Initializing ClusterDeployer")
-
- basePaths foreach { path ⇒
- try {
- ignore[ZkNodeExistsException](zkClient.create(path, null, CreateMode.PERSISTENT))
- EventHandler.debug(this, "Created ZooKeeper path for deployment [%s]".format(path))
- } catch {
- case e ⇒
- val error = new DeploymentException(e.toString)
- EventHandler.error(error, this)
- throw error
- }
- }
-
- val allDeployments = deployments ++ systemDeployments
-
- if (!isDeploymentCompletedInCluster) {
- if (deploymentInProgressLock.lock()) {
- // try to be the one doing the clustered deployment
- EventHandler.info(this, "Pushing clustered deployment plans [\n\t" + allDeployments.mkString("\n\t") + "\n]")
- allDeployments foreach (deploy(_)) // deploy
- markDeploymentCompletedInCluster()
- deploymentInProgressLock.unlock() // signal deployment complete
-
- } else {
- deploymentCompleted.await(30, TimeUnit.SECONDS) // wait until deployment is completed by other "master" node
- }
- }
-
- // fetch clustered deployments and deploy them locally
- fetchDeploymentsFromCluster foreach (LocalDeployer.deploy(_))
- }
- }
-
- private[akka] def deploy(deployment: Deploy) {
- ensureRunning {
- LocalDeployer.deploy(deployment)
- deployment match {
- case Deploy(_, _, _, _, Local) | Deploy(_, _, _, _, _: Local) ⇒ //TODO LocalDeployer.deploy(deployment)??
- case Deploy(address, recipe, routing, _, _) ⇒ // cluster deployment
- /*TODO recipe foreach { r ⇒
- Deployer.newClusterActorRef(() ⇒ Actor.actorOf(r.implementationClass), address, deployment)
- }*/
- val path = deploymentAddressPath.format(address)
- try {
- ignore[ZkNodeExistsException](zkClient.create(path, null, CreateMode.PERSISTENT))
- zkClient.writeData(path, deployment)
- } catch {
- case e: NullPointerException ⇒
- handleError(new DeploymentException(
- "Could not store deployment data [" + deployment + "] in ZooKeeper since client session is closed"))
- case e: Exception ⇒
- handleError(new DeploymentException(
- "Could not store deployment data [" + deployment + "] in ZooKeeper due to: " + e))
- }
- }
- }
- }
-
- private def markDeploymentCompletedInCluster() {
- ignore[ZkNodeExistsException](zkClient.create(isDeploymentCompletedInClusterLockPath, null, CreateMode.PERSISTENT))
- }
-
- private def isDeploymentCompletedInCluster = zkClient.exists(isDeploymentCompletedInClusterLockPath)
-
- // FIXME in future - add watch to this path to be able to trigger redeployment, and use this method to trigger redeployment
- private def invalidateDeploymentInCluster() {
- ignore[ZkNoNodeException](zkClient.delete(isDeploymentCompletedInClusterLockPath))
- }
-
- private def ensureRunning[T](body: ⇒ T): T = {
- if (isConnected.isOn) body
- else throw new IllegalStateException("ClusterDeployer is not running")
- }
-
- private[akka] def handleError(e: Throwable): Nothing = {
- EventHandler.error(e, this, e.toString)
- throw e
- }
-}
diff --git a/akka-remote/src/main/scala/akka/remote/Gossiper.scala b/akka-cluster/src/main/scala/akka/cluster/Gossiper.scala
similarity index 97%
rename from akka-remote/src/main/scala/akka/remote/Gossiper.scala
rename to akka-cluster/src/main/scala/akka/cluster/Gossiper.scala
index 55165f0891..e234d6e158 100644
--- a/akka-remote/src/main/scala/akka/remote/Gossiper.scala
+++ b/akka-cluster/src/main/scala/akka/cluster/Gossiper.scala
@@ -2,13 +2,15 @@
* Copyright (C) 2009-2012 Typesafe Inc.
*/
-package akka.remote
+package akka.cluster
import akka.actor._
import akka.actor.Status._
+import akka.remote._
import akka.event.Logging
-import akka.util._
import akka.dispatch.Await
+import akka.pattern.ask
+import akka.util._
import akka.config.ConfigurationException
import java.util.concurrent.atomic.{ AtomicReference, AtomicBoolean }
@@ -20,9 +22,6 @@ import System.{ currentTimeMillis ⇒ newTimestamp }
import scala.collection.immutable.{ Map, SortedSet }
import scala.annotation.tailrec
-import akka.dispatch.Await
-import akka.pattern.ask
-
import com.google.protobuf.ByteString
/**
@@ -136,7 +135,7 @@ case class Gossiper(remote: RemoteActorRefProvider, system: ActorSystemImpl) {
private val memberFingerprint = address.##
private val serialization = remote.serialization
- private val failureDetector = remote.failureDetector
+ private val failureDetector = new AccrualFailureDetector(remoteSettings.FailureDetectorThreshold, remoteSettings.FailureDetectorMaxSampleSize, system)
private val initialDelayForGossip = remoteSettings.InitialDelayForGossip
private val gossipFrequency = remoteSettings.GossipFrequency
@@ -154,12 +153,14 @@ case class Gossiper(remote: RemoteActorRefProvider, system: ActorSystemImpl) {
private val isRunning = new AtomicBoolean(true)
private val log = Logging(system, "Gossiper")
private val random = SecureRandom.getInstance("SHA1PRNG")
- private val connectionManager = new RemoteConnectionManager(system, remote, Map.empty[Address, ActorRef])
// Is it right to put this guy under the /system path or should we have a top-level /cluster or something else...?
private val clusterDaemon = system.systemActorOf(Props(new ClusterDaemon(system, this)), "cluster")
private val state = new AtomicReference[State](State(currentGossip = newGossip()))
+ // FIXME manage connections in some other way so we can delete the RemoteConnectionManager (SINCE IT SUCKS!!!)
+ private val connectionManager = new RemoteConnectionManager(system, remote, failureDetector, Map.empty[Address, ActorRef])
+
log.info("Starting cluster Gossiper...")
// join the cluster by connecting to one of the seed members and retrieve current cluster state (Gossip)
diff --git a/akka-cluster/src/main/scala/akka/cluster/LocalCluster.scala b/akka-cluster/src/main/scala/akka/cluster/LocalCluster.scala
deleted file mode 100644
index d8a0ac6027..0000000000
--- a/akka-cluster/src/main/scala/akka/cluster/LocalCluster.scala
+++ /dev/null
@@ -1,105 +0,0 @@
-/**
- * Copyright (C) 2009-2012 Typesafe Inc.
- */
-package akka.cluster
-
-import akka.config.Config
-import Config._
-import akka.util._
-import Helpers._
-import akka.actor._
-import Actor._
-import akka.event.EventHandler
-import akka.cluster.zookeeper._
-
-import org.apache.zookeeper._
-import org.apache.zookeeper.Watcher.Event._
-import org.apache.zookeeper.data.Stat
-import org.apache.zookeeper.recipes.lock.{ WriteLock, LockListener }
-
-import org.I0Itec.zkclient._
-import org.I0Itec.zkclient.serialize._
-import org.I0Itec.zkclient.exception._
-
-import java.util.concurrent.atomic.{ AtomicBoolean, AtomicReference }
-
-object LocalCluster {
- val clusterDirectory = config.getString("akka.cluster.log-directory", "_akka_cluster")
- val clusterDataDirectory = clusterDirectory + "/data"
- val clusterLogDirectory = clusterDirectory + "/log"
-
- val clusterName = Config.clusterName
- val nodename = Config.nodename
- val zooKeeperServers = config.getString("akka.cluster.zookeeper-server-addresses", "localhost:2181")
- val sessionTimeout = Duration(config.getInt("akka.cluster.session-timeout", 60), TIME_UNIT).toMillis.toInt
- val connectionTimeout = Duration(config.getInt("akka.cluster.connection-timeout", 60), TIME_UNIT).toMillis.toInt
- val defaultZooKeeperSerializer = new SerializableSerializer
-
- val zkServer = new AtomicReference[Option[ZkServer]](None)
-
- lazy val zkClient = new AkkaZkClient(zooKeeperServers, sessionTimeout, connectionTimeout, defaultZooKeeperSerializer)
-
- /**
- * Looks up the local hostname.
- */
- def lookupLocalhostName = NetworkUtil.getLocalhostName
-
- /**
- * Starts up a local ZooKeeper server. Should only be used for testing purposes.
- */
- def startLocalCluster(): ZkServer =
- startLocalCluster(clusterDataDirectory, clusterLogDirectory, 2181, 5000)
-
- /**
- * Starts up a local ZooKeeper server. Should only be used for testing purposes.
- */
- def startLocalCluster(port: Int, tickTime: Int): ZkServer =
- startLocalCluster(clusterDataDirectory, clusterLogDirectory, port, tickTime)
-
- /**
- * Starts up a local ZooKeeper server. Should only be used for testing purposes.
- */
- def startLocalCluster(tickTime: Int): ZkServer =
- startLocalCluster(clusterDataDirectory, clusterLogDirectory, 2181, tickTime)
-
- /**
- * Starts up a local ZooKeeper server. Should only be used for testing purposes.
- */
- def startLocalCluster(dataPath: String, logPath: String): ZkServer =
- startLocalCluster(dataPath, logPath, 2181, 500)
-
- /**
- * Starts up a local ZooKeeper server. Should only be used for testing purposes.
- */
- def startLocalCluster(dataPath: String, logPath: String, port: Int, tickTime: Int): ZkServer = {
- try {
- val zk = AkkaZooKeeper.startLocalServer(dataPath, logPath, port, tickTime)
- zkServer.set(Some(zk))
- zk
- } catch {
- case e: Throwable ⇒
- EventHandler.error(e, this, "Could not start local ZooKeeper cluster")
- throw e
- }
- }
-
- /**
- * Shut down the local ZooKeeper server.
- */
- def shutdownLocalCluster() {
- withPrintStackTraceOnError {
- EventHandler.debug(this, "Shuts down local cluster")
- zkServer.getAndSet(None).foreach(_.shutdown())
- }
- }
-
- def createQueue(rootPath: String, blocking: Boolean = true) =
- new ZooKeeperQueue(zkClient, rootPath, blocking)
-
- def barrier(name: String, count: Int): ZooKeeperBarrier =
- ZooKeeperBarrier(zkClient, clusterName, name, nodename, count)
-
- def barrier(name: String, count: Int, timeout: Duration): ZooKeeperBarrier =
- ZooKeeperBarrier(zkClient, clusterName, name, nodename, count, timeout)
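-
- // A minimal multi-node test sketch using the factories above (names are
- // hypothetical; how the returned ZooKeeperBarrier is entered and left depends
- // on its own API, so only the shape is shown):
- //
- // LocalCluster.startLocalCluster() // embedded ZooKeeper on port 2181
- // val startup = LocalCluster.barrier("startup", count = 2)
- // // ... run the test on all participating nodes ...
- // LocalCluster.shutdownLocalCluster()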
-}
-
diff --git a/akka-remote/src/main/scala/akka/remote/RemoteConnectionManager.scala b/akka-cluster/src/main/scala/akka/cluster/RemoteConnectionManager.scala
similarity index 96%
rename from akka-remote/src/main/scala/akka/remote/RemoteConnectionManager.scala
rename to akka-cluster/src/main/scala/akka/cluster/RemoteConnectionManager.scala
index fd2a9135d7..63020367a5 100644
--- a/akka-remote/src/main/scala/akka/remote/RemoteConnectionManager.scala
+++ b/akka-cluster/src/main/scala/akka/cluster/RemoteConnectionManager.scala
@@ -2,9 +2,10 @@
* Copyright (C) 2009-2012 Typesafe Inc.
*/
-package akka.remote
+package akka.cluster
import akka.actor._
+import akka.remote._
import akka.routing._
import akka.event.Logging
@@ -19,6 +20,7 @@ import java.util.concurrent.atomic.AtomicReference
class RemoteConnectionManager(
system: ActorSystemImpl,
remote: RemoteActorRefProvider,
+ failureDetector: AccrualFailureDetector,
initialConnections: Map[Address, ActorRef] = Map.empty[Address, ActorRef])
extends ConnectionManager {
@@ -30,8 +32,6 @@ class RemoteConnectionManager(
def iterable: Iterable[ActorRef] = connections.values
}
- def failureDetector = remote.failureDetector
-
private val state: AtomicReference[State] = new AtomicReference[State](newState())
/**
@@ -145,6 +145,6 @@ class RemoteConnectionManager(
}
}
- private[remote] def newConnection(remoteAddress: Address, actorPath: ActorPath) =
+ private[cluster] def newConnection(remoteAddress: Address, actorPath: ActorPath) =
new RemoteActorRef(remote, remote.transport, actorPath, Nobody)
}
diff --git a/akka-cluster/src/main/scala/akka/cluster/TransactionLog.scala b/akka-cluster/src/main/scala/akka/cluster/TransactionLog.scala
deleted file mode 100644
index ce9eb300f5..0000000000
--- a/akka-cluster/src/main/scala/akka/cluster/TransactionLog.scala
+++ /dev/null
@@ -1,604 +0,0 @@
-package akka.cluster
-
-/**
- * Copyright (C) 2009-2012 Typesafe Inc.
- */
-
-import org.apache.bookkeeper.client.{ BookKeeper, LedgerHandle, LedgerEntry, BKException, AsyncCallback }
-import org.apache.zookeeper.CreateMode
-
-import org.I0Itec.zkclient.exception._
-
-import akka.AkkaException
-import akka.config._
-import Config._
-import akka.util._
-import akka.actor._
-import DeploymentConfig.ReplicationScheme
-import akka.event.EventHandler
-import akka.dispatch.{ DefaultPromise, Promise, MessageInvocation }
-import akka.cluster.zookeeper._
-import akka.serialization.ActorSerialization._
-import akka.serialization.Compression.LZF
-
-import java.util.Enumeration
-
-// FIXME allow user to choose dynamically between 'async' and 'sync' tx logging (asyncAddEntry(byte[] data, AddCallback cb, Object ctx))
-// FIXME clean up old entries in log after doing a snapshot
-
-class ReplicationException(message: String, cause: Throwable = null) extends AkkaException(message) {
- def this(msg: String) = this(msg, null)
-}
-
-/**
- * A TransactionLog makes chunks of data durable.
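- *
- * A minimal write/read sketch (the id is hypothetical; passing a null
- * replication scheme mirrors what newLogFor itself does for its internal
- * ad-hoc log and is only suitable for tests):
- * {{{
- * TransactionLog.start()
- * val txLog = TransactionLog.newLogFor("actor-uuid-1", isAsync = false, replicationScheme = null)
- * txLog.recordEntry("event-1".getBytes("UTF-8"))
- * val records: Vector[Array[Byte]] = txLog.entries
- * txLog.close()
- * }}}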
- */
-class TransactionLog private (
- ledger: LedgerHandle,
- val id: String,
- val isAsync: Boolean,
- replicationScheme: ReplicationScheme) {
-
- import TransactionLog._
-
- val logId = ledger.getId
- val txLogPath = transactionLogPath(id)
- val snapshotPath = txLogPath + "/snapshot"
-
- private val isOpen = new Switch(true)
-
- /**
- * Record an Actor message invocation.
- *
- * @param invocation the MessageInvocation to record
- * @param actorRef the LocalActorRef that received the message.
- * @throws ReplicationException if the TransactionLog is already closed.
- */
- def recordEntry(invocation: MessageInvocation, actorRef: LocalActorRef) {
- val entryId = ledger.getLastAddPushed + 1
- val needsSnapshot = entryId != 0 && (entryId % snapshotFrequency) == 0
-
- if (needsSnapshot) {
- //todo: could it be that the message is never persisted when a snapshot is added?
- val bytes = toBinary(actorRef, false, replicationScheme)
- recordSnapshot(bytes)
- } else {
- val bytes = MessageSerializer.serialize(invocation.message.asInstanceOf[AnyRef]).toByteArray
- recordEntry(bytes)
- }
- }
-
- /**
- * Record an entry.
- *
- * @param entry the entry in byte form to record.
- * @throws ReplicationException if the TransactionLog is already closed.
- */
- def recordEntry(entry: Array[Byte]) {
- if (isOpen.isOn) {
- val entryBytes =
- if (shouldCompressData) LZF.compress(entry)
- else entry
-
- try {
- if (isAsync) {
- ledger.asyncAddEntry(
- entryBytes,
- new AsyncCallback.AddCallback {
- def addComplete(returnCode: Int, ledgerHandle: LedgerHandle, entryId: Long, ctx: AnyRef) {
- handleReturnCode(returnCode)
- EventHandler.debug(this, "Writing entry [%s] to log [%s]".format(entryId, logId))
- }
- },
- null)
- } else {
- handleReturnCode(ledger.addEntry(entryBytes))
- val entryId = ledger.getLastAddPushed
- EventHandler.debug(this, "Writing entry [%s] to log [%s]".format(entryId, logId))
- }
- } catch {
- case e: Throwable ⇒ handleError(e)
- }
- } else transactionClosedError
- }
-
- /**
- * Record a snapshot.
- *
- * @param snapshot the snapshot in byte form to record.
- * @throws ReplicationException if the TransactionLog is already closed.
- */
- def recordSnapshot(snapshot: Array[Byte]) {
- if (isOpen.isOn) {
- val snapshotBytes =
- if (shouldCompressData) LZF.compress(snapshot)
- else snapshot
-
- try {
- if (isAsync) {
- ledger.asyncAddEntry(
- snapshotBytes,
- new AsyncCallback.AddCallback {
- def addComplete(returnCode: Int, ledgerHandle: LedgerHandle, snapshotId: Long, ctx: AnyRef) {
- handleReturnCode(returnCode)
- EventHandler.debug(this, "Writing snapshot to log [%s]".format(snapshotId))
- storeSnapshotMetaDataInZooKeeper(snapshotId)
- }
- },
- null)
- } else {
- //todo: could this be racy, since writing the snapshot itself and storing the snapshot id is not
- //an atomic operation?
-
- //first store the snapshot.
- handleReturnCode(ledger.addEntry(snapshotBytes))
- val snapshotId = ledger.getLastAddPushed
-
- //this is the location where all previous entries can be removed.
- //TODO: how to remove data?
-
- EventHandler.debug(this, "Writing snapshot to log [%s]".format(snapshotId))
- //and now store the snapshot metadata.
- storeSnapshotMetaDataInZooKeeper(snapshotId)
- }
- } catch {
- case e: Throwable ⇒ handleError(e)
- }
- } else transactionClosedError
- }
-
- /**
- * Get all the entries for this transaction log.
- *
- * @throws ReplicationException if the TransactionLog is already closed.
- */
- def entries: Vector[Array[Byte]] = entriesInRange(0, ledger.getLastAddConfirmed)
-
- /**
- * Get the latest snapshot and all subsequent entries from this snapshot.
- */
- def latestSnapshotAndSubsequentEntries: (Option[Array[Byte]], Vector[Array[Byte]]) = {
- latestSnapshotId match {
- case Some(snapshotId) ⇒
- EventHandler.debug(this, "Reading entries from snapshot id [%s] for log [%s]".format(snapshotId, logId))
-
- val cursor = snapshotId + 1
- val lastIndex = ledger.getLastAddConfirmed
-
- val snapshot = Some(entriesInRange(snapshotId, snapshotId).head)
-
- val entries =
- if ((cursor - lastIndex) == 0) Vector.empty[Array[Byte]]
- else entriesInRange(cursor, lastIndex)
-
- (snapshot, entries)
-
- case None ⇒
- (None, entries)
- }
- }
-
- /**
- * Get a range of entries from 'from' to 'to' for this transaction log.
- *
- * @param from the first element of the range
- * @param to the last index of the range (inclusive).
- * @return a Vector of byte arrays; each element in the vector is one record.
- * @throws IllegalArgumentException if 'from' or 'to' is negative, or if 'from' is greater than 'to'.
- * @throws ReplicationException if the TransactionLog is already closed.
- */
- def entriesInRange(from: Long, to: Long): Vector[Array[Byte]] = if (isOpen.isOn) {
- try {
- if (from < 0) throw new IllegalArgumentException("'from' index can't be negative [" + from + "]")
- if (to < 0) throw new IllegalArgumentException("'to' index can't be negative [" + to + "]")
- if (to < from) throw new IllegalArgumentException("'to' index can't be smaller than 'from' index [" + from + "," + to + "]")
- EventHandler.debug(this, "Reading entries [%s -> %s] for log [%s]".format(from, to, logId))
-
- if (isAsync) {
- val future = Promise[Vector[Array[Byte]]]()
- ledger.asyncReadEntries(
- from, to,
- new AsyncCallback.ReadCallback {
- def readComplete(returnCode: Int, ledgerHandle: LedgerHandle, enumeration: Enumeration[LedgerEntry], ctx: AnyRef) {
- val future = ctx.asInstanceOf[Promise[Vector[Array[Byte]]]]
- val entries = toByteArrays(enumeration)
-
- if (returnCode == BKException.Code.OK) future.success(entries)
- else future.failure(BKException.create(returnCode))
- }
- },
- future)
- await(future)
- } else {
- toByteArrays(ledger.readEntries(from, to))
- }
- } catch {
- case e: Throwable ⇒ handleError(e)
- }
- } else transactionClosedError
-
- /**
- * Get the last entry written to this transaction log.
- *
- * Returns -1 if there has never been an entry.
- */
- def latestEntryId: Long = ledger.getLastAddConfirmed
-
- /**
- * Get the id for the last snapshot written to this transaction log.
- */
- def latestSnapshotId: Option[Long] = {
- try {
- val snapshotId = zkClient.readData(snapshotPath).asInstanceOf[Long]
- EventHandler.debug(this, "Retrieved latest snapshot id [%s] from transaction log [%s]".format(snapshotId, logId))
- Some(snapshotId)
- } catch {
- case e: ZkNoNodeException ⇒ None
- case e: Throwable ⇒ handleError(e)
- }
- }
-
- /**
- * Deletes this transaction log, removing all entries as well as all metadata.
- *
- * TODO: Behavior is unclear if the log has already been deleted (what happens to the ledger?).
- * TODO: Behavior is unclear if the log has already been closed.
- */
- def delete() {
- if (isOpen.isOn) {
- EventHandler.debug(this, "Deleting transaction log [%s]".format(logId))
- try {
- if (isAsync) {
- bookieClient.asyncDeleteLedger(
- logId,
- new AsyncCallback.DeleteCallback {
- def deleteComplete(returnCode: Int, ctx: AnyRef) {
- handleReturnCode(returnCode)
- }
- },
- null)
- } else {
- bookieClient.deleteLedger(logId)
- }
-
- //also remove everything else that belongs to this TransactionLog.
- zkClient.delete(snapshotPath)
- zkClient.delete(txLogPath)
- } catch {
- case e: Throwable ⇒ handleError(e)
- }
- }
- }
-
- /**
- * Close this transaction log.
- *
- * If already closed, the call is ignored.
- */
- def close() {
- isOpen switchOff {
- EventHandler.debug(this, "Closing transaction log [%s]".format(logId))
- try {
- if (isAsync) {
- ledger.asyncClose(
- new AsyncCallback.CloseCallback {
- def closeComplete(
- returnCode: Int,
- ledgerHandle: LedgerHandle,
- ctx: AnyRef) {
- handleReturnCode(returnCode)
- }
- },
- null)
- } else {
- ledger.close()
- }
- } catch {
- case e: Throwable ⇒ handleError(e)
- }
- }
- }
-
- private def toByteArrays(enumeration: Enumeration[LedgerEntry]): Vector[Array[Byte]] = {
- var entries = Vector[Array[Byte]]()
- while (enumeration.hasMoreElements) {
- val bytes = enumeration.nextElement.getEntry
- val entry =
- if (shouldCompressData) LZF.uncompress(bytes)
- else bytes
- entries = entries :+ entry
- }
- entries
- }
-
- private def storeSnapshotMetaDataInZooKeeper(snapshotId: Long) {
- if (isOpen.isOn) {
- try {
- zkClient.create(snapshotPath, null, CreateMode.PERSISTENT)
- } catch {
- case e: ZkNodeExistsException ⇒ {} // do nothing
- case e: Throwable ⇒ handleError(e)
- }
-
- try {
- zkClient.writeData(snapshotPath, snapshotId)
- } catch {
- case e: Throwable ⇒
- handleError(new ReplicationException(
- "Could not store transaction log snapshot meta-data in ZooKeeper for UUID [" + id + "]"))
- }
- EventHandler.debug(this, "Writing snapshot [%s] to log [%s]".format(snapshotId, logId))
- } else transactionClosedError
- }
-
- private def handleReturnCode(block: ⇒ Long) {
- val code = block.toInt
- if (code == BKException.Code.OK) {} // all fine
- else handleError(BKException.create(code))
- }
-
- private def transactionClosedError: Nothing = {
- handleError(new ReplicationException(
- "Transaction log [" + logId +
- "] is closed. You need to open up new a new one with 'TransactionLog.logFor(id)'"))
- }
-}
-
-/**
- * Factory and lifecycle manager for transaction logs: entries are stored in
- * BookKeeper ledgers, while log and snapshot metadata is kept in ZooKeeper.
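- *
- * Replication settings are read from akka.conf; the values shown below are the
- * defaults defined by this object:
- * {{{
- * akka.cluster.replication.digest-type = "CRC32"
- * akka.cluster.replication.ensemble-size = 3
- * akka.cluster.replication.quorum-size = 2
- * akka.cluster.replication.snapshot-frequency = 1000
- * akka.cluster.replication.timeout = 30
- * }}}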
- */
-object TransactionLog {
-
- val zooKeeperServers = config.getString("akka.cluster.zookeeper-server-addresses", "localhost:2181")
- val sessionTimeout = Duration(config.getInt("akka.cluster.session-timeout", 60), TIME_UNIT).toMillis.toInt
- val connectionTimeout = Duration(config.getInt("akka.cluster.connection-timeout", 60), TIME_UNIT).toMillis.toInt
-
- val digestType = config.getString("akka.cluster.replication.digest-type", "CRC32") match {
- case "CRC32" ⇒ BookKeeper.DigestType.CRC32
- case "MAC" ⇒ BookKeeper.DigestType.MAC
- case unknown ⇒ throw new ConfigurationException(
- "akka.cluster.replication.digest-type is invalid [" + unknown + "], must be either 'CRC32' or 'MAC'")
- }
- val password = config.getString("akka.cluster.replication.password", "secret").getBytes("UTF-8")
- val ensembleSize = config.getInt("akka.cluster.replication.ensemble-size", 3)
- val quorumSize = config.getInt("akka.cluster.replication.quorum-size", 2)
- val snapshotFrequency = config.getInt("akka.cluster.replication.snapshot-frequency", 1000)
- val timeout = Duration(config.getInt("akka.cluster.replication.timeout", 30), TIME_UNIT).toMillis
- val shouldCompressData = config.getBool("akka.remote.use-compression", false)
-
- private[akka] val transactionLogNode = "/transaction-log-ids"
-
- private val isConnected = new Switch(false)
-
- @volatile
- private[akka] var bookieClient: BookKeeper = _
-
- @volatile
- private[akka] var zkClient: AkkaZkClient = _
-
- private[akka] def apply(
- ledger: LedgerHandle,
- id: String,
- isAsync: Boolean,
- replicationScheme: ReplicationScheme) =
- new TransactionLog(ledger, id, isAsync, replicationScheme)
-
- /**
- * Starts up the transaction log.
- */
- def start() {
- isConnected switchOn {
- bookieClient = new BookKeeper(zooKeeperServers)
- zkClient = new AkkaZkClient(zooKeeperServers, sessionTimeout, connectionTimeout)
-
- try {
- zkClient.create(transactionLogNode, null, CreateMode.PERSISTENT)
- } catch {
- case e: ZkNodeExistsException ⇒ {} // do nothing
- case e: Throwable ⇒ handleError(e)
- }
-
- EventHandler.info(this,
- ("Transaction log service started with" +
- "\n\tdigest type [%s]" +
- "\n\tensemble size [%s]" +
- "\n\tquorum size [%s]" +
- "\n\tlogging time out [%s]").format(
- digestType,
- ensembleSize,
- quorumSize,
- timeout))
- }
- }
-
- /**
- * Shuts down the transaction log.
- */
- def shutdown() {
- isConnected switchOff {
- try {
- EventHandler.info(this, "Shutting down transaction log...")
- zkClient.close()
- bookieClient.halt()
- EventHandler.info(this, "Transaction log shut down successfully")
- } catch {
- case e: Throwable ⇒ handleError(e)
- }
- }
- }
-
- def transactionLogPath(id: String): String = transactionLogNode + "/" + id
-
- /**
- * Checks if a TransactionLog for the given id already exists.
- */
- def exists(id: String): Boolean = {
- val txLogPath = transactionLogPath(id)
- zkClient.exists(txLogPath)
- }
-
- /**
- * Creates a new transaction log for the 'id' specified. If a TransactionLog already exists for the id,
- * it will be overwritten.
- */
- def newLogFor(id: String, isAsync: Boolean, replicationScheme: ReplicationScheme): TransactionLog = {
- val txLogPath = transactionLogPath(id)
-
- val ledger = try {
- if (exists(id)) {
- //if it exists, we need to delete it first. This gives it the overwrite semantics we are looking for.
- try {
- val ledger = bookieClient.createLedger(ensembleSize, quorumSize, digestType, password)
- val txLog = TransactionLog(ledger, id, false, null)
- txLog.delete()
- txLog.close()
- } catch {
- case e: Throwable ⇒ handleError(e)
- }
- }
-
- val future = Promise[LedgerHandle]()
- if (isAsync) {
- bookieClient.asyncCreateLedger(
- ensembleSize, quorumSize, digestType, password,
- new AsyncCallback.CreateCallback {
- def createComplete(
- returnCode: Int,
- ledgerHandle: LedgerHandle,
- ctx: AnyRef) {
- val future = ctx.asInstanceOf[Promise[LedgerHandle]]
- if (returnCode == BKException.Code.OK) future.success(ledgerHandle)
- else future.failure(BKException.create(returnCode))
- }
- },
- future)
- await(future)
- } else {
- bookieClient.createLedger(ensembleSize, quorumSize, digestType, password)
- }
- } catch {
- case e: Throwable ⇒ handleError(e)
- }
-
- val logId = ledger.getId
- try {
- zkClient.create(txLogPath, null, CreateMode.PERSISTENT)
- zkClient.writeData(txLogPath, logId)
- logId //TODO: does this have any effect?
- } catch {
- case e: Throwable ⇒
- bookieClient.deleteLedger(logId) // clean up
- handleError(new ReplicationException(
- "Could not store transaction log [" + logId +
- "] meta-data in ZooKeeper for UUID [" + id + "]", e))
- }
-
- EventHandler.info(this, "Created new transaction log [%s] for UUID [%s]".format(logId, id))
- TransactionLog(ledger, id, isAsync, replicationScheme)
- }
-
- /**
- * Fetches an existing transaction log for the 'id' specified.
- *
- * @throws ReplicationException if the log with the given id doesn't exist.
- */
- def logFor(id: String, isAsync: Boolean, replicationScheme: ReplicationScheme): TransactionLog = {
- val txLogPath = transactionLogPath(id)
-
- val logId = try {
- val logId = zkClient.readData(txLogPath).asInstanceOf[Long]
- EventHandler.debug(this,
- "Retrieved transaction log [%s] for UUID [%s]".format(logId, id))
- logId
- } catch {
- case e: ZkNoNodeException ⇒
- handleError(new ReplicationException(
- "Transaction log for UUID [" + id + "] does not exist in ZooKeeper"))
- case e: Throwable ⇒ handleError(e)
- }
-
- val ledger = try {
- if (isAsync) {
- val future = Promise[LedgerHandle]()
- bookieClient.asyncOpenLedger(
- logId, digestType, password,
- new AsyncCallback.OpenCallback {
- def openComplete(returnCode: Int, ledgerHandle: LedgerHandle, ctx: AnyRef) {
- val future = ctx.asInstanceOf[Promise[LedgerHandle]]
- if (returnCode == BKException.Code.OK) future.success(ledgerHandle)
- else future.failure(BKException.create(returnCode))
- }
- },
- future)
- await(future)
- } else {
- bookieClient.openLedger(logId, digestType, password)
- }
- } catch {
- case e: Throwable ⇒ handleError(e)
- }
-
- TransactionLog(ledger, id, isAsync, replicationScheme)
- }
-
- private[akka] def await[T](future: Promise[T]): T = {
- future.await.value.get match {
- case Right(result) => result
- case Left(throwable) => handleError(throwable)
- }
- }
-
- private[akka] def handleError(e: Throwable): Nothing = {
- EventHandler.error(e, this, e.toString)
- throw e
- }
-}
-
-/**
- * Runs an in-process BookKeeper ensemble (bookies plus an embedded ZooKeeper
- * server), intended for testing.
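- *
- * A minimal test sketch using only the methods defined below:
- * {{{
- * LocalBookKeeperEnsemble.start()
- * try {
- *   // ... exercise TransactionLog against the local ensemble ...
- * } finally LocalBookKeeperEnsemble.shutdown()
- * }}}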
- */
-object LocalBookKeeperEnsemble {
- private val isRunning = new Switch(false)
-
- //TODO: should probably come from the config file.
- private val port = 5555
-
- @volatile
- private var localBookKeeper: LocalBookKeeper = _
-
- /**
- * Starts the LocalBookKeeperEnsemble.
- *
- * Call can safely be made when already started.
- *
- * This call will block until it is started.
- */
- def start() {
- isRunning switchOn {
- EventHandler.info(this, "Starting up LocalBookKeeperEnsemble...")
- localBookKeeper = new LocalBookKeeper(TransactionLog.ensembleSize)
- localBookKeeper.runZookeeper(port)
- localBookKeeper.initializeZookeper()
- localBookKeeper.runBookies()
- EventHandler.info(this, "LocalBookKeeperEnsemble started up successfully")
- }
- }
-
- /**
- * Shuts down the LocalBookKeeperEnsemble.
- *
- * Call can safely be made when already shut down.
- *
- * This call will block until the shutdown completes.
- */
- def shutdown() {
- isRunning switchOff {
- EventHandler.info(this, "Shutting down LocalBookKeeperEnsemble...")
- localBookKeeper.bs.foreach(_.shutdown()) // stop bookies
- localBookKeeper.zkc.close() // stop zk client
- localBookKeeper.zks.shutdown() // stop zk server
- localBookKeeper.serverFactory.shutdown() // stop zk NIOServer
- EventHandler.info(this, "LocalBookKeeperEnsemble shut down successfully")
- }
- }
-}
diff --git a/akka-remote/src/main/scala/akka/remote/VectorClock.scala b/akka-cluster/src/main/scala/akka/cluster/VectorClock.scala
similarity index 99%
rename from akka-remote/src/main/scala/akka/remote/VectorClock.scala
rename to akka-cluster/src/main/scala/akka/cluster/VectorClock.scala
index 42ea917669..a6a54de1d9 100644
--- a/akka-remote/src/main/scala/akka/remote/VectorClock.scala
+++ b/akka-cluster/src/main/scala/akka/cluster/VectorClock.scala
@@ -2,7 +2,7 @@
* Copyright (C) 2009-2012 Typesafe Inc.
*/
-package akka.remote
+package akka.cluster
import akka.AkkaException
diff --git a/akka-cluster/src/main/scala/akka/cluster/metrics/LocalNodeMetricsManager.scala b/akka-cluster/src/main/scala/akka/cluster/metrics/LocalNodeMetricsManager.scala
deleted file mode 100644
index c366ed598c..0000000000
--- a/akka-cluster/src/main/scala/akka/cluster/metrics/LocalNodeMetricsManager.scala
+++ /dev/null
@@ -1,226 +0,0 @@
-/**
- * Copyright (C) 2009-2012 Typesafe Inc.
- */
-
-package akka.cluster.metrics
-
-import akka.cluster._
-import Cluster._
-import akka.cluster.zookeeper._
-import akka.actor._
-import Actor._
-import scala.collection.JavaConversions._
-import scala.collection.JavaConverters._
-import java.util.concurrent.{ ConcurrentHashMap, ConcurrentSkipListSet }
-import java.util.concurrent.atomic.AtomicReference
-import akka.util.{ Duration, Switch }
-import akka.util.Helpers._
-import akka.util.duration._
-import org.I0Itec.zkclient.exception.ZkNoNodeException
-import akka.event.EventHandler
-
-/*
- * Instance of the metrics manager running on the node. To keep performance up, metrics of all the
- * nodes in the cluster are cached internally and refreshed after a specified timeout
- * (metricsRefreshTimeout) - from monitoring MBeans / Sigar for the local node, and from
- * ZooKeeper for the other nodes in the cluster.
- * metricsRefreshTimeout defaults to 2 seconds and can be defined declaratively in
- * akka.conf:
- *
- * @example {{{
- * akka.cluster.metrics-refresh-timeout = 2
- * }}}
- */
-class LocalNodeMetricsManager(zkClient: AkkaZkClient, private val metricsRefreshTimeout: Duration)
- extends NodeMetricsManager {
-
- /*
- * Provides metrics of the system that the node is running on, through monitoring MBeans, Hyperic Sigar
- * and other systems
- */
- lazy private val metricsProvider = SigarMetricsProvider(refreshTimeout.toMillis.toInt) fold ((thrw) ⇒ {
- EventHandler.warning(this, """Hyperic Sigar library failed to load due to %s: %s.
-All the metrics will be retrieved from monitoring MBeans, and may be incorrect on some platforms.
-In order to get better metrics, please put "sigar.jar" on the classpath, and add the platform-specific native library to "java.library.path"."""
- .format(thrw.getClass.getName, thrw.getMessage))
- new JMXMetricsProvider
- },
- sigar ⇒ sigar)
-
- /*
- * Metrics of all nodes in the cluster
- */
- private val localNodeMetricsCache = new ConcurrentHashMap[String, NodeMetrics]
-
- @volatile
- private var _refreshTimeout = metricsRefreshTimeout
-
- /*
- * Plugged monitors (both local and cluster-wide)
- */
- private val alterationMonitors = new ConcurrentSkipListSet[MetricsAlterationMonitor]
-
- private val _isRunning = new Switch(false)
-
- /*
- * If the value is true, the metrics manager is started and running; stopped otherwise.
- */
- def isRunning = _isRunning.isOn
-
- /*
- * Starts the metrics manager. Once started, it refreshes the cache from ZooKeeper
- * every refreshTimeout and invokes the plugged-in monitors.
- */
- def start() = {
- _isRunning.switchOn { refresh() }
- this
- }
-
- private[cluster] def metricsForNode(nodeName: String): String = "%s/%s".format(node.NODE_METRICS, nodeName)
-
- /*
- * Adds a monitor that reacts when specific conditions are satisfied
- */
- def addMonitor(monitor: MetricsAlterationMonitor) = alterationMonitors add monitor
-
- def removeMonitor(monitor: MetricsAlterationMonitor) = alterationMonitors remove monitor
-
- def refreshTimeout_=(newValue: Duration) = _refreshTimeout = newValue
-
- /*
- * Timeout after which metrics, cached in the metrics manager, will be refreshed from ZooKeeper
- */
- def refreshTimeout = _refreshTimeout
-
- /*
- * Stores metrics of the node in ZooKeeper
- */
- private[akka] def storeMetricsInZK(metrics: NodeMetrics) = {
- val metricsPath = metricsForNode(metrics.nodeName)
- if (zkClient.exists(metricsPath)) {
- zkClient.writeData(metricsPath, metrics)
- } else {
- ignore[ZkNoNodeException](zkClient.createEphemeral(metricsPath, metrics))
- }
- }
-
- /*
- * Gets metrics of the node from ZooKeeper
- */
- private[akka] def getMetricsFromZK(nodeName: String) = {
- zkClient.readData[NodeMetrics](metricsForNode(nodeName))
- }
-
- /*
-   * Removes metrics of the node from the local cache and ZooKeeper
- */
- def removeNodeMetrics(nodeName: String) = {
- val metricsPath = metricsForNode(nodeName)
- if (zkClient.exists(metricsPath)) {
- ignore[ZkNoNodeException](zkClient.delete(metricsPath))
- }
-
- localNodeMetricsCache.remove(nodeName)
- }
-
- /*
-   * Gets metrics of the local node directly from JMX monitoring beans / Hyperic Sigar
- */
- def getLocalMetrics = metricsProvider.getLocalMetrics
-
- /*
-   * Gets metrics of the node specified by the name. If useCached is true (the default),
-   * the metrics snapshot is taken from the local cache; otherwise, it's retrieved from ZooKeeper
- */
- def getMetrics(nodeName: String, useCached: Boolean = true): Option[NodeMetrics] =
- if (useCached)
- Option(localNodeMetricsCache.get(nodeName))
- else
- try {
- Some(getMetricsFromZK(nodeName))
- } catch {
- case ex: ZkNoNodeException ⇒ None
- }
-
- /*
-   * Returns metrics of all nodes in the cluster from ZooKeeper
- */
- private[akka] def getAllMetricsFromZK: Map[String, NodeMetrics] = {
- val metricsPaths = zkClient.getChildren(node.NODE_METRICS).toList.toArray.asInstanceOf[Array[String]]
- metricsPaths.flatMap { nodeName ⇒ getMetrics(nodeName, false).map((nodeName, _)) } toMap
- }
-
- /*
- * Gets cached metrics of all nodes in the cluster
- */
- def getAllMetrics: Array[NodeMetrics] = localNodeMetricsCache.values.asScala.toArray
-
- /*
- * Refreshes locally cached metrics from ZooKeeper, and invokes plugged monitors
- */
- private[akka] def refresh() {
-
- storeMetricsInZK(getLocalMetrics)
- refreshMetricsCacheFromZK()
-
- if (isRunning) {
-      // arm a one-shot timer for the next iteration; refresh() re-arms itself on every run,
-      // so a recurring schedule here would pile up overlapping timers
-      Scheduler.scheduleOnce({ () ⇒ refresh() }, refreshTimeout.length, refreshTimeout.unit)
- invokeMonitors()
- }
- }
-
- /*
- * Refreshes metrics manager cache from ZooKeeper
- */
- private def refreshMetricsCacheFromZK() {
- val allMetricsFromZK = getAllMetricsFromZK
-
- localNodeMetricsCache.keySet.foreach { key ⇒
- if (!allMetricsFromZK.contains(key))
- localNodeMetricsCache.remove(key)
- }
-
-    // RACY: metrics for the node might have been removed both from ZK and the local cache by that moment,
-    // but will be re-cached, since they're still present in the allMetricsFromZK snapshot. Not important, because
-    // the cache will be fixed soon, at the next iteration of refresh
- allMetricsFromZK map {
- case (node, metrics) ⇒
- localNodeMetricsCache.put(node, metrics)
- }
- }
-
- /*
- * Invokes monitors with the cached metrics
- */
- private def invokeMonitors(): Unit = if (!alterationMonitors.isEmpty) {
- // RACY: metrics for some nodes might have been removed/added by that moment. Not important,
- // because monitors will be fed with up-to-date metrics shortly, at the next iteration of refresh
- val clusterNodesMetrics = getAllMetrics
- val localNodeMetrics = clusterNodesMetrics.find(_.nodeName == nodeAddress.nodeName)
- val iterator = alterationMonitors.iterator
-
- // RACY: there might be new monitors added after the iterator has been obtained. Not important,
-    // because the refresh interval is meant to be very short, and all the new monitors will be called at the
- // next refresh iteration
- while (iterator.hasNext) {
-
- val monitor = iterator.next
-
- monitor match {
- case localMonitor: LocalMetricsAlterationMonitor ⇒
- localNodeMetrics.map { metrics ⇒
- if (localMonitor reactsOn metrics)
- localMonitor react metrics
- }
-
- case clusterMonitor: ClusterMetricsAlterationMonitor ⇒
- if (clusterMonitor reactsOn clusterNodesMetrics)
- clusterMonitor react clusterNodesMetrics
- }
-
- }
- }
-
- def stop() = _isRunning.switchOff
-
-}
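
For reference, the monitor hook deleted above is driven entirely by the reactsOn/react pair that invokeMonitors calls. A minimal sketch of plugging in a local monitor, assuming LocalMetricsAlterationMonitor exposes exactly those two members (the id value and the 0.8 load threshold are hypothetical):

    {{{
    import akka.event.EventHandler
    import akka.cluster.metrics.{ LocalMetricsAlterationMonitor, NodeMetrics }

    // warns whenever the local node reports a high system load average
    val highLoadMonitor = new LocalMetricsAlterationMonitor {
      val id = "high-load-monitor" // hypothetical: monitors are kept in a sorted set
      def reactsOn(metrics: NodeMetrics) = metrics.systemLoadAverage > 0.8
      def react(metrics: NodeMetrics) =
        EventHandler.warning(this, "Node [%s] is under high load".format(metrics.nodeName))
    }

    // the manager is reachable from the running cluster node
    Cluster.node.metricsManager.addMonitor(highLoadMonitor)
    }}}
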
diff --git a/akka-cluster/src/main/scala/akka/cluster/metrics/MetricsProvider.scala b/akka-cluster/src/main/scala/akka/cluster/metrics/MetricsProvider.scala
deleted file mode 100644
index 0b366ef9c8..0000000000
--- a/akka-cluster/src/main/scala/akka/cluster/metrics/MetricsProvider.scala
+++ /dev/null
@@ -1,154 +0,0 @@
-/**
- * Copyright (C) 2009-2012 Typesafe Inc.
- */
-
-package akka.cluster.metrics
-
-import akka.cluster._
-import akka.event.EventHandler
-import java.lang.management.ManagementFactory
-import akka.util.ReflectiveAccess._
-import akka.util.Switch
-
-/*
- * Snapshot of the JVM / system that the node is running on
- *
- * @param nodeName name of the node where the metrics are gathered
- * @param usedHeapMemory amount of heap memory currently used
- * @param committedHeapMemory amount of heap memory guaranteed to be available
- * @param maxHeapMemory maximum amount of heap memory that can be used
- * @param avaiableProcessors number of processors available to the JVM
- * @param systemLoadAverage system load average. If the OS-specific Sigar native library is plugged in,
- * it's used to calculate the average load on the CPUs in the system. Otherwise, the value is retrieved from
- * monitoring MBeans. Hyperic Sigar provides more precise values, and thus, if the library is provided, it's used by default.
- *
- */
-case class DefaultNodeMetrics(nodeName: String,
- usedHeapMemory: Long,
- committedHeapMemory: Long,
- maxHeapMemory: Long,
- avaiableProcessors: Int,
- systemLoadAverage: Double) extends NodeMetrics
-
-object MetricsProvider {
-
- /*
- * Maximum value of system load average
- */
- val MAX_SYS_LOAD_AVG = 1
-
- /*
- * Minimum value of system load average
- */
- val MIN_SYS_LOAD_AVG = 0
-
- /*
- * Default value of system load average
- */
- val DEF_SYS_LOAD_AVG = 0.5
-
-}
-
-/*
- * Abstract metrics provider that returns metrics of the system the node is running on
- */
-trait MetricsProvider {
-
- /*
- * Gets metrics of the local system
- */
- def getLocalMetrics: NodeMetrics
-
-}
-
-/*
- * Loads JVM metrics through JMX monitoring beans
- */
-class JMXMetricsProvider extends MetricsProvider {
-
- import MetricsProvider._
-
- private val memoryMXBean = ManagementFactory.getMemoryMXBean
-
- private val osMXBean = ManagementFactory.getOperatingSystemMXBean
-
- /*
- * Validates and calculates system load average
- *
- * @param avg system load average obtained from a specific monitoring provider (may be incorrect)
-   * @return system load average, or the default value (0.5) if the passed value was out of the permitted
- * bounds (0.0 to 1.0)
- */
- @inline
- protected final def calcSystemLoadAverage(avg: Double) =
- if (avg >= MIN_SYS_LOAD_AVG && avg <= MAX_SYS_LOAD_AVG) avg else DEF_SYS_LOAD_AVG
-
- protected def systemLoadAverage = calcSystemLoadAverage(osMXBean.getSystemLoadAverage)
-
- def getLocalMetrics =
- DefaultNodeMetrics(Cluster.nodeAddress.nodeName,
- memoryMXBean.getHeapMemoryUsage.getUsed,
- memoryMXBean.getHeapMemoryUsage.getCommitted,
- memoryMXBean.getHeapMemoryUsage.getMax,
- osMXBean.getAvailableProcessors,
- systemLoadAverage)
-
-}
-
-/*
- * Loads a wider range of higher-quality metrics with Hyperic Sigar (a native library)
- *
- * @param refreshTimeout interval during which Sigar gathers metrics
- */
-class SigarMetricsProvider private (private val sigarInstance: AnyRef) extends JMXMetricsProvider {
-
- private val reportErrors = new Switch(true)
-
- private val getCpuPercMethod = sigarInstance.getClass.getMethod("getCpuPerc")
- private val sigarCpuCombinedMethod = getCpuPercMethod.getReturnType.getMethod("getCombined")
-
- /*
- * Wraps reflective calls to Hyperic Sigar
- *
-   * @param callSigar reflective call to Hyperic Sigar
-   * @param fallback function which is invoked if the call to Sigar failed with an exception
- */
- private def callSigarMethodOrElse[T](callSigar: ⇒ T, fallback: ⇒ T): T =
- try callSigar catch {
- case thrw ⇒
- reportErrors.switchOff {
- EventHandler.warning(this, "Failed to get metrics from Hyperic Sigar. %s: %s"
- .format(thrw.getClass.getName, thrw.getMessage))
- }
- fallback
- }
-
- /*
-   * Obtains the system load average from Sigar.
-   * If the value cannot be obtained, falls back to the system load average taken from JMX
- */
- override def systemLoadAverage = callSigarMethodOrElse(
- calcSystemLoadAverage(sigarCpuCombinedMethod
- .invoke(getCpuPercMethod.invoke(sigarInstance)).asInstanceOf[Double]),
- super.systemLoadAverage)
-
-}
-
-object SigarMetricsProvider {
-
- /*
-   * Instantiates the Sigar metrics provider through reflection, in order to avoid a compile-time dependency
-   * on the Hyperic Sigar library
- */
- def apply(refreshTimeout: Int): Either[Throwable, MetricsProvider] = try {
- for {
- sigarInstance ← createInstance[AnyRef]("org.hyperic.sigar.Sigar", noParams, noArgs).right
- sigarProxyCacheClass: Class[_] ← getClassFor("org.hyperic.sigar.SigarProxyCache").right
- } yield new SigarMetricsProvider(sigarProxyCacheClass
- .getMethod("newInstance", Array(sigarInstance.getClass, classOf[Int]): _*)
- .invoke(null, sigarInstance, new java.lang.Integer(refreshTimeout)))
- } catch {
- case thrw ⇒ Left(thrw)
- }
-
-}
diff --git a/akka-cluster/src/main/scala/akka/cluster/storage/Storage.scala b/akka-cluster/src/main/scala/akka/cluster/storage/Storage.scala
deleted file mode 100644
index a402f2def1..0000000000
--- a/akka-cluster/src/main/scala/akka/cluster/storage/Storage.scala
+++ /dev/null
@@ -1,366 +0,0 @@
-package akka.cluster.storage
-
-/**
- * Copyright (C) 2009-2012 Typesafe Inc.
- */
-import akka.cluster.zookeeper.AkkaZkClient
-import akka.AkkaException
-import org.apache.zookeeper.{ KeeperException, CreateMode }
-import org.apache.zookeeper.data.Stat
-import java.util.concurrent.ConcurrentHashMap
-import annotation.tailrec
-import java.lang.{ RuntimeException, UnsupportedOperationException }
-
-/**
- * Simple abstraction to store an Array of bytes based on some String key.
- *
- * Nothing is being said about ACID, transactions etc. It depends on the implementation
- * of this Storage interface of what is and isn't done on the lowest level.
- *
- * The amount of data that is allowed to be inserted/updated is implementation specific. The InMemoryStorage
- * has no limits, but the ZooKeeperStorage has a maximum size of 1 MB.
- *
- * TODO: Class could use a better name.
- * TODO: Instead of a String as key, perhaps also support a byte-array.
- */
-trait Storage {
-
- /**
- * Loads the VersionedData for the given key.
- *
- * This call doesn't care about the actual version of the data.
- *
- * @param key: the key of the VersionedData to load.
- * @return the VersionedData for the given entry.
- * @throws MissingDataException if the entry with the given key doesn't exist.
- * @throws StorageException if anything goes wrong while accessing the storage
- */
- def load(key: String): VersionedData
-
- /**
- * Loads the VersionedData for the given key and expectedVersion.
- *
- * This call can be used for optimistic locking since the version is included.
- *
- * @param key: the key of the VersionedData to load
- * @param expectedVersion the version the data to load should have.
- * @throws MissingDataException if the data with the given key doesn't exist.
- * @throws BadVersionException if the version is not the expected version.
- * @throws StorageException if anything goes wrong while accessing the storage
- */
- def load(key: String, expectedVersion: Long): VersionedData
-
- /**
- * Checks if a VersionedData with the given key exists.
- *
- * @param key the key to check the existence for.
- * @return true if exists, false if not.
- * @throws StorageException if anything goes wrong while accessing the storage
- */
- def exists(key: String): Boolean
-
- /**
- * Inserts a byte-array based on some key.
- *
- * @param key the key of the Data to insert.
- * @param bytes the data to insert.
- * @return the version of the written data (can be used for optimistic locking).
- * @throws DataExistsException when VersionedData with the given Key already exists.
- * @throws StorageException if anything goes wrong while accessing the storage
- */
- def insert(key: String, bytes: Array[Byte]): Long
-
- /**
- * Inserts the data if there is no data for that key, or overwrites it if it is there.
- *
- * This is the method you want to call if you just want to save something and don't
- * care about any lost update issues.
- *
- * @param key the key of the data
- * @param bytes the data to insert
- * @return the version of the written data (can be used for optimistic locking).
- * @throws StorageException if anything goes wrong while accessing the storage
- */
- def insertOrOverwrite(key: String, bytes: Array[Byte]): Long
-
- /**
- * Overwrites the current data for the given key. This call doesn't care about the version of the existing data.
- *
- * @param key the key of the data to overwrite
- * @param bytes the data to insert.
- * @return the version of the written data (can be used for optimistic locking).
- * @throws MissingDataException when the entry with the given key doesn't exist.
- * @throws StorageException if anything goes wrong while accessing the storage
- */
- def overwrite(key: String, bytes: Array[Byte]): Long
-
- /**
-   * Updates an existing value using an optimistic lock. It expects the current data to have the expectedVersion,
-   * and only then will it do the update.
- *
- * @param key the key of the data to update
- * @param bytes the content to write for the given key
- * @param expectedVersion the version of the content that is expected to be there.
- * @return the version of the written data (can be used for optimistic locking).
- * @throws MissingDataException if no data for the given key exists
-   * @throws BadVersionException if the version of the found data doesn't match the expected version, so essentially
-   * if another update was already done.
- * @throws StorageException if anything goes wrong while accessing the storage
- */
- def update(key: String, bytes: Array[Byte], expectedVersion: Long): Long
-}
-
-/**
- * The VersionedData is a container of data (some bytes) and a version (a Long).
- */
-class VersionedData(val data: Array[Byte], val version: Long) {}
-
-/**
- * An AkkaException thrown by the Storage module.
- */
-class StorageException(msg: String = null, cause: java.lang.Throwable = null) extends AkkaException(msg, cause) {
- def this(msg: String) = this(msg, null);
-}
-
-/**
- * A StorageException thrown when an operation is done on a non-existing node.
- */
-class MissingDataException(msg: String = null, cause: java.lang.Throwable = null) extends StorageException(msg, cause) {
- def this(msg: String) = this(msg, null);
-}
-
-/**
- * A StorageException thrown when an operation is done on an existing node, but no node was expected.
- */
-class DataExistsException(msg: String = null, cause: java.lang.Throwable = null) extends StorageException(msg, cause) {
- def this(msg: String) = this(msg, null);
-}
-
-/**
- * A StorageException thrown when an operation causes an optimistic locking failure.
- */
-class BadVersionException(msg: String = null, cause: java.lang.Throwable = null) extends StorageException(msg, cause) {
- def this(msg: String) = this(msg, null);
-}
-
-/**
- * A Storage implementation based on ZooKeeper.
- *
- * The store method is atomic:
- * - everything is written, or nothing is written
- * - it is isolated, and therefore threadsafe,
- * but it will not participate in any transactions.
- *
- */
-class ZooKeeperStorage(zkClient: AkkaZkClient, root: String = "/peter/storage") extends Storage {
-
- var path = ""
-
-  // makes sure that the complete root exists in ZooKeeper.
- root.split("/").foreach(
- item ⇒ if (item.size > 0) {
-
- path = path + "/" + item
-
- if (!zkClient.exists(path)) {
- //it could be that another thread is going to create this root node as well, so ignore it when it happens.
- try {
- zkClient.create(path, "".getBytes, CreateMode.PERSISTENT)
- } catch {
- case ignore: KeeperException.NodeExistsException ⇒
- }
- }
- })
-
- def toZkPath(key: String): String = {
- root + "/" + key
- }
-
- def load(key: String) = try {
- val stat = new Stat
- val arrayOfBytes = zkClient.connection.readData(root + "/" + key, stat, false)
- new VersionedData(arrayOfBytes, stat.getVersion)
- } catch {
- case e: KeeperException.NoNodeException ⇒ throw new MissingDataException(
- String.format("Failed to load key [%s]: no data was found", key), e)
- case e: KeeperException ⇒ throw new StorageException(
- String.format("Failed to load key [%s]", key), e)
- }
-
- def load(key: String, expectedVersion: Long) = try {
- val stat = new Stat
- val arrayOfBytes = zkClient.connection.readData(root + "/" + key, stat, false)
-
- if (stat.getVersion != expectedVersion) throw new BadVersionException(
- "Failed to update key [" + key + "]: version mismatch, expected [" + expectedVersion + "]" +
- " but found [" + stat.getVersion + "]")
-
- new VersionedData(arrayOfBytes, stat.getVersion)
- } catch {
- case e: KeeperException.NoNodeException ⇒ throw new MissingDataException(
- String.format("Failed to load key [%s]: no data was found", key), e)
- case e: KeeperException ⇒ throw new StorageException(
- String.format("Failed to load key [%s]", key), e)
- }
-
-  def insertOrOverwrite(key: String, bytes: Array[Byte]) =
-    // not yet implemented for the ZooKeeper-backed storage
-    throw new UnsupportedOperationException("insertOrOverwrite is not implemented for ZooKeeperStorage")
-
- def insert(key: String, bytes: Array[Byte]): Long = {
- try {
- zkClient.connection.create(root + "/" + key, bytes, CreateMode.PERSISTENT)
-      // a newly created znode always starts at version 0
-      val version: Long = 0
- version
- } catch {
- case e: KeeperException.NodeExistsException ⇒ throw new DataExistsException(
- String.format("Failed to insert key [%s]: an entry already exists with the same key", key), e)
- case e: KeeperException ⇒ throw new StorageException(
- String.format("Failed to insert key [%s]", key), e)
- }
- }
-
- def exists(key: String) = try {
- zkClient.connection.exists(toZkPath(key), false)
- } catch {
- case e: KeeperException ⇒ throw new StorageException(
- String.format("Failed to check existance for key [%s]", key), e)
- }
-
- def update(key: String, bytes: Array[Byte], expectedVersion: Long): Long = {
- try {
- zkClient.connection.writeData(root + "/" + key, bytes, expectedVersion.asInstanceOf[Int])
-      // a successful conditional setData bumps the znode version by exactly one
-      expectedVersion + 1
- } catch {
- case e: KeeperException.BadVersionException ⇒ throw new BadVersionException(
- String.format("Failed to update key [%s]: version mismatch", key), e)
- case e: KeeperException ⇒ throw new StorageException(
- String.format("Failed to update key [%s]", key), e)
- }
- }
-
- def overwrite(key: String, bytes: Array[Byte]): Long = {
- try {
- zkClient.connection.writeData(root + "/" + key, bytes)
-      -1L // the unconditional write does not report the resulting version
- } catch {
- case e: KeeperException.NoNodeException ⇒ throw new MissingDataException(
- String.format("Failed to overwrite key [%s]: a previous entry already exists", key), e)
- case e: KeeperException ⇒ throw new StorageException(
- String.format("Failed to overwrite key [%s]", key), e)
- }
- }
-}
-
-object InMemoryStorage {
- val InitialVersion = 0;
-}
-
-/**
- * An in-memory {@link Storage} implementation. Useful for testing purposes.
- */
-final class InMemoryStorage extends Storage {
-
- private val map = new ConcurrentHashMap[String, VersionedData]()
-
- def load(key: String) = {
- val result = map.get(key)
-
- if (result == null) throw new MissingDataException(
- String.format("Failed to load key [%s]: no data was found", key))
-
- result
- }
-
- def load(key: String, expectedVersion: Long) = {
- val result = load(key)
-
- if (result.version != expectedVersion) throw new BadVersionException(
- "Failed to load key [" + key + "]: version mismatch, expected [" + result.version + "] " +
- "but found [" + expectedVersion + "]")
-
- result
- }
-
- def exists(key: String) = map.containsKey(key)
-
- def insert(key: String, bytes: Array[Byte]): Long = {
- val version: Long = InMemoryStorage.InitialVersion
- val result = new VersionedData(bytes, version)
-
- val previous = map.putIfAbsent(key, result)
- if (previous != null) throw new DataExistsException(
- String.format("Failed to insert key [%s]: the key already has been inserted previously", key))
-
- version
- }
-
- @tailrec
- def update(key: String, bytes: Array[Byte], expectedVersion: Long): Long = {
- val found = map.get(key)
-
- if (found == null) throw new MissingDataException(
- String.format("Failed to update key [%s], no previous entry exist", key))
-
- if (expectedVersion != found.version) throw new BadVersionException(
- "Failed to update key [" + key + "]: version mismatch, expected [" + expectedVersion + "]" +
- " but found [" + found.version + "]")
-
- val newVersion: Long = expectedVersion + 1
-
- if (map.replace(key, found, new VersionedData(bytes, newVersion))) newVersion
- else update(key, bytes, expectedVersion)
- }
-
- @tailrec
- def overwrite(key: String, bytes: Array[Byte]): Long = {
- val current = map.get(key)
-
- if (current == null) throw new MissingDataException(
- String.format("Failed to overwrite key [%s], no previous entry exist", key))
-
- val update = new VersionedData(bytes, current.version + 1)
-
- if (map.replace(key, current, update)) update.version
- else overwrite(key, bytes)
- }
-
- def insertOrOverwrite(key: String, bytes: Array[Byte]): Long = {
- val version = InMemoryStorage.InitialVersion
- val result = new VersionedData(bytes, version)
-
- val previous = map.putIfAbsent(key, result)
-
- if (previous == null) result.version
- else overwrite(key, bytes)
- }
-}
-
-//TODO: To minimize the number of dependencies, should the Storage not be placed in a separate module?
-//class VoldemortRawStorage(storeClient: StoreClient) extends Storage {
-//
-// def load(Key: String) = {
-// try {
-//
-// } catch {
-// case
-// }
-// }
-//
-// override def insert(key: String, bytes: Array[Byte]) {
-// throw new UnsupportedOperationException()
-// }
-//
-// def update(key: String, bytes: Array[Byte]) {
-// throw new UnsupportedOperationException()
-// }
-//}
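
For reference, the version returned by load/insert/update above is what makes optimistic locking work: a caller re-reads and retries whenever another writer won the race. A minimal sketch against the InMemoryStorage implementation (the casUpdate helper and the "counter" key are hypothetical):

    {{{
    import akka.cluster.storage.{ InMemoryStorage, BadVersionException }

    val storage = new InMemoryStorage
    storage.insert("counter", Array[Byte](0))

    // optimistic update: retry with a fresh read when the version check fails
    def casUpdate(key: String)(transform: Array[Byte] ⇒ Array[Byte]): Long = {
      val current = storage.load(key)
      try storage.update(key, transform(current.data), current.version)
      catch { case _: BadVersionException ⇒ casUpdate(key)(transform) }
    }

    casUpdate("counter")(bytes ⇒ Array((bytes(0) + 1).toByte))
    }}}
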
diff --git a/akka-cluster/src/main/scala/akka/cluster/zookeeper/AkkaZkClient.scala b/akka-cluster/src/main/scala/akka/cluster/zookeeper/AkkaZkClient.scala
deleted file mode 100644
index 9137959877..0000000000
--- a/akka-cluster/src/main/scala/akka/cluster/zookeeper/AkkaZkClient.scala
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
- * Copyright (C) 2009-2012 Typesafe Inc.
- */
-package akka.cluster.zookeeper
-
-import org.I0Itec.zkclient._
-import org.I0Itec.zkclient.serialize._
-import org.I0Itec.zkclient.exception._
-
-/**
- * ZooKeeper client. Holds the ZooKeeper connection and manages its session.
- */
-class AkkaZkClient(zkServers: String,
- sessionTimeout: Int,
- connectionTimeout: Int,
- zkSerializer: ZkSerializer = new SerializableSerializer)
- extends ZkClient(zkServers, sessionTimeout, connectionTimeout, zkSerializer) {
-
- def connection: ZkConnection = _connection.asInstanceOf[ZkConnection]
-
- def reconnect() {
- val zkLock = getEventLock
-
- zkLock.lock()
- try {
- _connection.close()
- _connection.connect(this)
- } catch {
- case e: InterruptedException ⇒ throw new ZkInterruptedException(e)
- } finally {
- zkLock.unlock()
- }
- }
-}
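
For reference, a minimal usage sketch of the client above (server address and timeout values are placeholders):

    {{{
    val zkClient = new AkkaZkClient(
      zkServers = "localhost:2181",
      sessionTimeout = 60000,   // milliseconds
      connectionTimeout = 30000)

    // drop and re-establish the underlying ZooKeeper session, e.g. after a suspected partition
    zkClient.reconnect()
    }}}
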
diff --git a/akka-cluster/src/main/scala/akka/cluster/zookeeper/AkkaZooKeeper.scala b/akka-cluster/src/main/scala/akka/cluster/zookeeper/AkkaZooKeeper.scala
deleted file mode 100644
index b5165ffb72..0000000000
--- a/akka-cluster/src/main/scala/akka/cluster/zookeeper/AkkaZooKeeper.scala
+++ /dev/null
@@ -1,32 +0,0 @@
-/**
- * Copyright (C) 2009-2012 Typesafe Inc.
- */
-package akka.cluster.zookeeper
-
-import org.I0Itec.zkclient._
-import org.apache.commons.io.FileUtils
-import java.io.File
-
-object AkkaZooKeeper {
- /**
- * Starts up a local ZooKeeper server. Should only be used for testing purposes.
- */
- def startLocalServer(dataPath: String, logPath: String): ZkServer =
- startLocalServer(dataPath, logPath, 2181, 500)
-
- /**
- * Starts up a local ZooKeeper server. Should only be used for testing purposes.
- */
- def startLocalServer(dataPath: String, logPath: String, port: Int, tickTime: Int): ZkServer = {
- FileUtils.deleteDirectory(new File(dataPath))
- FileUtils.deleteDirectory(new File(logPath))
- val zkServer = new ZkServer(
- dataPath, logPath,
- new IDefaultNameSpace() {
- def createDefaultNameSpace(zkClient: ZkClient) {}
- },
- port, tickTime)
- zkServer.start()
- zkServer
- }
-}
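
For reference, the intended test usage of the helper above, as a sketch (the data/log paths are placeholders):

    {{{
    // embedded ZooKeeper on the default port 2181 for the duration of a test run
    val zkServer = AkkaZooKeeper.startLocalServer("/tmp/zk-test/data", "/tmp/zk-test/log")
    try {
      // ... exercise code that needs a ZooKeeper at localhost:2181 ...
    } finally zkServer.shutdown()
    }}}
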
diff --git a/akka-cluster/src/main/scala/akka/cluster/zookeeper/ZooKeeperBarrier.scala b/akka-cluster/src/main/scala/akka/cluster/zookeeper/ZooKeeperBarrier.scala
deleted file mode 100644
index c1f51ceb96..0000000000
--- a/akka-cluster/src/main/scala/akka/cluster/zookeeper/ZooKeeperBarrier.scala
+++ /dev/null
@@ -1,104 +0,0 @@
-/**
- * Copyright (C) 2009-2012 Typesafe Inc.
- */
-package akka.cluster.zookeeper
-
-import akka.util.Duration
-import akka.util.duration._
-
-import org.I0Itec.zkclient._
-import org.I0Itec.zkclient.exception._
-
-import java.util.{ List ⇒ JList }
-import java.util.concurrent.CountDownLatch
-
-class BarrierTimeoutException(message: String) extends RuntimeException(message)
-
-/**
- * Barrier based on the ZooKeeper barrier tutorial.
- */
-object ZooKeeperBarrier {
- val BarriersNode = "/barriers"
- val DefaultTimeout = 60 seconds
-
- def apply(zkClient: ZkClient, name: String, node: String, count: Int) =
- new ZooKeeperBarrier(zkClient, name, node, count, DefaultTimeout)
-
- def apply(zkClient: ZkClient, name: String, node: String, count: Int, timeout: Duration) =
- new ZooKeeperBarrier(zkClient, name, node, count, timeout)
-
- def apply(zkClient: ZkClient, cluster: String, name: String, node: String, count: Int) =
- new ZooKeeperBarrier(zkClient, cluster + "-" + name, node, count, DefaultTimeout)
-
- def apply(zkClient: ZkClient, cluster: String, name: String, node: String, count: Int, timeout: Duration) =
- new ZooKeeperBarrier(zkClient, cluster + "-" + name, node, count, timeout)
-
- def ignore[E: Manifest](body: ⇒ Unit) {
- try {
- body
- } catch {
- case e if manifest[E].erasure.isAssignableFrom(e.getClass) ⇒ ()
- }
- }
-}
-
-/**
- * Barrier based on the ZooKeeper barrier tutorial.
- */
-class ZooKeeperBarrier(zkClient: ZkClient, name: String, node: String, count: Int, timeout: Duration)
- extends IZkChildListener {
-
- import ZooKeeperBarrier.{ BarriersNode, ignore }
-
- val barrier = BarriersNode + "/" + name
- val entry = barrier + "/" + node
- val ready = barrier + "/ready"
-
- val exitBarrier = new CountDownLatch(1)
-
- ignore[ZkNodeExistsException](zkClient.createPersistent(BarriersNode))
- ignore[ZkNodeExistsException](zkClient.createPersistent(barrier))
-
- def apply(body: ⇒ Unit) {
- enter()
- body
- leave()
- }
-
- /**
-   * An await does an enter/leave, making this barrier a 'single' barrier instead of a double barrier.
- */
- def await() {
- enter()
- leave()
- }
-
- def enter() = {
- zkClient.createEphemeral(entry)
- if (zkClient.countChildren(barrier) >= count)
- ignore[ZkNodeExistsException](zkClient.createPersistent(ready))
- else
- zkClient.waitUntilExists(ready, timeout.unit, timeout.length)
- if (!zkClient.exists(ready)) {
- throw new BarrierTimeoutException("Timeout (%s) while waiting for entry barrier" format timeout)
- }
- zkClient.subscribeChildChanges(barrier, this)
- }
-
- def leave() {
- zkClient.delete(entry)
- exitBarrier.await(timeout.length, timeout.unit)
- if (zkClient.countChildren(barrier) > 0) {
- zkClient.unsubscribeChildChanges(barrier, this)
- throw new BarrierTimeoutException("Timeout (%s) while waiting for exit barrier" format timeout)
- }
- zkClient.unsubscribeChildChanges(barrier, this)
- }
-
- def handleChildChange(path: String, children: JList[String]) {
- if (children.size <= 1) {
- ignore[ZkNoNodeException](zkClient.delete(ready))
- exitBarrier.countDown()
- }
- }
-}
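
For reference, the double-barrier pattern implemented above is used like this, as a minimal sketch (zkClient and the node/count values are placeholders):

    {{{
    // all three participants block in enter() until the last one arrives,
    // run the body, and then block in leave() until everyone has finished
    val barrier = ZooKeeperBarrier(zkClient, "election-test", "node1", 3)
    barrier {
      // ... work that must start and finish in lock-step across the 3 nodes ...
    }
    }}}
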
diff --git a/akka-remote/src/multi-jvm/scala/akka/remote/GossipMembershipMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipMembershipMultiJvmSpec.scala
similarity index 99%
rename from akka-remote/src/multi-jvm/scala/akka/remote/GossipMembershipMultiJvmSpec.scala
rename to akka-cluster/src/multi-jvm/scala/akka/cluster/GossipMembershipMultiJvmSpec.scala
index 418f6f385b..c380d3e5eb 100644
--- a/akka-remote/src/multi-jvm/scala/akka/remote/GossipMembershipMultiJvmSpec.scala
+++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipMembershipMultiJvmSpec.scala
@@ -1,4 +1,4 @@
-// package akka.remote
+// package akka.cluster
// import akka.actor.Actor
// import akka.remote._
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/newleader/NewLeaderChangeListenerMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/newleader/NewLeaderChangeListenerMultiJvmNode1.conf
deleted file mode 100644
index 2f642a20f0..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/newleader/NewLeaderChangeListenerMultiJvmNode1.conf
+++ /dev/null
@@ -1,2 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handler-level = "WARNING"
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/newleader/NewLeaderChangeListenerMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/newleader/NewLeaderChangeListenerMultiJvmNode1.opts
deleted file mode 100644
index dc86c1c9c0..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/newleader/NewLeaderChangeListenerMultiJvmNode1.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node1 -Dakka.remote.port=9991
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/newleader/NewLeaderChangeListenerMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/newleader/NewLeaderChangeListenerMultiJvmNode2.conf
deleted file mode 100644
index 2f642a20f0..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/newleader/NewLeaderChangeListenerMultiJvmNode2.conf
+++ /dev/null
@@ -1,2 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handler-level = "WARNING"
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/newleader/NewLeaderChangeListenerMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/newleader/NewLeaderChangeListenerMultiJvmNode2.opts
deleted file mode 100644
index bb140941a5..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/newleader/NewLeaderChangeListenerMultiJvmNode2.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node2 -Dakka.remote.port=9992
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/newleader/NewLeaderChangeListenerMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/newleader/NewLeaderChangeListenerMultiJvmSpec.scala
deleted file mode 100644
index f1b9f5a7ae..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/newleader/NewLeaderChangeListenerMultiJvmSpec.scala
+++ /dev/null
@@ -1,63 +0,0 @@
-/**
- * Copyright (C) 2009-2012 Typesafe Inc.
- */
-
-package akka.cluster.api.changelisteners.newleader
-
-import org.scalatest.WordSpec
-import org.scalatest.matchers.MustMatchers
-import org.scalatest.BeforeAndAfterAll
-
-import akka.cluster._
-import ChangeListener._
-import Cluster._
-import akka.cluster.LocalCluster._
-
-import java.util.concurrent._
-
-object NewLeaderChangeListenerMultiJvmSpec {
- var NrOfNodes = 2
-}
-
-class NewLeaderChangeListenerMultiJvmNode1 extends MasterClusterTestNode {
- import NewLeaderChangeListenerMultiJvmSpec._
-
- val testNodes = NrOfNodes
-
- "A NewLeader change listener" must {
-
- "be invoked after leader election is completed" ignore {
- barrier("start-node1", NrOfNodes) {
- Cluster.node.start()
- }
-
- barrier("start-node2", NrOfNodes).await()
-
- System.exit(0)
- }
- }
-}
-
-class NewLeaderChangeListenerMultiJvmNode2 extends ClusterTestNode {
- import NewLeaderChangeListenerMultiJvmSpec._
-
- "A NewLeader change listener" must {
-
- "be invoked after leader election is completed" ignore {
- val latch = new CountDownLatch(1)
-
- barrier("start-node1", NrOfNodes).await()
-
- barrier("start-node2", NrOfNodes) {
- node.register(new ChangeListener {
- override def newLeader(node: String, client: ClusterNode) {
- latch.countDown
- }
- })
- }
- latch.await(10, TimeUnit.SECONDS) must be === true
-
- node.shutdown()
- }
- }
-}
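
For reference, the latch-plus-listener pattern these change-listener specs rely on, as a standalone sketch (assuming the ChangeListener callbacks used in the deleted tests):

    {{{
    import akka.cluster._
    import Cluster._
    import ChangeListener._
    import java.util.concurrent.{ CountDownLatch, TimeUnit }

    val latch = new CountDownLatch(1)
    node.register(new ChangeListener {
      override def newLeader(leaderNode: String, client: ClusterNode) {
        latch.countDown()
      }
    })

    // true once leader election has completed within the timeout
    latch.await(10, TimeUnit.SECONDS)
    }}}
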
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodeconnected/NodeConnectedChangeListenerMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodeconnected/NodeConnectedChangeListenerMultiJvmNode1.conf
deleted file mode 100644
index 2f642a20f0..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodeconnected/NodeConnectedChangeListenerMultiJvmNode1.conf
+++ /dev/null
@@ -1,2 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handler-level = "WARNING"
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodeconnected/NodeConnectedChangeListenerMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodeconnected/NodeConnectedChangeListenerMultiJvmNode1.opts
deleted file mode 100644
index dc86c1c9c0..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodeconnected/NodeConnectedChangeListenerMultiJvmNode1.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node1 -Dakka.remote.port=9991
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodeconnected/NodeConnectedChangeListenerMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodeconnected/NodeConnectedChangeListenerMultiJvmNode2.conf
deleted file mode 100644
index 2f642a20f0..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodeconnected/NodeConnectedChangeListenerMultiJvmNode2.conf
+++ /dev/null
@@ -1,2 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handler-level = "WARNING"
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodeconnected/NodeConnectedChangeListenerMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodeconnected/NodeConnectedChangeListenerMultiJvmNode2.opts
deleted file mode 100644
index bb140941a5..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodeconnected/NodeConnectedChangeListenerMultiJvmNode2.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node2 -Dakka.remote.port=9992
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodeconnected/NodeConnectedChangeListenerMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodeconnected/NodeConnectedChangeListenerMultiJvmSpec.scala
deleted file mode 100644
index deec5c19e6..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodeconnected/NodeConnectedChangeListenerMultiJvmSpec.scala
+++ /dev/null
@@ -1,65 +0,0 @@
-/**
- * Copyright (C) 2009-2012 Typesafe Inc.
- */
-
-package akka.cluster.api.changelisteners.nodeconnected
-
-import org.scalatest.WordSpec
-import org.scalatest.matchers.MustMatchers
-import org.scalatest.BeforeAndAfterAll
-
-import akka.cluster._
-import ChangeListener._
-import Cluster._
-import akka.cluster.LocalCluster._
-
-import java.util.concurrent._
-
-object NodeConnectedChangeListenerMultiJvmSpec {
- var NrOfNodes = 2
-}
-
-class NodeConnectedChangeListenerMultiJvmNode1 extends MasterClusterTestNode {
- import NodeConnectedChangeListenerMultiJvmSpec._
-
- val testNodes = NrOfNodes
-
- "A NodeConnected change listener" must {
-
- "be invoked when a new node joins the cluster" in {
- val latch = new CountDownLatch(1)
- node.register(new ChangeListener {
- override def nodeConnected(node: String, client: ClusterNode) {
- latch.countDown
- }
- })
-
- barrier("start-node1", NrOfNodes) {
- Cluster.node.start()
- }
-
- barrier("start-node2", NrOfNodes) {
- latch.await(5, TimeUnit.SECONDS) must be === true
- }
-
- node.shutdown()
- }
- }
-}
-
-class NodeConnectedChangeListenerMultiJvmNode2 extends ClusterTestNode {
- import NodeConnectedChangeListenerMultiJvmSpec._
-
- "A NodeConnected change listener" must {
-
- "be invoked when a new node joins the cluster" in {
- barrier("start-node1", NrOfNodes).await()
-
- barrier("start-node2", NrOfNodes) {
- Cluster.node.start()
- }
-
- node.shutdown()
- }
- }
-}
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodedisconnected/NodeDisconnectedChangeListenerMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodedisconnected/NodeDisconnectedChangeListenerMultiJvmNode1.conf
deleted file mode 100644
index 2f642a20f0..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodedisconnected/NodeDisconnectedChangeListenerMultiJvmNode1.conf
+++ /dev/null
@@ -1,2 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handler-level = "WARNING"
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodedisconnected/NodeDisconnectedChangeListenerMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodedisconnected/NodeDisconnectedChangeListenerMultiJvmNode1.opts
deleted file mode 100644
index dc86c1c9c0..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodedisconnected/NodeDisconnectedChangeListenerMultiJvmNode1.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node1 -Dakka.remote.port=9991
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodedisconnected/NodeDisconnectedChangeListenerMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodedisconnected/NodeDisconnectedChangeListenerMultiJvmNode2.conf
deleted file mode 100644
index 2f642a20f0..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodedisconnected/NodeDisconnectedChangeListenerMultiJvmNode2.conf
+++ /dev/null
@@ -1,2 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handler-level = "WARNING"
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodedisconnected/NodeDisconnectedChangeListenerMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodedisconnected/NodeDisconnectedChangeListenerMultiJvmNode2.opts
deleted file mode 100644
index bb140941a5..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodedisconnected/NodeDisconnectedChangeListenerMultiJvmNode2.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node2 -Dakka.remote.port=9992
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodedisconnected/NodeDisconnectedChangeListenerMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodedisconnected/NodeDisconnectedChangeListenerMultiJvmSpec.scala
deleted file mode 100644
index 54a327126e..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodedisconnected/NodeDisconnectedChangeListenerMultiJvmSpec.scala
+++ /dev/null
@@ -1,65 +0,0 @@
-/**
- * Copyright (C) 2009-2012 Typesafe Inc.
- */
-
-package akka.cluster.api.changelisteners.nodedisconnected
-
-import org.scalatest.WordSpec
-import org.scalatest.matchers.MustMatchers
-import org.scalatest.BeforeAndAfterAll
-
-import akka.cluster._
-import ChangeListener._
-import Cluster._
-import akka.cluster.LocalCluster._
-
-import java.util.concurrent._
-
-object NodeDisconnectedChangeListenerMultiJvmSpec {
- var NrOfNodes = 2
-}
-
-class NodeDisconnectedChangeListenerMultiJvmNode1 extends MasterClusterTestNode {
- import NodeDisconnectedChangeListenerMultiJvmSpec._
-
- val testNodes = NrOfNodes
-
- "A NodeDisconnected change listener" must {
-
- "be invoked when a new node leaves the cluster" in {
- val latch = new CountDownLatch(1)
- node.register(new ChangeListener {
- override def nodeDisconnected(node: String, client: ClusterNode) {
- latch.countDown
- }
- })
-
- barrier("start-node1", NrOfNodes) {
- Cluster.node.start()
- }
-
- barrier("start-node2", NrOfNodes).await()
-
- latch.await(10, TimeUnit.SECONDS) must be === true
-
- node.shutdown()
- }
- }
-}
-
-class NodeDisconnectedChangeListenerMultiJvmNode2 extends ClusterTestNode {
- import NodeDisconnectedChangeListenerMultiJvmSpec._
-
- "A NodeDisconnected change listener" must {
-
- "be invoked when a new node leaves the cluster" in {
- barrier("start-node1", NrOfNodes).await()
-
- barrier("start-node2", NrOfNodes) {
- Cluster.node.start()
- }
-
- node.shutdown()
- }
- }
-}
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/configuration/ConfigurationStorageMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/configuration/ConfigurationStorageMultiJvmNode1.conf
deleted file mode 100644
index 2f642a20f0..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/configuration/ConfigurationStorageMultiJvmNode1.conf
+++ /dev/null
@@ -1,2 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handler-level = "WARNING"
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/configuration/ConfigurationStorageMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/configuration/ConfigurationStorageMultiJvmNode1.opts
deleted file mode 100644
index dc86c1c9c0..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/configuration/ConfigurationStorageMultiJvmNode1.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node1 -Dakka.remote.port=9991
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/configuration/ConfigurationStorageMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/configuration/ConfigurationStorageMultiJvmNode2.conf
deleted file mode 100644
index 2f642a20f0..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/configuration/ConfigurationStorageMultiJvmNode2.conf
+++ /dev/null
@@ -1,2 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handler-level = "WARNING"
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/configuration/ConfigurationStorageMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/configuration/ConfigurationStorageMultiJvmNode2.opts
deleted file mode 100644
index bb140941a5..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/configuration/ConfigurationStorageMultiJvmNode2.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node2 -Dakka.remote.port=9992
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/configuration/ConfigurationStorageMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/configuration/ConfigurationStorageMultiJvmSpec.scala
deleted file mode 100644
index f9aabbb004..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/configuration/ConfigurationStorageMultiJvmSpec.scala
+++ /dev/null
@@ -1,89 +0,0 @@
-/**
- * Copyright (C) 2009-2012 Typesafe Inc.
- */
-
-package akka.cluster.api.configuration
-
-import org.scalatest.WordSpec
-import org.scalatest.matchers.MustMatchers
-import org.scalatest.BeforeAndAfterAll
-
-import akka.cluster._
-import Cluster._
-import akka.cluster.LocalCluster._
-
-object ConfigurationStorageMultiJvmSpec {
- var NrOfNodes = 2
-}
-
-class ConfigurationStorageMultiJvmNode1 extends MasterClusterTestNode {
- import ConfigurationStorageMultiJvmSpec._
-
- val testNodes = NrOfNodes
-
- "A cluster" must {
-
- "be able to store, read and remove custom configuration data" in {
-
- barrier("start-node-1", NrOfNodes) {
- Cluster.node.start()
- }
-
- barrier("start-node-2", NrOfNodes).await()
-
- barrier("store-config-data-node-1", NrOfNodes) {
- node.setConfigElement("key1", "value1".getBytes)
- }
-
- barrier("read-config-data-node-2", NrOfNodes).await()
-
- barrier("remove-config-data-node-2", NrOfNodes).await()
-
- barrier("try-read-config-data-node-1", NrOfNodes) {
- val option = node.getConfigElement("key1")
- option.isDefined must be(false)
-
- val elements = node.getConfigElementKeys
- elements.size must be(0)
- }
-
- node.shutdown()
- }
- }
-}
-
-class ConfigurationStorageMultiJvmNode2 extends ClusterTestNode {
- import ConfigurationStorageMultiJvmSpec._
-
- "A cluster" must {
-
- "be able to store, read and remove custom configuration data" in {
-
- barrier("start-node-1", NrOfNodes).await()
-
- barrier("start-node-2", NrOfNodes) {
- Cluster.node.start()
- }
-
- barrier("store-config-data-node-1", NrOfNodes).await()
-
- barrier("read-config-data-node-2", NrOfNodes) {
- val option = node.getConfigElement("key1")
- option.isDefined must be(true)
- option.get must be("value1".getBytes)
-
- val elements = node.getConfigElementKeys
- elements.size must be(1)
- elements.head must be("key1")
- }
-
- barrier("remove-config-data-node-2", NrOfNodes) {
- node.removeConfigElement("key1")
- }
-
- barrier("try-read-config-data-node-1", NrOfNodes).await()
-
- node.shutdown()
- }
- }
-}
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/leader/election/LeaderElectionMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/leader/election/LeaderElectionMultiJvmNode1.conf
deleted file mode 100644
index 2f642a20f0..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/leader/election/LeaderElectionMultiJvmNode1.conf
+++ /dev/null
@@ -1,2 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handler-level = "WARNING"
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/leader/election/LeaderElectionMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/leader/election/LeaderElectionMultiJvmNode1.opts
deleted file mode 100644
index dc86c1c9c0..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/leader/election/LeaderElectionMultiJvmNode1.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node1 -Dakka.remote.port=9991
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/leader/election/LeaderElectionMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/leader/election/LeaderElectionMultiJvmNode2.conf
deleted file mode 100644
index 2f642a20f0..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/leader/election/LeaderElectionMultiJvmNode2.conf
+++ /dev/null
@@ -1,2 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handler-level = "WARNING"
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/leader/election/LeaderElectionMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/leader/election/LeaderElectionMultiJvmNode2.opts
deleted file mode 100644
index bb140941a5..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/leader/election/LeaderElectionMultiJvmNode2.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node2 -Dakka.remote.port=9992
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/leader/election/LeaderElectionMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/leader/election/LeaderElectionMultiJvmSpec.scala
deleted file mode 100644
index 479f77e0d3..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/leader/election/LeaderElectionMultiJvmSpec.scala
+++ /dev/null
@@ -1,71 +0,0 @@
-/**
- * Copyright (C) 2009-2012 Typesafe Inc.
- */
-
-package akka.cluster.api.leader.election
-
-import org.scalatest.WordSpec
-import org.scalatest.matchers.MustMatchers
-import org.scalatest.BeforeAndAfterAll
-
-import akka.cluster._
-import ChangeListener._
-import Cluster._
-import akka.cluster.LocalCluster._
-
-import java.util.concurrent._
-
-object LeaderElectionMultiJvmSpec {
- var NrOfNodes = 2
-}
-/*
-class LeaderElectionMultiJvmNode1 extends MasterClusterTestNode {
- import LeaderElectionMultiJvmSpec._
-
- val testNodes = NrOfNodes
-
- "A cluster" must {
-
- "be able to elect a single leader in the cluster and perform re-election if leader resigns" in {
-
- barrier("start-node1", NrOfNodes) {
- Cluster.node.start()
- }
- node.isLeader must be === true
-
- barrier("start-node2", NrOfNodes) {
- }
- node.isLeader must be === true
-
- barrier("stop-node1", NrOfNodes) {
- node.resign()
- }
- }
- }
-}
-
-class LeaderElectionMultiJvmNode2 extends ClusterTestNode {
- import LeaderElectionMultiJvmSpec._
-
- "A cluster" must {
-
- "be able to elect a single leader in the cluster and perform re-election if leader resigns" in {
-
- barrier("start-node1", NrOfNodes) {
- }
- node.isLeader must be === false
-
- barrier("start-node2", NrOfNodes) {
- Cluster.node.start()
- }
- node.isLeader must be === false
-
- barrier("stop-node1", NrOfNodes) {
- }
- Thread.sleep(1000) // wait for re-election
-
- node.isLeader must be === true
- }
- }
-}
-*/
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/registry/RegistryStoreMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/registry/RegistryStoreMultiJvmNode1.conf
deleted file mode 100644
index 2f642a20f0..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/registry/RegistryStoreMultiJvmNode1.conf
+++ /dev/null
@@ -1,2 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handler-level = "WARNING"
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/registry/RegistryStoreMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/registry/RegistryStoreMultiJvmNode1.opts
deleted file mode 100644
index dc86c1c9c0..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/registry/RegistryStoreMultiJvmNode1.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node1 -Dakka.remote.port=9991
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/registry/RegistryStoreMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/registry/RegistryStoreMultiJvmNode2.conf
deleted file mode 100644
index 2f642a20f0..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/registry/RegistryStoreMultiJvmNode2.conf
+++ /dev/null
@@ -1,2 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handler-level = "WARNING"
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/registry/RegistryStoreMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/registry/RegistryStoreMultiJvmNode2.opts
deleted file mode 100644
index bb140941a5..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/registry/RegistryStoreMultiJvmNode2.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node2 -Dakka.remote.port=9992
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/registry/RegistryStoreMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/registry/RegistryStoreMultiJvmSpec.scala
deleted file mode 100644
index c20bf9269c..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/registry/RegistryStoreMultiJvmSpec.scala
+++ /dev/null
@@ -1,116 +0,0 @@
-/**
- * Copyright (C) 2009-2012 Typesafe Inc.
- */
-
-package akka.cluster.api.registry
-
-import org.scalatest.WordSpec
-import org.scalatest.matchers.MustMatchers
-import org.scalatest.BeforeAndAfterAll
-
-import akka.actor._
-import Actor._
-import akka.cluster._
-import ChangeListener._
-import Cluster._
-import akka.config.Config
-import akka.serialization.Serialization
-import akka.cluster.LocalCluster._
-
-import java.util.concurrent._
-
-object RegistryStoreMultiJvmSpec {
- var NrOfNodes = 2
-
- class HelloWorld1 extends Actor with Serializable {
- def receive = {
- case "Hello" ⇒
- reply("World from node [" + Config.nodename + "]")
- }
- }
-
- class HelloWorld2 extends Actor with Serializable {
- var counter = 0
- def receive = {
- case "Hello" ⇒
- Thread.sleep(1000)
- counter += 1
- case "Count" ⇒
- reply(counter)
- }
- }
-}
-
-class RegistryStoreMultiJvmNode1 extends MasterClusterTestNode {
- import RegistryStoreMultiJvmSpec._
-
- val testNodes = NrOfNodes
-
- "A cluster" must {
-
- "be able to store an ActorRef in the cluster without a replication strategy and retrieve it with 'use'" in {
-
- barrier("start-node-1", NrOfNodes) {
- Cluster.node.start()
- }
-
- barrier("start-node-2", NrOfNodes).await()
-
- barrier("store-1-in-node-1", NrOfNodes) {
- node.store("hello-world-1", classOf[HelloWorld1], Serialization.serializerFor(classOf[HelloWorld1]))
- }
-
- barrier("use-1-in-node-2", NrOfNodes).await()
-
- barrier("store-2-in-node-1", NrOfNodes) {
- node.store("hello-world-2", classOf[HelloWorld1], false, Serialization.serializerFor(classOf[HelloWorld1]))
- }
-
- barrier("use-2-in-node-2", NrOfNodes).await()
-
- node.shutdown()
- }
- }
-}
-
-class RegistryStoreMultiJvmNode2 extends ClusterTestNode {
- import RegistryStoreMultiJvmSpec._
-
- "A cluster" must {
-
- "be able to store an actor in the cluster with 'store' and retrieve it with 'use'" in {
-
- barrier("start-node-1", NrOfNodes).await()
-
- barrier("start-node-2", NrOfNodes) {
- Cluster.node.start()
- }
-
- barrier("store-1-in-node-1", NrOfNodes).await()
-
- barrier("use-1-in-node-2", NrOfNodes) {
- val actorOrOption = node.use("hello-world-1")
- if (actorOrOption.isEmpty) fail("Actor could not be retrieved")
-
- val actorRef = actorOrOption.get
- actorRef.address must be("hello-world-1")
-
- (actorRef ? "Hello").as[String].get must be("World from node [node2]")
- }
-
- barrier("store-2-in-node-1", NrOfNodes).await()
-
- barrier("use-2-in-node-2", NrOfNodes) {
- val actorOrOption = node.use("hello-world-2")
- if (actorOrOption.isEmpty) fail("Actor could not be retrieved")
-
- val actorRef = actorOrOption.get
- actorRef.address must be("hello-world-2")
-
- (actorRef ? "Hello").as[String].get must be("World from node [node2]")
- }
-
- node.shutdown()
- }
- }
-}
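
For reference, the store/use round trip exercised by this spec, as a sketch (assuming the ClusterNode API used in the deleted test):

    {{{
    // publishing node: register the actor class in the cluster under an address
    node.store("hello-world-1", classOf[HelloWorld1],
      Serialization.serializerFor(classOf[HelloWorld1]))

    // consuming node: check out a local instance by address and talk to it
    node.use("hello-world-1") foreach { actorRef ⇒
      (actorRef ? "Hello").as[String] foreach println
    }
    }}}
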
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/deployment/DeploymentMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/deployment/DeploymentMultiJvmNode1.conf
deleted file mode 100644
index 88df1a6421..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/deployment/DeploymentMultiJvmNode1.conf
+++ /dev/null
@@ -1,4 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handler-level = "WARNING"
-akka.actor.deployment.service-hello.router = "round-robin"
-akka.actor.deployment.service-hello.nr-of-instances = 1
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/deployment/DeploymentMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/deployment/DeploymentMultiJvmNode1.opts
deleted file mode 100644
index dc86c1c9c0..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/deployment/DeploymentMultiJvmNode1.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node1 -Dakka.remote.port=9991
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/deployment/DeploymentMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/deployment/DeploymentMultiJvmNode2.conf
deleted file mode 100644
index 88df1a6421..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/deployment/DeploymentMultiJvmNode2.conf
+++ /dev/null
@@ -1,4 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handler-level = "WARNING"
-akka.actor.deployment.service-hello.router = "round-robin"
-akka.actor.deployment.service-hello.nr-of-instances = 1
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/deployment/DeploymentMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/deployment/DeploymentMultiJvmNode2.opts
deleted file mode 100644
index bb140941a5..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/deployment/DeploymentMultiJvmNode2.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node2 -Dakka.remote.port=9992
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/deployment/DeploymentMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/deployment/DeploymentMultiJvmSpec.scala
deleted file mode 100644
index ef0b79b4a7..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/deployment/DeploymentMultiJvmSpec.scala
+++ /dev/null
@@ -1,75 +0,0 @@
-/**
- * Copyright (C) 2009-2012 Typesafe Inc.
- */
-
-package akka.cluster.deployment
-
-import org.scalatest.WordSpec
-import org.scalatest.matchers.MustMatchers
-import org.scalatest.BeforeAndAfterAll
-
-import akka.actor._
-import Actor._
-import akka.cluster._
-import Cluster._
-import akka.cluster.LocalCluster._
-
-object DeploymentMultiJvmSpec {
- var NrOfNodes = 2
-}
-
-class DeploymentMultiJvmNode1 extends MasterClusterTestNode {
- import DeploymentMultiJvmSpec._
-
- val testNodes = NrOfNodes
-
- "A ClusterDeployer" must {
-
- "be able to deploy deployments in akka.conf and lookup the deployments by 'address'" in {
-
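- // barrier(name, n) is a rendezvous across all n test JVMs: the block form runs its body while the other nodes wait at the same barrier, and .await() simply joins the rendezvous.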
- barrier("start-node-1", NrOfNodes) {
- Cluster.node.start()
- }
-
- barrier("start-node-2", NrOfNodes).await()
-
- barrier("perform-deployment-on-node-1", NrOfNodes) {
- Deployer.start()
- }
-
- barrier("lookup-deployment-node-2", NrOfNodes).await()
-
- node.shutdown()
- }
- }
-}
-
-class DeploymentMultiJvmNode2 extends ClusterTestNode {
- import DeploymentMultiJvmSpec._
-
- "A cluster" must {
-
- "be able to store, read and remove custom configuration data" in {
-
- barrier("start-node-1", NrOfNodes).await()
-
- barrier("start-node-2", NrOfNodes) {
- Cluster.node.start()
- }
-
- barrier("perform-deployment-on-node-1", NrOfNodes).await()
-
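- // node1 has pushed the deployments from akka.conf into the cluster; verify each one can be looked up by its address from this node.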
- barrier("lookup-deployment-node-2", NrOfNodes) {
- Deployer.start()
- val deployments = Deployer.deploymentsInConfig
- deployments foreach { oldDeployment ⇒
- val newDeployment = ClusterDeployer.lookupDeploymentFor(oldDeployment.address)
- newDeployment must be('defined)
- oldDeployment must equal(newDeployment.get)
- }
- }
-
- node.shutdown()
- }
- }
-}
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/local/LocalMetricsMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/local/LocalMetricsMultiJvmNode1.conf
deleted file mode 100644
index 8d5284be46..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/local/LocalMetricsMultiJvmNode1.conf
+++ /dev/null
@@ -1,4 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handlers = ["akka.testkit.TestEventListener"]
-akka.event-handler-level = "WARNING"
-akka.cluster.metrics-refresh-timeout = 1
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/local/LocalMetricsMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/local/LocalMetricsMultiJvmNode1.opts
deleted file mode 100644
index dc86c1c9c0..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/local/LocalMetricsMultiJvmNode1.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node1 -Dakka.remote.port=9991
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/local/LocalMetricsMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/local/LocalMetricsMultiJvmSpec.scala
deleted file mode 100644
index 380d68d8ef..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/local/LocalMetricsMultiJvmSpec.scala
+++ /dev/null
@@ -1,134 +0,0 @@
-/**
- * Copyright (C) 2009-2012 Typesafe Inc.
- */
-
-package akka.cluster.metrics.local
-
-import akka.cluster._
-import akka.actor._
-import Actor._
-import Cluster._
-import akka.dispatch._
-import akka.util.Duration
-import akka.util.duration._
-import akka.cluster.metrics._
-import java.util.concurrent.atomic.AtomicInteger
-
-object LocalMetricsMultiJvmSpec {
- val NrOfNodes = 1
-}
-
-class LocalMetricsMultiJvmNode1 extends MasterClusterTestNode {
-
- import LocalMetricsMultiJvmSpec._
-
- val testNodes = NrOfNodes
-
- override def beforeAll = {
- super.beforeAll()
- node
- }
-
- override def afterAll = {
- node.shutdown()
- super.afterAll()
- }
-
- "Metrics manager" must {
-
- def timeout = node.metricsManager.refreshTimeout
-
- "be initialized with refresh timeout value, specified in akka.conf" in {
- timeout must be(1.second)
- }
-
- "return up-to-date local node metrics straight from MBeans/Sigar" in {
- node.metricsManager.getLocalMetrics must not be (null)
-
- node.metricsManager.getLocalMetrics.systemLoadAverage must be(0.5 plusOrMinus 0.5)
- }
-
- "return metrics cached in the MetricsManagerLocalMetrics" in {
- node.metricsManager.getMetrics(nodeAddress.nodeName) must be('defined)
- }
-
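- // getMetrics(nodeName) answers from the local cache; the overload below, called with false, bypasses the cache and reads the node's metrics ZNode directly.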
- "return local node metrics from ZNode" in {
- node.metricsManager.getMetrics(nodeAddress.nodeName, false) must be('defined)
- }
-
- "return cached metrics of all nodes in the cluster" in {
- node.metricsManager.getAllMetrics.size must be(1)
- node.metricsManager.getAllMetrics.find(_.nodeName == "node1") must be('defined)
- }
-
- "throw no exceptions, when user attempts to get metrics of a non-existing node" in {
- node.metricsManager.getMetrics("nonexisting") must be(None)
- node.metricsManager.getMetrics("nonexisting", false) must be(None)
- }
-
- "regularly update cached metrics" in {
- val oldMetrics = node.metricsManager.getLocalMetrics
- Thread sleep timeout.toMillis
- node.metricsManager.getLocalMetrics must not be (oldMetrics)
- }
-
- "allow to track JVM state and bind handles through MetricsAlterationMonitors" in {
- val monitorResponse = Promise[String]()
-
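- // monitors are re-evaluated on every metrics refresh; this one completes the promise the first time used heap exceeds the (deliberately tiny) threshold.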
- node.metricsManager.addMonitor(new LocalMetricsAlterationMonitor {
-
- val id = "heapMemoryThresholdMonitor"
-
- def reactsOn(metrics: NodeMetrics) = metrics.usedHeapMemory > 1
-
- def react(metrics: NodeMetrics) = monitorResponse.success("Too much memory is used!")
-
- })
-
- Await.result(monitorResponse, 5 seconds) must be("Too much memory is used!")
-
- }
-
- class FooMonitor(monitorWorked: AtomicInteger) extends LocalMetricsAlterationMonitor {
- val id = "fooMonitor"
- def reactsOn(metrics: NodeMetrics) = true
- def react(metrics: NodeMetrics) = monitorWorked.set(monitorWorked.get + 1)
- }
-
- "allow to unregister the monitor" in {
-
- val monitorWorked = new AtomicInteger(0)
- val fooMonitor = new FooMonitor(monitorWorked)
-
- node.metricsManager.addMonitor(fooMonitor)
- node.metricsManager.removeMonitor(fooMonitor)
-
- val oldValue = monitorWorked.get
- Thread sleep timeout.toMillis
- monitorWorked.get must be(oldValue)
-
- }
-
- "stop notifying monitors, when stopped" in {
-
- node.metricsManager.stop()
-
- val monitorWorked = new AtomicInteger(0)
-
- node.metricsManager.addMonitor(new LocalMetricsAlterationMonitor {
- val id = "fooMonitor"
- def reactsOn(metrics: NodeMetrics) = true
- def react(metrics: NodeMetrics) = monitorWorked.set(monitorWorked.get + 1)
- })
-
- monitorWorked.get must be(0)
-
- node.metricsManager.start()
- Thread sleep (timeout.toMillis * 2)
- monitorWorked.get must be > (1)
-
- }
-
- }
-
-}
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/remote/RemoteMetricsMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/remote/RemoteMetricsMultiJvmNode1.conf
deleted file mode 100644
index 172e980612..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/remote/RemoteMetricsMultiJvmNode1.conf
+++ /dev/null
@@ -1,3 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handlers = ["akka.testkit.TestEventListener"]
-akka.event-handler-level = "WARNING"
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/remote/RemoteMetricsMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/remote/RemoteMetricsMultiJvmNode1.opts
deleted file mode 100644
index dc86c1c9c0..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/remote/RemoteMetricsMultiJvmNode1.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node1 -Dakka.remote.port=9991
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/remote/RemoteMetricsMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/remote/RemoteMetricsMultiJvmNode2.conf
deleted file mode 100644
index 172e980612..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/remote/RemoteMetricsMultiJvmNode2.conf
+++ /dev/null
@@ -1,3 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handlers = ["akka.testkit.TestEventListener"]
-akka.event-handler-level = "WARNING"
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/remote/RemoteMetricsMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/remote/RemoteMetricsMultiJvmNode2.opts
deleted file mode 100644
index bb140941a5..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/remote/RemoteMetricsMultiJvmNode2.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node2 -Dakka.remote.port=9992
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/remote/RemoteMetricsMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/remote/RemoteMetricsMultiJvmSpec.scala
deleted file mode 100644
index 8c4730dc90..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/remote/RemoteMetricsMultiJvmSpec.scala
+++ /dev/null
@@ -1,133 +0,0 @@
-/**
- * Copyright (C) 2009-2012 Typesafe Inc.
- */
-
-package akka.cluster.metrics.remote
-
-import akka.cluster._
-import akka.actor._
-import Actor._
-import Cluster._
-import akka.dispatch._
-import akka.util.Duration
-import akka.util.duration._
-import akka.cluster.metrics._
-import java.util.concurrent._
-import atomic.AtomicInteger
-
-object RemoteMetricsMultiJvmSpec {
- val NrOfNodes = 2
-
- val MetricsRefreshTimeout = 100.millis
-}
-
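- // counts down the given latch once metrics from clusterSize nodes are visible cluster-wide.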
-class AllMetricsAvailableMonitor(_id: String, completionLatch: CountDownLatch, clusterSize: Int) extends ClusterMetricsAlterationMonitor {
-
- val id = _id
-
- def reactsOn(allMetrics: Array[NodeMetrics]) = allMetrics.size == clusterSize
-
- def react(allMetrics: Array[NodeMetrics]) = completionLatch.countDown()
-
-}
-
-class RemoteMetricsMultiJvmNode1 extends MasterClusterTestNode {
-
- import RemoteMetricsMultiJvmSpec._
-
- val testNodes = NrOfNodes
-
- "Metrics manager" must {
- "provide metrics of all nodes in the cluster" in {
-
- val allMetricsAvailable = new CountDownLatch(1)
-
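- // shorten the refresh interval so cache updates are observable within the test's time budget.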
- node.metricsManager.refreshTimeout = MetricsRefreshTimeout
- node.metricsManager.addMonitor(new AllMetricsAvailableMonitor("all-metrics-available", allMetricsAvaiable, NrOfNodes))
-
- LocalCluster.barrier("node-start", NrOfNodes).await()
-
- allMetricsAvailable.await()
-
- LocalCluster.barrier("check-all-remote-metrics", NrOfNodes) {
- node.metricsManager.getAllMetrics.size must be(2)
- }
-
- val cachedMetrics = node.metricsManager.getMetrics("node2")
- val metricsFromZnode = node.metricsManager.getMetrics("node2", false)
-
- LocalCluster.barrier("check-single-remote-metrics", NrOfNodes) {
- cachedMetrics must be('defined)
- metricsFromZnode must be('defined)
- }
-
- Thread sleep MetricsRefreshTimeout.toMillis
-
- LocalCluster.barrier("remote-metrics-is-updated", NrOfNodes) {
- node.metricsManager.getMetrics("node2") must not be (cachedMetrics)
- node.metricsManager.getMetrics("node2", false) must not be (metricsFromZnode)
- }
-
- val someMetricsGone = new CountDownLatch(1)
- node.metricsManager.addMonitor(new AllMetricsAvailableMonitor("some-metrics-gone", someMetricsGone, 1))
-
- LocalCluster.barrier("some-nodes-leave", NrOfNodes).await()
-
- someMetricsGone.await(10, TimeUnit.SECONDS) must be(true)
-
- node.metricsManager.getMetrics("node2") must be(None)
- node.metricsManager.getMetrics("node2", false) must be(None)
- node.metricsManager.getAllMetrics.size must be(1)
-
- node.shutdown()
-
- }
- }
-
-}
-
-class RemoteMetricsMultiJvmNode2 extends ClusterTestNode {
-
- import RemoteMetricsMultiJvmSpec._
-
- val testNodes = NrOfNodes
-
- "Metrics manager" must {
- "provide metrics of all nodes in the cluster" in {
-
- val allMetricsAvailable = new CountDownLatch(1)
-
- node.metricsManager.refreshTimeout = MetricsRefreshTimeout
- node.metricsManager.addMonitor(new AllMetricsAvailableMonitor("all-metrics-available", allMetricsAvailable, NrOfNodes))
-
- LocalCluster.barrier("node-start", NrOfNodes).await()
-
- allMetricsAvailable.await()
-
- LocalCluster.barrier("check-all-remote-metrics", NrOfNodes) {
- node.metricsManager.getAllMetrics.size must be(2)
- }
-
- val cachedMetrics = node.metricsManager.getMetrics("node1")
- val metricsFromZnode = node.metricsManager.getMetrics("node1", false)
-
- LocalCluster.barrier("check-single-remote-metrics", NrOfNodes) {
- cachedMetrics must be('defined)
- metricsFromZnode must be('defined)
- }
-
- Thread sleep MetricsRefreshTimeout.toMillis
-
- LocalCluster.barrier("remote-metrics-is-updated", NrOfNodes) {
- node.metricsManager.getMetrics("node1") must not be (cachedMetrics)
- node.metricsManager.getMetrics("node1", false) must not be (metricsFromZnode)
- }
-
- LocalCluster.barrier("some-nodes-leave", NrOfNodes) {
- node.shutdown()
- }
- }
- }
-
-}
-
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/migration/MigrationExplicitMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/migration/MigrationExplicitMultiJvmNode1.conf
deleted file mode 100644
index 2f642a20f0..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/migration/MigrationExplicitMultiJvmNode1.conf
+++ /dev/null
@@ -1,2 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handler-level = "WARNING"
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/migration/MigrationExplicitMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/migration/MigrationExplicitMultiJvmNode1.opts
deleted file mode 100644
index dc86c1c9c0..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/migration/MigrationExplicitMultiJvmNode1.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node1 -Dakka.remote.port=9991
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/migration/MigrationExplicitMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/migration/MigrationExplicitMultiJvmNode2.conf
deleted file mode 100644
index 2f642a20f0..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/migration/MigrationExplicitMultiJvmNode2.conf
+++ /dev/null
@@ -1,2 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handler-level = "WARNING"
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/migration/MigrationExplicitMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/migration/MigrationExplicitMultiJvmNode2.opts
deleted file mode 100644
index bb140941a5..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/migration/MigrationExplicitMultiJvmNode2.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node2 -Dakka.remote.port=9992
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/migration/MigrationExplicitMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/migration/MigrationExplicitMultiJvmSpec.scala
deleted file mode 100644
index 7dfdec2f7c..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/migration/MigrationExplicitMultiJvmSpec.scala
+++ /dev/null
@@ -1,112 +0,0 @@
-/**
- * Copyright (C) 2009-2012 Typesafe Inc.
- *
- *
- * package akka.cluster.migration
- *
- * import org.scalatest.WordSpec
- * import org.scalatest.matchers.MustMatchers
- * import org.scalatest.BeforeAndAfterAll
- *
- * import akka.actor._
- * import Actor._
- * import akka.cluster._
- * import ChangeListener._
- * import Cluster._
- * import akka.config.Config
- * import akka.serialization.Serialization
- * import akka.cluster.LocalCluster._
- *
- * import java.util.concurrent._
- *
- * object MigrationExplicitMultiJvmSpec {
- * var NrOfNodes = 2
- *
- * class HelloWorld extends Actor with Serializable {
- * def receive = {
- * case "Hello" ⇒
- * reply("World from node [" + Config.nodename + "]")
- * }
- * }
- * }
- *
- * class MigrationExplicitMultiJvmNode1 extends MasterClusterTestNode {
- * import MigrationExplicitMultiJvmSpec._
- *
- * val testNodes = NrOfNodes
- *
- * "A cluster" must {
- *
- * "be able to migrate an actor from one node to another" in {
- *
- * barrier("start-node-1", NrOfNodes) {
- * Cluster.node.start()
- * }
- *
- * barrier("start-node-2", NrOfNodes) {
- * }
- *
- * barrier("store-1-in-node-1", NrOfNodes) {
- * val serializer = Serialization.serializerFor(classOf[HelloWorld]).fold(x ⇒ fail("No serializer found"), s ⇒ s)
- * node.store("hello-world", classOf[HelloWorld], serializer)
- * }
- *
- * barrier("use-1-in-node-2", NrOfNodes) {
- * }
- *
- * barrier("migrate-from-node2-to-node1", NrOfNodes) {
- * }
- *
- * barrier("check-actor-is-moved-to-node1", NrOfNodes) {
- * node.isInUseOnNode("hello-world") must be(true)
- *
- * val actorRef = Actor.registry.local.actorFor("hello-world").getOrElse(fail("Actor should have been in the local actor registry"))
- * actorRef.address must be("hello-world")
- * (actorRef ? "Hello").as[String].get must be("World from node [node1]")
- * }
- *
- * node.shutdown()
- * }
- * }
- * }
- *
- * class MigrationExplicitMultiJvmNode2 extends ClusterTestNode {
- * import MigrationExplicitMultiJvmSpec._
- *
- * "A cluster" must {
- *
- * "be able to migrate an actor from one node to another" in {
- *
- * barrier("start-node-1", NrOfNodes) {
- * }
- *
- * barrier("start-node-2", NrOfNodes) {
- * Cluster.node.start()
- * }
- *
- * barrier("store-1-in-node-1", NrOfNodes) {
- * }
- *
- * barrier("use-1-in-node-2", NrOfNodes) {
- * val actorOrOption = node.use("hello-world")
- * if (actorOrOption.isEmpty) fail("Actor could not be retrieved")
- *
- * val actorRef = actorOrOption.get
- * actorRef.address must be("hello-world")
- *
- * (actorRef ? "Hello").as[String].get must be("World from node [node2]")
- * }
- *
- * barrier("migrate-from-node2-to-node1", NrOfNodes) {
- * node.migrate(NodeAddress(node.nodeAddress.clusterName, "node1"), "hello-world")
- * Thread.sleep(2000)
- * }
- *
- * barrier("check-actor-is-moved-to-node1", NrOfNodes) {
- * }
- *
- * node.shutdown()
- * }
- * }
- * }
- */
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode1.conf
deleted file mode 100644
index f510c5253c..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode1.conf
+++ /dev/null
@@ -1,6 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handlers = ["akka.testkit.TestEventListener"]
-akka.event-handler-level = "WARNING"
-akka.actor.deployment.service-test.router = "round-robin"
-akka.actor.deployment.service-test.cluster.preferred-nodes = ["node:node2","node:node3"]
-akka.actor.deployment.service-test.nr-of-instances = 2
\ No newline at end of file
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode1.opts
deleted file mode 100644
index dc86c1c9c0..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode1.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node1 -Dakka.remote.port=9991
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode2.conf
deleted file mode 100644
index b7c3e53e6f..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode2.conf
+++ /dev/null
@@ -1,5 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handler-level = "WARNING"
-akka.actor.deployment.service-test.router = "round-robin"
-akka.actor.deployment.service-test.cluster.preferred-nodes = ["node:node2","node:node3"]
-akka.actor.deployment.service-test.nr-of-instances = 2
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode2.opts
deleted file mode 100644
index bb140941a5..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode2.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node2 -Dakka.remote.port=9992
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode3.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode3.conf
deleted file mode 100644
index b7c3e53e6f..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode3.conf
+++ /dev/null
@@ -1,5 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handler-level = "WARNING"
-akka.actor.deployment.service-test.router = "round-robin"
-akka.actor.deployment.service-test.cluster.preferred-nodes = ["node:node2","node:node3"]
-akka.actor.deployment.service-test.nr-of-instances = 2
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode3.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode3.opts
deleted file mode 100644
index 089e3b7776..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode3.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node3 -Dakka.remote.port=9993
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmSpec.scala
deleted file mode 100644
index 98d2aaf394..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmSpec.scala
+++ /dev/null
@@ -1,154 +0,0 @@
-/*
- * Copyright (C) 2009-2012 Typesafe Inc.
- */
-
-package akka.cluster.reflogic
-
-import akka.cluster._
-import akka.cluster.Cluster._
-import akka.actor.Actor
-import akka.event.EventHandler
-import akka.testkit.{ EventFilter, TestEvent }
-import akka.routing.RoutingException
-import java.net.ConnectException
-import java.nio.channels.{ ClosedChannelException, NotYetConnectedException }
-import akka.cluster.LocalCluster._
-
-object ClusterActorRefCleanupMultiJvmSpec {
-
- val NrOfNodes = 3
-
- class TestActor extends Actor with Serializable {
- def receive = {
- case _ ⇒ {}
- }
- }
-
-}
-
-class ClusterActorRefCleanupMultiJvmNode1 extends MasterClusterTestNode {
-
- import ClusterActorRefCleanupMultiJvmSpec._
-
- val testNodes = NrOfNodes
-
- "ClusterActorRef" must {
- "cleanup itself" ignore {
- Cluster.node.start()
- barrier("awaitStarted", NrOfNodes).await()
-
- val ref = Actor.actorOf(Props[ClusterActorRefCleanupMultiJvmSpec.TestActor]("service-test"))
-
- ref.isInstanceOf[ClusterActorRef] must be(true)
-
- val clusteredRef = ref.asInstanceOf[ClusterActorRef]
-
- barrier("awaitActorCreated", NrOfNodes).await()
-
- //verify that all remote actors are there.
- clusteredRef.nrOfConnections must be(2)
-
- // ignore exceptions from killing nodes
- val ignoreExceptions = Seq(
- EventFilter[ClosedChannelException],
- EventFilter[NotYetConnectedException],
- EventFilter[RoutingException],
- EventFilter[ConnectException])
-
- EventHandler.notify(TestEvent.Mute(ignoreExceptions))
-
- //just some waiting to make sure that the node has died.
- Thread.sleep(5000)
-
- //send some request, this should trigger the cleanup
- try {
- clusteredRef ! "hello"
- clusteredRef ! "hello"
- } catch {
- case e: ClosedChannelException ⇒
- case e: NotYetConnectedException ⇒
- case e: RoutingException ⇒
- }
-
- barrier("node-3-dead", NrOfNodes - 1).await()
-
- //since the call to the node failed, the node must have been removed from the list.
- clusteredRef.nrOfConnections must be(1)
-
- //just some waiting to make sure that the node has died.
- Thread.sleep(5000)
-
- //trigger the cleanup.
- try {
- clusteredRef ! "hello"
- clusteredRef ! "hello"
- } catch {
- case e: ClosedChannelException ⇒
- case e: NotYetConnectedException ⇒
- case e: RoutingException ⇒
- }
-
- //now there must not be any remaining connections after the death of the last actor.
- clusteredRef.nrOfConnections must be(0)
-
- //and lets make sure we now get the correct exception if we try to use the ref.
- intercept[RoutingException] {
- clusteredRef ! "Hello"
- }
-
- node.shutdown()
- }
- }
-}
-
-class ClusterActorRefCleanupMultiJvmNode2 extends ClusterTestNode {
-
- import ClusterActorRefCleanupMultiJvmSpec._
-
- val testNodes = NrOfNodes
-
- //this node only provides cluster capacity; no assertions run on the node itself.
- "___" must {
- "___" ignore {
- Runtime.getRuntime.addShutdownHook(new Thread() {
- override def run() {
- ClusterTestNode.exit(classOf[ClusterActorRefCleanupMultiJvmNode2].getName)
- }
- })
-
- Cluster.node.start()
- barrier("awaitStarted", NrOfNodes).await()
-
- barrier("awaitActorCreated", NrOfNodes).await()
-
- barrier("node-3-dead", NrOfNodes - 1).await()
-
- System.exit(0)
- }
- }
-}
-
-class ClusterActorRefCleanupMultiJvmNode3 extends ClusterTestNode {
-
- import ClusterActorRefCleanupMultiJvmSpec._
-
- val testNodes = NrOfNodes
-
- //this node only provides cluster capacity; no assertions run on the node itself.
- "___" must {
- "___" ignore {
- Runtime.getRuntime.addShutdownHook(new Thread() {
- override def run() {
- ClusterTestNode.exit(classOf[ClusterActorRefCleanupMultiJvmNode3].getName)
- }
- })
-
- Cluster.node.start()
- barrier("awaitStarted", NrOfNodes).await()
-
- barrier("awaitActorCreated", NrOfNodes).await()
-
- System.exit(0)
- }
- }
-}
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/nosnapshot/ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/nosnapshot/ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmNode1.conf
deleted file mode 100644
index dca432f404..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/nosnapshot/ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmNode1.conf
+++ /dev/null
@@ -1,7 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handler-level = "WARNING"
-akka.actor.deployment.hello-world-write-behind-nosnapshot.router = "direct"
-akka.actor.deployment.hello-world-write-behind-nosnapshot.nr-of-instances = 1
-akka.actor.deployment.hello-world-write-behind-nosnapshot.cluster.replication.storage = "transaction-log"
-akka.actor.deployment.hello-world-write-behind-nosnapshot.cluster.replication.strategy = "write-behind"
-akka.cluster.replication.snapshot-frequency = 1000
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/nosnapshot/ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/nosnapshot/ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmNode1.opts
deleted file mode 100644
index dc86c1c9c0..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/nosnapshot/ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmNode1.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node1 -Dakka.remote.port=9991
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/nosnapshot/ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/nosnapshot/ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmNode2.conf
deleted file mode 100644
index dca432f404..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/nosnapshot/ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmNode2.conf
+++ /dev/null
@@ -1,7 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handler-level = "WARNING"
-akka.actor.deployment.hello-world-write-behind-nosnapshot.router = "direct"
-akka.actor.deployment.hello-world-write-behind-nosnapshot.nr-of-instances = 1
-akka.actor.deployment.hello-world-write-behind-nosnapshot.cluster.replication.storage = "transaction-log"
-akka.actor.deployment.hello-world-write-behind-nosnapshot.cluster.replication.strategy = "write-behind"
-akka.cluster.replication.snapshot-frequency = 1000
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/nosnapshot/ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/nosnapshot/ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmNode2.opts
deleted file mode 100644
index bb140941a5..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/nosnapshot/ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmNode2.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node2 -Dakka.remote.port=9992
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/nosnapshot/ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/nosnapshot/ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmSpec.scala
deleted file mode 100644
index a90d26ad8d..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/nosnapshot/ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmSpec.scala
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * Copyright (C) 2009-2012 Typesafe Inc.
- */
-
-// package akka.cluster.replication.transactionlog.writebehind.nosnapshot
-
-// import akka.actor._
-// import akka.cluster._
-// import Cluster._
-// import akka.config.Config
-// import akka.cluster.LocalCluster._
-
-// object ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmSpec {
-// var NrOfNodes = 2
-
-// sealed trait TransactionLogMessage extends Serializable
-// case class Count(nr: Int) extends TransactionLogMessage
-// case class Log(full: String) extends TransactionLogMessage
-// case object GetLog extends TransactionLogMessage
-
-// class HelloWorld extends Actor with Serializable {
-// var log = ""
-// def receive = {
-// case Count(nr) ⇒
-// log += nr.toString
-// reply("World from node [" + Config.nodename + "]")
-// case GetLog ⇒
-// reply(Log(log))
-// }
-// }
-// }
-
-// class ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmNode1 extends ClusterTestNode {
-// import ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmSpec._
-
-// "A cluster" must {
-
-// "be able to replicate an actor with a transaction log and replay transaction log after actor migration" ignore {
-
-// barrier("start-node1", NrOfNodes) {
-// Cluster.node.start()
-// }
-
-// barrier("create-actor-on-node1", NrOfNodes) {
-// val actorRef = Actor.actorOf(Props[HelloWorld]("hello-world-write-behind-nosnapshot"))
-// // node.isInUseOnNode("hello-world") must be(true)
-// actorRef.address must be("hello-world-write-behind-nosnapshot")
-// for (i ← 0 until 10) {
-// (actorRef ? Count(i)).as[String] must be(Some("World from node [node1]"))
-// }
-// }
-
-// barrier("start-node2", NrOfNodes).await()
-
-// node.shutdown()
-// }
-// }
-// }
-
-// class ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmNode2 extends MasterClusterTestNode {
-// import ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmSpec._
-
-// val testNodes = NrOfNodes
-
-// "A cluster" must {
-
-// "be able to replicate an actor with a transaction log and replay transaction log after actor migration" ignore {
-
-// barrier("start-node1", NrOfNodes).await()
-
-// barrier("create-actor-on-node1", NrOfNodes).await()
-
-// barrier("start-node2", NrOfNodes) {
-// Cluster.node.start()
-// }
-
-// Thread.sleep(5000) // wait for fail-over from node1 to node2
-
-// barrier("check-fail-over-to-node2", NrOfNodes - 1) {
-// // both remaining nodes should now have the replica
-// node.isInUseOnNode("hello-world-write-behind-nosnapshot") must be(true)
-// val actorRef = Actor.registry.local.actorFor("hello-world-write-behind-nosnapshot").getOrElse(fail("Actor should have been in the local actor registry"))
-// actorRef.address must be("hello-world-write-behind-nosnapshot")
-// (actorRef ? GetLog).as[Log].get must be(Log("0123456789"))
-// }
-
-// node.shutdown()
-// }
-// }
-
-// override def onReady() {
-// LocalBookKeeperEnsemble.start()
-// }
-
-// override def onShutdown() {
-// TransactionLog.shutdown()
-// LocalBookKeeperEnsemble.shutdown()
-// }
-// }
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/snapshot/ReplicationTransactionLogWriteBehindSnapshotMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/snapshot/ReplicationTransactionLogWriteBehindSnapshotMultiJvmNode1.conf
deleted file mode 100644
index a3ec6ec2c3..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/snapshot/ReplicationTransactionLogWriteBehindSnapshotMultiJvmNode1.conf
+++ /dev/null
@@ -1,7 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handler-level = "WARNING"
-akka.actor.deployment.hello-world.router = "direct"
-akka.actor.deployment.hello-world-write-behind-snapshot.nr-of-instances = 1
-akka.actor.deployment.hello-world-write-behind-snapshot.cluster.replication.storage = "transaction-log"
-akka.actor.deployment.hello-world-write-behind-snapshot.cluster.replication.strategy = "write-behind"
-akka.cluster.replication.snapshot-frequency = 7
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/snapshot/ReplicationTransactionLogWriteBehindSnapshotMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/snapshot/ReplicationTransactionLogWriteBehindSnapshotMultiJvmNode1.opts
deleted file mode 100644
index dc86c1c9c0..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/snapshot/ReplicationTransactionLogWriteBehindSnapshotMultiJvmNode1.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node1 -Dakka.remote.port=9991
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/snapshot/ReplicationTransactionLogWriteBehindSnapshotMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/snapshot/ReplicationTransactionLogWriteBehindSnapshotMultiJvmNode2.conf
deleted file mode 100644
index a3ec6ec2c3..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/snapshot/ReplicationTransactionLogWriteBehindSnapshotMultiJvmNode2.conf
+++ /dev/null
@@ -1,7 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handler-level = "WARNING"
-akka.actor.deployment.hello-world.router = "direct"
-akka.actor.deployment.hello-world-write-behind-snapshot.nr-of-instances = 1
-akka.actor.deployment.hello-world-write-behind-snapshot.cluster.replication.storage = "transaction-log"
-akka.actor.deployment.hello-world-write-behind-snapshot.cluster.replication.strategy = "write-behind"
-akka.cluster.replication.snapshot-frequency = 7
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/snapshot/ReplicationTransactionLogWriteBehindSnapshotMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/snapshot/ReplicationTransactionLogWriteBehindSnapshotMultiJvmNode2.opts
deleted file mode 100644
index bb140941a5..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/snapshot/ReplicationTransactionLogWriteBehindSnapshotMultiJvmNode2.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node2 -Dakka.remote.port=9992
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/snapshot/ReplicationTransactionLogWriteBehindSnapshotMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/snapshot/ReplicationTransactionLogWriteBehindSnapshotMultiJvmSpec.scala
deleted file mode 100644
index fde113080e..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/snapshot/ReplicationTransactionLogWriteBehindSnapshotMultiJvmSpec.scala
+++ /dev/null
@@ -1,118 +0,0 @@
-/*
- * Copyright (C) 2009-2012 Typesafe Inc.
- */
-
-// package akka.cluster.replication.transactionlog.writebehind.snapshot
-
-// import akka.actor._
-// import akka.cluster._
-// import Cluster._
-// import akka.config.Config
-// import akka.cluster.LocalCluster._
-
-// object ReplicationTransactionLogWriteBehindSnapshotMultiJvmSpec {
-// var NrOfNodes = 2
-
-// sealed trait TransactionLogMessage extends Serializable
-// case class Count(nr: Int) extends TransactionLogMessage
-// case class Log(full: String) extends TransactionLogMessage
-// case object GetLog extends TransactionLogMessage
-
-// class HelloWorld extends Actor with Serializable {
-// var log = ""
-// //println("Creating HelloWorld log =======> " + log)
-// def receive = {
-// case Count(nr) ⇒
-// log += nr.toString
-// //println("Message to HelloWorld log =======> " + log)
-// reply("World from node [" + Config.nodename + "]")
-// case GetLog ⇒
-// reply(Log(log))
-// }
-// }
-// }
-
-// class ReplicationTransactionLogWriteBehindSnapshotMultiJvmNode1 extends ClusterTestNode {
-// import ReplicationTransactionLogWriteBehindSnapshotMultiJvmSpec._
-
-// "A cluster" must {
-
-// "be able to replicate an actor with a transaction log and replay transaction log after actor migration" ignore {
-
-// barrier("start-node1", NrOfNodes) {
-// Cluster.node.start()
-// }
-
-// barrier("create-actor-on-node1", NrOfNodes) {
-// val actorRef = Actor.actorOf(Props[HelloWorld]("hello-world-write-behind-snapshot"))
-// node.isInUseOnNode("hello-world-write-behind-snapshot") must be(true)
-// actorRef.address must be("hello-world-write-behind-snapshot")
-// for (counter ← 0 until 10)
-// (actorRef ? Count(counter)).as[String].get must be("World from node [node1]")
-// }
-
-// barrier("start-node2", NrOfNodes).await()
-
-// node.shutdown()
-// }
-// }
-// }
-
-// class ReplicationTransactionLogWriteBehindSnapshotMultiJvmNode2 extends MasterClusterTestNode {
-// import ReplicationTransactionLogWriteBehindSnapshotMultiJvmSpec._
-
-// val testNodes = NrOfNodes
-
-// "A cluster" must {
-
-// "be able to replicate an actor with a transaction log and replay transaction log after actor migration" ignore {
-
-// barrier("start-node1", NrOfNodes).await()
-
-// barrier("create-actor-on-node1", NrOfNodes).await()
-
-// barrier("start-node2", NrOfNodes) {
-// Cluster.node.start()
-// }
-
-// Thread.sleep(5000) // wait for fail-over from node1 to node2
-
-// barrier("check-fail-over-to-node2", NrOfNodes - 1) {
-// // both remaining nodes should now have the replica
-// node.isInUseOnNode("hello-world-write-behind-snapshot") must be(true)
-// val actorRef = Actor.registry.local.actorFor("hello-world-write-behind-snapshot").getOrElse(fail("Actor should have been in the local actor registry"))
-// actorRef.address must be("hello-world-write-behind-snapshot")
-// (actorRef ? GetLog).as[Log].get must be(Log("0123456789"))
-// }
-
-// node.shutdown()
-// }
-// }
-
-// override def onReady() {
-// LocalBookKeeperEnsemble.start()
-// }
-
-// override def onShutdown() {
-// TransactionLog.shutdown()
-// LocalBookKeeperEnsemble.shutdown()
-// }
-// }
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/nosnapshot/ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/nosnapshot/ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmNode1.conf
deleted file mode 100644
index 8de04a2eb1..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/nosnapshot/ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmNode1.conf
+++ /dev/null
@@ -1,7 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handler-level = "DEBUG"
-akka.actor.deployment.hello-world-write-through-nosnapshot.router = "direct"
-akka.actor.deployment.hello-world-write-through-nosnapshot.cluster.replication.storage = "transaction-log"
-akka.actor.deployment.hello-world-write-through-nosnapshot.cluster.replication.strategy = "write-through"
-akka.actor.deployment.service-hello.cluster.preferred-nodes = ["node:node1"]
-akka.cluster.replication.snapshot-frequency = 1000
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/nosnapshot/ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/nosnapshot/ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmNode1.opts
deleted file mode 100644
index dc86c1c9c0..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/nosnapshot/ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmNode1.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node1 -Dakka.remote.port=9991
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/nosnapshot/ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/nosnapshot/ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmNode2.conf
deleted file mode 100644
index 8de04a2eb1..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/nosnapshot/ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmNode2.conf
+++ /dev/null
@@ -1,7 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handler-level = "DEBUG"
-akka.actor.deployment.hello-world-write-through-nosnapshot.router = "direct"
-akka.actor.deployment.hello-world-write-through-nosnapshot.cluster.replication.storage = "transaction-log"
-akka.actor.deployment.hello-world-write-through-nosnapshot.cluster.replication.strategy = "write-through"
-akka.actor.deployment.service-hello.cluster.preferred-nodes = ["node:node1"]
-akka.cluster.replication.snapshot-frequency = 1000
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/nosnapshot/ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/nosnapshot/ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmNode2.opts
deleted file mode 100644
index bb140941a5..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/nosnapshot/ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmNode2.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node2 -Dakka.remote.port=9992
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/nosnapshot/ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/nosnapshot/ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmSpec.scala
deleted file mode 100644
index c2e6ed678b..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/nosnapshot/ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmSpec.scala
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * Copyright (C) 2009-2012 Typesafe Inc.
- */
-
-// package akka.cluster.replication.transactionlog.writethrough.nosnapshot
-
-// import akka.actor._
-// import akka.cluster._
-// import Cluster._
-// import akka.config.Config
-// import akka.cluster.LocalCluster._
-
-// object ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmSpec {
-// var NrOfNodes = 2
-
-// sealed trait TransactionLogMessage extends Serializable
-// case class Count(nr: Int) extends TransactionLogMessage
-// case class Log(full: String) extends TransactionLogMessage
-// case object GetLog extends TransactionLogMessage
-
-// class HelloWorld extends Actor with Serializable {
-// var log = ""
-// def receive = {
-// case Count(nr) ⇒
-// println("Received number: " + nr + " on " + self.address)
-// log += nr.toString
-// reply("World from node [" + Config.nodename + "]")
-// case GetLog ⇒
-// println("Received getLog on " + uuid)
-// reply(Log(log))
-// }
-// }
-// }
-
-// class ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmNode1 extends ClusterTestNode {
-// import ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmSpec._
-
-// "A cluster" must {
-
-// "be able to replicate an actor with a transaction log and replay transaction log after actor migration" ignore {
-
-// barrier("start-node1", NrOfNodes) {
-// Cluster.node.start()
-// }
-
-// barrier("create-actor-on-node1", NrOfNodes) {
-// val actorRef = Actor.actorOf(Props[HelloWorld]("hello-world-write-through-nosnapshot"))
-// actorRef.address must be("hello-world-write-through-nosnapshot")
-// for (i ← 0 until 10)
-// (actorRef ? Count(i)).as[String] must be(Some("World from node [node1]"))
-// }
-
-// barrier("start-node2", NrOfNodes).await()
-
-// node.shutdown()
-// }
-// }
-// }
-
-// class ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmNode2 extends MasterClusterTestNode {
-// import ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmSpec._
-
-// val testNodes = NrOfNodes
-
-// "A cluster" must {
-
-// "be able to replicate an actor with a transaction log and replay transaction log after actor migration" ignore {
-
-// barrier("start-node1", NrOfNodes).await()
-
-// barrier("create-actor-on-node1", NrOfNodes).await()
-
-// barrier("start-node2", NrOfNodes) {
-// Cluster.node.start()
-// }
-
-// Thread.sleep(5000) // wait for fail-over from node1 to node2
-
-// barrier("check-fail-over-to-node2", NrOfNodes - 1) {
-// // both remaining nodes should now have the replica
-// node.isInUseOnNode("hello-world-write-through-nosnapshot") must be(true)
-// val actorRef = Actor.registry.local.actorFor("hello-world-write-through-nosnapshot").getOrElse(fail("Actor should have been in the local actor registry"))
-// actorRef.address must be("hello-world-write-through-nosnapshot")
-// (actorRef ? GetLog).as[Log].get must be(Log("0123456789"))
-// }
-
-// node.shutdown()
-// }
-// }
-
-// override def onReady() {
-// LocalBookKeeperEnsemble.start()
-// }
-
-// override def onShutdown() {
-// TransactionLog.shutdown()
-// LocalBookKeeperEnsemble.shutdown()
-// }
-// }
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/snapshot/ReplicationTransactionLogWriteThroughSnapshotMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/snapshot/ReplicationTransactionLogWriteThroughSnapshotMultiJvmNode1.conf
deleted file mode 100644
index 82d6dc18ce..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/snapshot/ReplicationTransactionLogWriteThroughSnapshotMultiJvmNode1.conf
+++ /dev/null
@@ -1,7 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handler-level = "WARNING"
-akka.actor.deployment.hello-world-write-through-snapshot.router = "direct"
-akka.actor.deployment.hello-world-write-through-snapshot.nr-of-instances = 1
-akka.actor.deployment.hello-world-write-through-snapshot.cluster.replication.storage = "transaction-log"
-akka.actor.deployment.hello-world-write-through-snapshot.cluster.replication.strategy = "write-through"
-akka.cluster.replication.snapshot-frequency = 7
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/snapshot/ReplicationTransactionLogWriteThroughSnapshotMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/snapshot/ReplicationTransactionLogWriteThroughSnapshotMultiJvmNode1.opts
deleted file mode 100644
index dc86c1c9c0..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/snapshot/ReplicationTransactionLogWriteThroughSnapshotMultiJvmNode1.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node1 -Dakka.remote.port=9991
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/snapshot/ReplicationTransactionLogWriteThroughSnapshotMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/snapshot/ReplicationTransactionLogWriteThroughSnapshotMultiJvmNode2.conf
deleted file mode 100644
index 82d6dc18ce..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/snapshot/ReplicationTransactionLogWriteThroughSnapshotMultiJvmNode2.conf
+++ /dev/null
@@ -1,7 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handler-level = "WARNING"
-akka.actor.deployment.hello-world-write-through-snapshot.router = "direct"
-akka.actor.deployment.hello-world-write-through-snapshot.nr-of-instances = 1
-akka.actor.deployment.hello-world-write-through-snapshot.cluster.replication.storage = "transaction-log"
-akka.actor.deployment.hello-world-write-through-snapshot.cluster.replication.strategy = "write-through"
-akka.cluster.replication.snapshot-frequency = 7
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/snapshot/ReplicationTransactionLogWriteThroughSnapshotMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/snapshot/ReplicationTransactionLogWriteThroughSnapshotMultiJvmNode2.opts
deleted file mode 100644
index bb140941a5..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/snapshot/ReplicationTransactionLogWriteThroughSnapshotMultiJvmNode2.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node2 -Dakka.remote.port=9992
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/snapshot/ReplicationTransactionLogWriteThroughSnapshotMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/snapshot/ReplicationTransactionLogWriteThroughSnapshotMultiJvmSpec.scala
deleted file mode 100644
index 3df29dd510..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/snapshot/ReplicationTransactionLogWriteThroughSnapshotMultiJvmSpec.scala
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * Copyright (C) 2009-2012 Typesafe Inc.
- */
-
-// package akka.cluster.replication.transactionlog.writethrough.snapshot
-
-// import akka.actor._
-// import akka.cluster._
-// import Cluster._
-// import akka.config.Config
-// import akka.cluster.LocalCluster._
-
-// object ReplicationTransactionLogWriteThroughSnapshotMultiJvmSpec {
-// var NrOfNodes = 2
-
-// sealed trait TransactionLogMessage extends Serializable
-// case class Count(nr: Int) extends TransactionLogMessage
-// case class Log(full: String) extends TransactionLogMessage
-// case object GetLog extends TransactionLogMessage
-
-// class HelloWorld extends Actor with Serializable {
-// var log = ""
-// def receive = {
-// case Count(nr) ⇒
-// log += nr.toString
-// reply("World from node [" + Config.nodename + "]")
-// case GetLog ⇒
-// reply(Log(log))
-// }
-// }
-// }
-
-// class ReplicationTransactionLogWriteThroughSnapshotMultiJvmNode1 extends ClusterTestNode {
-// import ReplicationTransactionLogWriteThroughSnapshotMultiJvmSpec._
-
-// "A cluster" must {
-
-// "be able to replicate an actor with a transaction log and replay transaction log after actor migration" ignore {
-
-// barrier("start-node1", NrOfNodes) {
-// Cluster.node.start()
-// }
-
-// barrier("create-actor-on-node1", NrOfNodes) {
-// val actorRef = Actor.actorOf(Props[HelloWorld]("hello-world-write-through-snapshot"))
-// node.isInUseOnNode("hello-world-write-through-snapshot") must be(true)
-// actorRef.address must be("hello-world-write-through-snapshot")
-// for (counter ← 0 until 10)
-// (actorRef ? Count(counter)).as[String].get must be("World from node [node1]")
-// }
-
-// barrier("start-node2", NrOfNodes).await()
-
-// node.shutdown()
-// }
-// }
-// }
-
-// class ReplicationTransactionLogWriteThroughSnapshotMultiJvmNode2 extends MasterClusterTestNode {
-// import ReplicationTransactionLogWriteThroughSnapshotMultiJvmSpec._
-
-// val testNodes = NrOfNodes
-
-// "A cluster" must {
-
-// "be able to replicate an actor with a transaction log and replay transaction log after actor migration" ignore {
-
-// barrier("start-node1", NrOfNodes).await()
-
-// barrier("create-actor-on-node1", NrOfNodes).await()
-
-// barrier("start-node2", NrOfNodes) {
-// Cluster.node.start()
-// }
-
-// Thread.sleep(5000) // wait for fail-over from node1 to node2
-
-// barrier("check-fail-over-to-node2", NrOfNodes - 1) {
-// // both remaining nodes should now have the replica
-// node.isInUseOnNode("hello-world-write-through-snapshot") must be(true)
-// val actorRef = Actor.registry.local.actorFor("hello-world-write-through-snapshot").getOrElse(fail("Actor should have been in the local actor registry"))
-// actorRef.address must be("hello-world-write-through-snapshot")
-// (actorRef ? GetLog).as[Log].get must be(Log("0123456789"))
-// }
-
-// node.shutdown()
-// }
-// }
-
-// override def onReady() {
-// LocalBookKeeperEnsemble.start()
-// }
-
-// override def onShutdown() {
-// TransactionLog.shutdown()
-// LocalBookKeeperEnsemble.shutdown()
-// }
-// }
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/failover/DirectRoutingFailoverMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/failover/DirectRoutingFailoverMultiJvmNode1.conf
deleted file mode 100644
index 7332be6934..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/failover/DirectRoutingFailoverMultiJvmNode1.conf
+++ /dev/null
@@ -1,5 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handlers = ["akka.testkit.TestEventListener"]
-akka.event-handler-level = "WARNING"
-akka.actor.deployment.service-hello.router = "direct"
-akka.actor.deployment.service-hello.cluster.preferred-nodes = ["node:node2"]
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/failover/DirectRoutingFailoverMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/failover/DirectRoutingFailoverMultiJvmNode1.opts
deleted file mode 100644
index dc86c1c9c0..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/failover/DirectRoutingFailoverMultiJvmNode1.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node1 -Dakka.remote.port=9991
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/failover/DirectRoutingFailoverMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/failover/DirectRoutingFailoverMultiJvmNode2.conf
deleted file mode 100644
index 7332be6934..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/failover/DirectRoutingFailoverMultiJvmNode2.conf
+++ /dev/null
@@ -1,5 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handlers = ["akka.testkit.TestEventListener"]
-akka.event-handler-level = "WARNING"
-akka.actor.deployment.service-hello.router = "direct"
-akka.actor.deployment.service-hello.cluster.preferred-nodes = ["node:node2"]
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/failover/DirectRoutingFailoverMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/failover/DirectRoutingFailoverMultiJvmNode2.opts
deleted file mode 100644
index bb140941a5..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/failover/DirectRoutingFailoverMultiJvmNode2.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node2 -Dakka.remote.port=9992
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/failover/DirectRoutingFailoverMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/failover/DirectRoutingFailoverMultiJvmSpec.scala
deleted file mode 100644
index 6bc1653836..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/failover/DirectRoutingFailoverMultiJvmSpec.scala
+++ /dev/null
@@ -1,90 +0,0 @@
-package akka.cluster.routing.direct.failover
-
-import akka.config.Config
-import akka.cluster.{ ClusterActorRef, Cluster, MasterClusterTestNode, ClusterTestNode }
-import akka.actor.{ ActorInitializationException, Actor, ActorRef }
-import akka.util.duration._
-import akka.util.{ Duration, Timer }
-import akka.event.EventHandler
-import akka.testkit.{ EventFilter, TestEvent }
-import java.net.ConnectException
-import java.nio.channels.NotYetConnectedException
-import akka.cluster.LocalCluster
-import akka.dispatch.Await
-
-object DirectRoutingFailoverMultiJvmSpec {
-
- val NrOfNodes = 2
-
- class SomeActor extends Actor with Serializable {
-
- def receive = {
- case "identify" ⇒
- reply(Config.nodename)
- }
- }
-}
-
-class DirectRoutingFailoverMultiJvmNode1 extends MasterClusterTestNode {
-
- import DirectRoutingFailoverMultiJvmSpec._
-
- val testNodes = NrOfNodes
-
- "Direct Router" must {
- "throw exception [ActorInitializationException] upon fail-over" ignore {
-
- val ignoreExceptions = Seq(EventFilter[NotYetConnectedException], EventFilter[ConnectException])
- EventHandler.notify(TestEvent.Mute(ignoreExceptions))
-
- var actor: ActorRef = null
-
- LocalCluster.barrier("node-start", NrOfNodes) {
- Cluster.node.start()
- }
-
- LocalCluster.barrier("actor-creation", NrOfNodes) {
-        actor = Actor.actorOf(Props[SomeActor]("service-hello"))
- }
-
- LocalCluster.barrier("verify-actor", NrOfNodes) {
- Await.result(actor ? "identify", timeout.duration) must equal("node2")
- }
-
- val timer = Timer(30.seconds, true)
- while (timer.isTicking && !Cluster.node.isInUseOnNode("service-hello")) {}
-
- LocalCluster.barrier("verify-fail-over", NrOfNodes - 1) {
- actor ! "identify" // trigger failure and removal of connection to node2
- intercept[Exception] {
- actor ! "identify" // trigger exception since no more connections
- }
- }
-
- Cluster.node.shutdown()
- }
- }
-}
-
-class DirectRoutingFailoverMultiJvmNode2 extends ClusterTestNode {
-
- import DirectRoutingFailoverMultiJvmSpec._
-
- "___" must {
- "___" ignore {
- LocalCluster.barrier("node-start", NrOfNodes) {
- Cluster.node.start()
- }
-
- LocalCluster.barrier("actor-creation", NrOfNodes).await()
-
- LocalCluster.barrier("verify-actor", NrOfNodes) {
- Cluster.node.isInUseOnNode("service-hello") must be(true)
- }
-
- Cluster.node.shutdown()
- }
- }
-}
-
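The deleted spec above sequences its per-JVM steps ("node-start", "actor-creation", "verify-actor", "verify-fail-over") with named barriers that every participating JVM must reach before any may continue. LocalCluster.barrier belongs to the test harness being removed in this commit; as a rough, self-contained analogy only (thread-based, with illustrative names, not the harness API, and assuming Scala 2.12+ for the Runnable lambda), the rendezvous pattern looks like this:

import java.util.concurrent.CyclicBarrier

// A thread-per-"node" sketch of the named-barrier rendezvous used by the specs.
object BarrierSketch extends App {
  val NrOfNodes = 2
  val nodeStart     = new CyclicBarrier(NrOfNodes) // barrier("node-start", NrOfNodes)
  val actorCreation = new CyclicBarrier(NrOfNodes) // barrier("actor-creation", NrOfNodes)

  def node(name: String)(work: => Unit): Thread = {
    val t = new Thread(() => {
      nodeStart.await()     // nobody proceeds until all nodes reached this step
      work                  // the body the real spec runs inside the barrier block
      actorCreation.await() // and again: wait for every node to finish the step
      println(name + " passed all steps")
    })
    t.start()
    t
  }

  val threads = Seq(
    node("node1") { println("node1: creating actor") },
    node("node2") { /* node2 only awaits */ })
  threads.foreach(_.join())
}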
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/homenode/HomeNode1MultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/homenode/HomeNode1MultiJvmSpec.scala
deleted file mode 100644
index 6ce2219978..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/homenode/HomeNode1MultiJvmSpec.scala
+++ /dev/null
@@ -1,60 +0,0 @@
-package akka.cluster.routing.direct.homenode
-
-import akka.config.Config
-import akka.actor.Actor
-import akka.cluster.{ ClusterTestNode, MasterClusterTestNode, Cluster }
-import Cluster._
-import akka.cluster.LocalCluster._
-
-object HomeNodeMultiJvmSpec {
-
- val NrOfNodes = 2
-
- class SomeActor extends Actor with Serializable {
- def receive = {
- case "identify" ⇒ {
- reply(Config.nodename)
- }
- }
- }
-
-}
-
-class HomeNodeMultiJvmNode1 extends MasterClusterTestNode {
-
- import HomeNodeMultiJvmSpec._
-
- val testNodes = NrOfNodes
-
- "___" must {
- "___" in {
- Cluster.node.start()
- barrier("waiting-for-begin", NrOfNodes).await()
- barrier("waiting-for-end", NrOfNodes).await()
- node.shutdown()
- }
- }
-}
-
-class HomeNodeMultiJvmNode2 extends ClusterTestNode {
-
- import HomeNodeMultiJvmSpec._
-
- "Direct Router: A Direct Router" must {
-    "obey 'home-node' config option when instantiating an actor in the cluster" in {
- Cluster.node.start()
- barrier("waiting-for-begin", NrOfNodes).await()
-
-      val actorNode1 = Actor.actorOf(Props[SomeActor]("service-node1"))
- val name1 = (actorNode1 ? "identify").get.asInstanceOf[String]
- name1 must equal("node1")
-
-      val actorNode2 = Actor.actorOf(Props[SomeActor]("service-node2"))
- val name2 = (actorNode2 ? "identify").get.asInstanceOf[String]
- name2 must equal("node2")
-
- barrier("waiting-for-end", NrOfNodes).await()
- node.shutdown()
- }
- }
-}
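The assertion pair above (name1 must equal("node1"), name2 must equal("node2")) follows directly from the preferred-nodes entries in the .conf files that follow: a direct router resolves each service to its single configured home node. A minimal stand-alone sketch of that resolution, with the deployment table inlined as plain data (illustrative, not the real deployment model):

object PreferredNodeResolution extends App {
  // Mirrors akka.actor.deployment.<service>.cluster.preferred-nodes (illustrative data).
  val preferredNodes = Map(
    "service-node1" -> List("node:node1"),
    "service-node2" -> List("node:node2"))

  // A direct router always targets the first (and only) preferred node of the service.
  def homeNodeOf(service: String): String =
    preferredNodes(service).head.stripPrefix("node:")

  assert(homeNodeOf("service-node1") == "node1")
  assert(homeNodeOf("service-node2") == "node2")
  println("home-node resolution behaves as the spec expects")
}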
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/homenode/HomeNodeMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/homenode/HomeNodeMultiJvmNode1.conf
deleted file mode 100644
index 893f798e1d..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/homenode/HomeNodeMultiJvmNode1.conf
+++ /dev/null
@@ -1,6 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handler-level = "WARNING"
-akka.actor.deployment.service-node1.router = "direct"
-akka.actor.deployment.service-node1.cluster.preferred-nodes = ["node:node1"]
-akka.actor.deployment.service-node2.router = "direct"
-akka.actor.deployment.service-node2.cluster.preferred-nodes = ["node:node2"]
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/homenode/HomeNodeMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/homenode/HomeNodeMultiJvmNode1.opts
deleted file mode 100644
index dc86c1c9c0..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/homenode/HomeNodeMultiJvmNode1.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node1 -Dakka.remote.port=9991
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/homenode/HomeNodeMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/homenode/HomeNodeMultiJvmNode2.conf
deleted file mode 100644
index 893f798e1d..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/homenode/HomeNodeMultiJvmNode2.conf
+++ /dev/null
@@ -1,6 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handler-level = "WARNING"
-akka.actor.deployment.service-node1.router = "direct"
-akka.actor.deployment.service-node1.cluster.preferred-nodes = ["node:node1"]
-akka.actor.deployment.service-node2.router = "direct"
-akka.actor.deployment.service-node2.cluster.preferred-nodes = ["node:node2"]
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/homenode/HomeNodeMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/homenode/HomeNodeMultiJvmNode2.opts
deleted file mode 100644
index bb140941a5..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/homenode/HomeNodeMultiJvmNode2.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node2 -Dakka.remote.port=9992
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/normalusage/SingleReplicaDirectRoutingMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/normalusage/SingleReplicaDirectRoutingMultiJvmNode1.conf
deleted file mode 100644
index aa0d7771c8..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/normalusage/SingleReplicaDirectRoutingMultiJvmNode1.conf
+++ /dev/null
@@ -1,4 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handler-level = "WARNING"
-akka.actor.deployment.service-hello.router = "direct"
-akka.actor.deployment.service-hello.cluster.preferred-nodes = ["node:node1"]
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/normalusage/SingleReplicaDirectRoutingMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/normalusage/SingleReplicaDirectRoutingMultiJvmNode1.opts
deleted file mode 100644
index dc86c1c9c0..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/normalusage/SingleReplicaDirectRoutingMultiJvmNode1.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node1 -Dakka.remote.port=9991
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/normalusage/SingleReplicaDirectRoutingMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/normalusage/SingleReplicaDirectRoutingMultiJvmNode2.conf
deleted file mode 100644
index aa0d7771c8..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/normalusage/SingleReplicaDirectRoutingMultiJvmNode2.conf
+++ /dev/null
@@ -1,4 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handler-level = "WARNING"
-akka.actor.deployment.service-hello.router = "direct"
-akka.actor.deployment.service-hello.cluster.preferred-nodes = ["node:node1"]
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/normalusage/SingleReplicaDirectRoutingMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/normalusage/SingleReplicaDirectRoutingMultiJvmNode2.opts
deleted file mode 100644
index bb140941a5..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/normalusage/SingleReplicaDirectRoutingMultiJvmNode2.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node2 -Dakka.remote.port=9992
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/normalusage/SingleReplicaDirectRoutingMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/normalusage/SingleReplicaDirectRoutingMultiJvmSpec.scala
deleted file mode 100644
index a7b61af3e7..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/normalusage/SingleReplicaDirectRoutingMultiJvmSpec.scala
+++ /dev/null
@@ -1,62 +0,0 @@
-package akka.cluster.routing.direct.normalusage
-
-import akka.actor.Actor
-import akka.config.Config
-import akka.cluster.{ ClusterActorRef, ClusterTestNode, MasterClusterTestNode, Cluster }
-import akka.cluster.LocalCluster
-
-object SingleReplicaDirectRoutingMultiJvmSpec {
- val NrOfNodes = 2
-
- class SomeActor extends Actor with Serializable {
- //println("---------------------------------------------------------------------------")
- //println("SomeActor has been created on node [" + Config.nodename + "]")
- //println("---------------------------------------------------------------------------")
-
- def receive = {
- case "identify" ⇒ {
- //println("The node received the 'identify' command: " + Config.nodename)
- reply(Config.nodename)
- }
- }
- }
-}
-
-class SingleReplicaDirectRoutingMultiJvmNode1 extends MasterClusterTestNode {
-
- import SingleReplicaDirectRoutingMultiJvmSpec._
-
- val testNodes = NrOfNodes
-
- "___" must {
- "___" in {
- Cluster.node.start()
- LocalCluster.barrier("waiting-for-begin", NrOfNodes).await()
-
- LocalCluster.barrier("waiting-to-end", NrOfNodes).await()
- Cluster.node.shutdown()
- }
- }
-}
-
-class SingleReplicaDirectRoutingMultiJvmNode2 extends ClusterTestNode {
-
- import SingleReplicaDirectRoutingMultiJvmSpec._
-
-  "Direct Router: when a node sends a message to an existing node it" must {
- "communicate with that node" in {
- Cluster.node.start()
- LocalCluster.barrier("waiting-for-begin", NrOfNodes).await()
-
-      val actor = Actor.actorOf(Props[SomeActor]("service-hello")).asInstanceOf[ClusterActorRef]
- actor.isRunning must be(true)
-
- val result = (actor ? "identify").get
- result must equal("node1")
-
- LocalCluster.barrier("waiting-to-end", NrOfNodes).await()
- Cluster.node.shutdown()
- }
- }
-}
-
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmNode1.conf
deleted file mode 100644
index 1772693874..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmNode1.conf
+++ /dev/null
@@ -1,8 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handlers = ["akka.testkit.TestEventListener"]
-akka.event-handler-level = "WARNING"
-akka.actor.deployment.service-hello.router = "random"
-akka.actor.deployment.service-hello.cluster.preferred-nodes = ["node:node1", "node:node3"]
-akka.actor.deployment.service-hello.nr-of-instances = 2
-akka.actor.timeout = 30
-akka.cluster.session-timeout = 10
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmNode1.opts
deleted file mode 100644
index f1306829d9..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmNode1.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node1 -Dakka.remote.port=9991 -Dakka.event.force-sync=true
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmNode2.conf
deleted file mode 100644
index 1772693874..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmNode2.conf
+++ /dev/null
@@ -1,8 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handlers = ["akka.testkit.TestEventListener"]
-akka.event-handler-level = "WARNING"
-akka.actor.deployment.service-hello.router = "random"
-akka.actor.deployment.service-hello.cluster.preferred-nodes = ["node:node1", "node:node3"]
-akka.actor.deployment.service-hello.nr-of-instances = 2
-akka.actor.timeout = 30
-akka.cluster.session-timeout = 10
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmNode2.opts
deleted file mode 100644
index 897e69f626..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmNode2.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node2 -Dakka.remote.port=9992 -Dakka.event.force-sync=true
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmNode3.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmNode3.conf
deleted file mode 100644
index 1772693874..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmNode3.conf
+++ /dev/null
@@ -1,8 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handlers = ["akka.testkit.TestEventListener"]
-akka.event-handler-level = "WARNING"
-akka.actor.deployment.service-hello.router = "random"
-akka.actor.deployment.service-hello.cluster.preferred-nodes = ["node:node1", "node:node3"]
-akka.actor.deployment.service-hello.nr-of-instances = 2
-akka.actor.timeout = 30
-akka.cluster.session-timeout = 10
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmNode3.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmNode3.opts
deleted file mode 100644
index 4127fb94fc..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmNode3.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node3 -Dakka.remote.port=9993 -Dakka.event.force-sync=true
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmSpec.scala
deleted file mode 100644
index cbdc42dbe9..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmSpec.scala
+++ /dev/null
@@ -1,145 +0,0 @@
-package akka.cluster.routing.random.failover
-
-import akka.config.Config
-import akka.cluster._
-import akka.actor.{ ActorRef, Actor }
-import akka.event.EventHandler
-import akka.util.duration._
-import akka.util.{ Duration, Timer }
-import akka.testkit.{ EventFilter, TestEvent }
-import java.util.{ Collections, Set ⇒ JSet }
-import java.net.ConnectException
-import java.nio.channels.NotYetConnectedException
-import akka.cluster.LocalCluster._
-import akka.dispatch.Await
-
-object RandomFailoverMultiJvmSpec {
-
- val NrOfNodes = 3
-
- class SomeActor extends Actor with Serializable {
-
- def receive = {
- case "identify" ⇒
- reply(Config.nodename)
- }
- }
-
-}
-
-class RandomFailoverMultiJvmNode1 extends MasterClusterTestNode {
-
- import RandomFailoverMultiJvmSpec._
-
- def testNodes = NrOfNodes
-
- "Random: when random router fails" must {
- "jump to another replica" ignore {
- val ignoreExceptions = Seq(
- EventFilter[NotYetConnectedException],
- EventFilter[ConnectException],
- EventFilter[ClusterException],
- EventFilter[java.nio.channels.ClosedChannelException])
-
- var oldFoundConnections: JSet[String] = null
- var actor: ActorRef = null
-
- barrier("node-start", NrOfNodes) {
- EventHandler.notify(TestEvent.Mute(ignoreExceptions))
- Cluster.node.start()
- }
-
- barrier("actor-creation", NrOfNodes) {
-        actor = Actor.actorOf(Props[SomeActor]("service-hello"))
- actor.isInstanceOf[ClusterActorRef] must be(true)
- }
-
- val timer = Timer(30.seconds, true)
- while (timer.isTicking &&
- !Cluster.node.isInUseOnNode("service-hello", "node1") &&
- !Cluster.node.isInUseOnNode("service-hello", "node3")) {}
-
- barrier("actor-usage", NrOfNodes) {
- Cluster.node.isInUseOnNode("service-hello") must be(true)
- oldFoundConnections = identifyConnections(actor)
-
- //since we have replication factor 2
- oldFoundConnections.size() must be(2)
- }
-
- barrier("verify-fail-over", NrOfNodes - 1) {
- val timer = Timer(30.seconds, true)
- while (timer.isTicking &&
- !Cluster.node.isInUseOnNode("service-hello", "node1") &&
- !Cluster.node.isInUseOnNode("service-hello", "node2")) {}
-
- val newFoundConnections = identifyConnections(actor)
-
-        //it must still be 2, since a different node should have been used to fail over to
-        newFoundConnections.size() must be(2)
-
-        //they are not disjoint, since there must be at least one element that is in both
-        Collections.disjoint(newFoundConnections, oldFoundConnections) must be(false)
-
-        //but they should not be equal, since the shut-down node has been replaced by another one
-        newFoundConnections.equals(oldFoundConnections) must be(false)
- }
-
- Cluster.node.shutdown()
- }
- }
-
- def identifyConnections(actor: ActorRef): JSet[String] = {
- val set = new java.util.HashSet[String]
-    for (i ← 0 until 100) { // we should get hits from both nodes within 100 attempts; if not, the routing is not very random
- val value = Await.result(actor ? "identify", timeout.duration).asInstanceOf[String]
- set.add(value)
- }
- set
- }
-}
-
-class RandomFailoverMultiJvmNode2 extends ClusterTestNode {
-
- import RandomFailoverMultiJvmSpec._
-
- "___" must {
- "___" ignore {
- barrier("node-start", NrOfNodes) {
- Cluster.node.start()
- }
-
- barrier("actor-creation", NrOfNodes).await()
- barrier("actor-usage", NrOfNodes).await()
-
- Cluster.node.isInUseOnNode("service-hello") must be(false)
-
- Thread.sleep(5000) // wait for fail-over from node3
-
- barrier("verify-fail-over", NrOfNodes - 1).await()
-
- Cluster.node.shutdown()
- }
- }
-}
-
-class RandomFailoverMultiJvmNode3 extends ClusterTestNode {
-
- import RandomFailoverMultiJvmSpec._
-
- "___" must {
- "___" ignore {
- barrier("node-start", NrOfNodes) {
- Cluster.node.start()
- }
-
- barrier("actor-creation", NrOfNodes).await()
- barrier("actor-usage", NrOfNodes).await()
-
- Cluster.node.isInUseOnNode("service-hello") must be(true)
-
- Cluster.node.shutdown()
- }
- }
-}
-
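identifyConnections above discovers the router's replica set empirically: it asks "identify" 100 times and keeps the distinct node names that reply, and the test then compares that set before and after a node is shut down. The sampling idea in isolation (askOnce stands in for `actor ? "identify"` and is an assumption, not the real API):

import scala.util.Random

object ConnectionSampler extends App {
  val replicas = Vector("node1", "node3")
  // Stand-in for (actor ? "identify"): a random replica answers each request.
  def askOnce(): String = replicas(Random.nextInt(replicas.size))

  // Sample enough times that, with high probability, every live replica shows up.
  def identifyConnections(samples: Int = 100): Set[String] =
    (0 until samples).map(_ => askOnce()).toSet

  val found = identifyConnections()
  println(s"replica set seen by the router: $found")
  assert(found.size == 2, "with replication factor 2 we expect two distinct repliers")
}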
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/homenode/HomeNodeMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/homenode/HomeNodeMultiJvmNode1.conf
deleted file mode 100644
index 012685917c..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/homenode/HomeNodeMultiJvmNode1.conf
+++ /dev/null
@@ -1,8 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handler-level = "WARNING"
-akka.actor.deployment.service-node1.router = "random"
-akka.actor.deployment.service-node1.cluster.preferred-nodes = ["node:node1"]
-akka.actor.deployment.service-node1.nr-of-instances = 1
-akka.actor.deployment.service-node2.router = "random"
-akka.actor.deployment.service-node2.cluster.preferred-nodes = ["node:node2"]
-akka.actor.deployment.service-node2.nr-of-instances = 1
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/homenode/HomeNodeMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/homenode/HomeNodeMultiJvmNode1.opts
deleted file mode 100644
index dc86c1c9c0..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/homenode/HomeNodeMultiJvmNode1.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node1 -Dakka.remote.port=9991
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/homenode/HomeNodeMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/homenode/HomeNodeMultiJvmNode2.conf
deleted file mode 100644
index 012685917c..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/homenode/HomeNodeMultiJvmNode2.conf
+++ /dev/null
@@ -1,8 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handler-level = "WARNING"
-akka.actor.deployment.service-node1.router = "random"
-akka.actor.deployment.service-node1.cluster.preferred-nodes = ["node:node1"]
-akka.actor.deployment.service-node1.nr-of-instances = 1
-akka.actor.deployment.service-node2.router = "random"
-akka.actor.deployment.service-node2.cluster.preferred-nodes = ["node:node2"]
-akka.actor.deployment.service-node2.nr-of-instances = 1
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/homenode/HomeNodeMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/homenode/HomeNodeMultiJvmNode2.opts
deleted file mode 100644
index bb140941a5..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/homenode/HomeNodeMultiJvmNode2.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node2 -Dakka.remote.port=9992
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/homenode/HomeNodeMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/homenode/HomeNodeMultiJvmSpec.scala
deleted file mode 100644
index a8f4887464..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/homenode/HomeNodeMultiJvmSpec.scala
+++ /dev/null
@@ -1,60 +0,0 @@
-package akka.cluster.routing.random.homenode
-
-import akka.config.Config
-import akka.actor.Actor
-import akka.cluster.{ ClusterTestNode, MasterClusterTestNode, Cluster }
-import Cluster._
-import akka.cluster.LocalCluster._
-
-object HomeNodeMultiJvmSpec {
-
- val NrOfNodes = 2
-
- class SomeActor extends Actor with Serializable {
- def receive = {
- case "identify" ⇒ {
- reply(Config.nodename)
- }
- }
- }
-
-}
-
-class HomeNodeMultiJvmNode1 extends MasterClusterTestNode {
-
- import HomeNodeMultiJvmSpec._
-
- val testNodes = NrOfNodes
-
- "___" must {
- "___" in {
- Cluster.node.start()
- barrier("waiting-for-begin", NrOfNodes).await()
- barrier("waiting-for-end", NrOfNodes).await()
- node.shutdown()
- }
- }
-}
-
-class HomeNodeMultiJvmNode2 extends ClusterTestNode {
-
- import HomeNodeMultiJvmSpec._
-
- "Random Router: A Random Router" must {
-    "obey 'home-node' config option when instantiating an actor in the cluster" in {
- Cluster.node.start()
- barrier("waiting-for-begin", NrOfNodes).await()
-
- val actorNode1 = Actor.actorOf(Props[SomeActor]("service-node1")
- val nameNode1 = (actorNode1 ? "identify").get.asInstanceOf[String]
- nameNode1 must equal("node1")
-
- val actorNode2 = Actor.actorOf(Props[SomeActor]("service-node2")
- val nameNode2 = (actorNode2 ? "identify").get.asInstanceOf[String]
- nameNode2 must equal("node2")
-
- barrier("waiting-for-end", NrOfNodes).await()
- node.shutdown()
- }
- }
-}
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_1/Random1ReplicaMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_1/Random1ReplicaMultiJvmNode1.conf
deleted file mode 100644
index 729dc64fd6..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_1/Random1ReplicaMultiJvmNode1.conf
+++ /dev/null
@@ -1,4 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handler-level = "WARNING"
-akka.actor.deployment.service-hello.router = "random"
-akka.actor.deployment.service-hello.nr-of-instances = 1
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_1/Random1ReplicaMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_1/Random1ReplicaMultiJvmNode1.opts
deleted file mode 100644
index dc86c1c9c0..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_1/Random1ReplicaMultiJvmNode1.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node1 -Dakka.remote.port=9991
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_1/Random1ReplicaMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_1/Random1ReplicaMultiJvmSpec.scala
deleted file mode 100644
index 525a09467a..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_1/Random1ReplicaMultiJvmSpec.scala
+++ /dev/null
@@ -1,51 +0,0 @@
-/**
- * Copyright (C) 2009-2012 Typesafe Inc.
- */
-
-package akka.cluster.routing.random.replicationfactor_1
-
-import akka.cluster._
-import akka.cluster.Cluster._
-import akka.actor._
-import akka.config.Config
-import akka.cluster.LocalCluster._
-
-/**
- * Test that if a single node is used with a random router with replication factor 1, then the actor is instantiated
- * on that single node.
- */
-object Random1ReplicaMultiJvmSpec {
-
- class HelloWorld extends Actor with Serializable {
- def receive = {
- case "Hello" ⇒
- reply("World from node [" + Config.nodename + "]")
- }
- }
-
-}
-
-class Random1ReplicaMultiJvmNode1 extends MasterClusterTestNode {
-
- import Random1ReplicaMultiJvmSpec._
-
- val testNodes = 1
-
- "Random Router: A cluster" must {
-
- "create clustered actor, get a 'local' actor on 'home' node and a 'ref' to actor on remote node" in {
- Cluster.node.start()
-
-      val hello = Actor.actorOf(Props[HelloWorld]("service-hello"))
-      hello must not equal (null)
-      hello.address must equal("service-hello")
-      hello.isInstanceOf[ClusterActorRef] must be(true)
-
-      val reply = (hello ? "Hello").as[String].getOrElse(fail("Should have received reply from node1"))
- reply must equal("World from node [node1]")
-
- node.shutdown()
- }
- }
-}
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmNode1.conf
deleted file mode 100644
index ae344f2100..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmNode1.conf
+++ /dev/null
@@ -1,4 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handler-level = "WARNING"
-akka.actor.deployment.service-hello.router = "random"
-akka.actor.deployment.service-hello.nr-of-instances = 3
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmNode1.opts
deleted file mode 100644
index dc86c1c9c0..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmNode1.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node1 -Dakka.remote.port=9991
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmNode2.conf
deleted file mode 100644
index 09a37715d0..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmNode2.conf
+++ /dev/null
@@ -1,4 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handler-level = "WARNING"
-akka.actor.deployment.service-hello.router = "random"
-akka.actor.deployment.service-hello.cluster.replication-factor = 3
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmNode2.opts
deleted file mode 100644
index bb140941a5..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmNode2.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node2 -Dakka.remote.port=9992
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmNode3.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmNode3.conf
deleted file mode 100644
index ae344f2100..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmNode3.conf
+++ /dev/null
@@ -1,4 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handler-level = "WARNING"
-akka.actor.deployment.service-hello.router = "random"
-akka.actor.deployment.service-hello.nr-of-instances = 3
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmNode3.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmNode3.opts
deleted file mode 100644
index 089e3b7776..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmNode3.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node3 -Dakka.remote.port=9993
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmSpec.scala
deleted file mode 100644
index c1a4175a09..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmSpec.scala
+++ /dev/null
@@ -1,119 +0,0 @@
-/**
- * Copyright (C) 2009-2012 Typesafe Inc.
- */
-
-package akka.cluster.routing.random.replicationfactor_3
-
-import akka.cluster._
-import akka.actor._
-import akka.config.Config
-import Cluster._
-import akka.cluster.LocalCluster._
-import akka.dispatch.Await
-import akka.util.duration._
-
-/**
- * When a MultiJvmNode is started, will it automatically be part of the cluster (and thus automatically be eligible
- * for running actors), or will it just be a 'client' talking to the cluster?
- */
-object Random3ReplicasMultiJvmSpec {
- val NrOfNodes = 3
-
- class HelloWorld extends Actor with Serializable {
- def receive = {
- case "Hello" ⇒
- reply("World from node [" + Config.nodename + "]")
- }
- }
-}
-
-/**
- * What is the purpose of this node? Is this just a node for the cluster to make use of?
- */
-class Random3ReplicasMultiJvmNode1 extends MasterClusterTestNode {
-
- import Random3ReplicasMultiJvmSpec._
-
- def testNodes: Int = NrOfNodes
-
- "___" must {
- "___" in {
- Cluster.node.start()
-
- barrier("start-nodes", NrOfNodes).await()
-
- barrier("create-actor", NrOfNodes).await()
-
- barrier("end-test", NrOfNodes).await()
-
- node.shutdown()
- }
- }
-}
-
-class Random3ReplicasMultiJvmNode2 extends ClusterTestNode {
-
- import Random3ReplicasMultiJvmSpec._
- import Cluster._
-
- "Random: A cluster" must {
-
- "distribute requests randomly" in {
- Cluster.node.start()
-
- //wait till node 1 has started.
- barrier("start-nodes", NrOfNodes).await()
-
- //check if the actorRef is the expected remoteActorRef.
-      val hello = Actor.actorOf(Props[HelloWorld]("service-hello"))
- hello must not equal (null)
- hello.address must equal("service-hello")
- hello.isInstanceOf[ClusterActorRef] must be(true)
-
- barrier("create-actor", NrOfNodes).await()
-
- val replies = collection.mutable.Map.empty[String, Int]
- def count(reply: String) = {
- if (replies.get(reply).isEmpty) replies.put(reply, 1)
- else replies.put(reply, replies(reply) + 1)
- }
-
- for (i ← 0 until 1000) {
- count(Await.result((hello ? "Hello").mapTo[String], 10 seconds))
- }
-
- val repliesNode1 = replies("World from node [node1]")
- val repliesNode2 = replies("World from node [node2]")
- val repliesNode3 = replies("World from node [node3]")
-
- assert(repliesNode1 > 100)
- assert(repliesNode2 > 100)
- assert(repliesNode3 > 100)
- assert(repliesNode1 + repliesNode2 + repliesNode3 === 1000)
-
- barrier("end-test", NrOfNodes).await()
-
- node.shutdown()
- }
- }
-}
-
-class Random3ReplicasMultiJvmNode3 extends ClusterTestNode {
-
- import Random3ReplicasMultiJvmSpec._
- import Cluster._
-
- "___" must {
- "___" in {
- Cluster.node.start()
-
- barrier("start-nodes", NrOfNodes).await()
-
- barrier("create-actor", NrOfNodes).await()
-
- barrier("end-test", NrOfNodes).await()
-
- node.shutdown()
- }
- }
-}
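The Node2 spec above tests randomness statistically rather than exactly: after 1000 asks it only requires that each of the three replicas answered more than 100 times and that the per-node counts sum to 1000. The same counting and bounds, as a plain sketch with a simulated router (the route function is illustrative):

import scala.util.Random

object RandomDistributionCheck extends App {
  val nodes = Vector("node1", "node2", "node3")
  def route(): String = nodes(Random.nextInt(nodes.size)) // simulated random router

  val replies = collection.mutable.Map.empty[String, Int].withDefaultValue(0)
  for (_ <- 0 until 1000)
    replies(route()) += 1

  // Loose bounds: truly random routing over 3 nodes should give each well over 100 hits.
  nodes.foreach(n => assert(replies(n) > 100, s"$n got suspiciously few requests"))
  assert(nodes.map(replies).sum == 1000)
  println(replies.toMap)
}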
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmNode1.conf
deleted file mode 100644
index 0a858fb8fd..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmNode1.conf
+++ /dev/null
@@ -1,8 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handlers = ["akka.testkit.TestEventListener"]
-akka.event-handler-level = "WARNING"
-akka.actor.deployment.service-hello.router = "round-robin"
-akka.actor.deployment.service-hello.nr-of-instances = 2
-akka.actor.deployment.service-hello.cluster.preferred-nodes = ["node:node1","node:node3"]
-akka.cluster.include-ref-node-in-replica-set = on
-akka.actor.timeout = 30
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmNode1.opts
deleted file mode 100644
index f1306829d9..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmNode1.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node1 -Dakka.remote.port=9991 -Dakka.event.force-sync=true
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmNode2.conf
deleted file mode 100644
index 0a858fb8fd..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmNode2.conf
+++ /dev/null
@@ -1,8 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handlers = ["akka.testkit.TestEventListener"]
-akka.event-handler-level = "WARNING"
-akka.actor.deployment.service-hello.router = "round-robin"
-akka.actor.deployment.service-hello.nr-of-instances = 2
-akka.actor.deployment.service-hello.cluster.preferred-nodes = ["node:node1","node:node3"]
-akka.cluster.include-ref-node-in-replica-set = on
-akka.actor.timeout = 30
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmNode2.opts
deleted file mode 100644
index 897e69f626..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmNode2.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node2 -Dakka.remote.port=9992 -Dakka.event.force-sync=true
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmNode3.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmNode3.conf
deleted file mode 100644
index 0a858fb8fd..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmNode3.conf
+++ /dev/null
@@ -1,8 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handlers = ["akka.testkit.TestEventListener"]
-akka.event-handler-level = "WARNING"
-akka.actor.deployment.service-hello.router = "round-robin"
-akka.actor.deployment.service-hello.nr-of-instances = 2
-akka.actor.deployment.service-hello.cluster.preferred-nodes = ["node:node1","node:node3"]
-akka.cluster.include-ref-node-in-replica-set = on
-akka.actor.timeout = 30
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmNode3.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmNode3.opts
deleted file mode 100644
index 4127fb94fc..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmNode3.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node3 -Dakka.remote.port=9993 -Dakka.event.force-sync=true
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmSpec.scala
deleted file mode 100644
index 1b97ef1075..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmSpec.scala
+++ /dev/null
@@ -1,146 +0,0 @@
-package akka.cluster.routing.roundrobin.failover
-
-import akka.config.Config
-import akka.cluster._
-import akka.actor.{ ActorRef, Actor }
-import akka.event.EventHandler
-import akka.testkit.{ EventFilter, TestEvent }
-import akka.util.duration._
-import akka.util.{ Duration, Timer }
-import java.util.{ Collections, Set ⇒ JSet }
-import java.net.ConnectException
-import java.nio.channels.NotYetConnectedException
-import akka.cluster.LocalCluster._
-import akka.dispatch.Await
-
-object RoundRobinFailoverMultiJvmSpec {
-
- val NrOfNodes = 3
-
- class SomeActor extends Actor with Serializable {
-
- def receive = {
- case "identify" ⇒
- reply(Config.nodename)
- }
- }
-
-}
-
-class RoundRobinFailoverMultiJvmNode1 extends MasterClusterTestNode {
-
- import RoundRobinFailoverMultiJvmSpec._
-
- def testNodes = NrOfNodes
-
- "Round Robin: when round robin router fails" must {
- "jump to another replica" ignore {
- val ignoreExceptions = Seq(
- EventFilter[NotYetConnectedException],
- EventFilter[ConnectException],
- EventFilter[ClusterException])
-
- var oldFoundConnections: JSet[String] = null
- var actor: ActorRef = null
-
- barrier("node-start", NrOfNodes) {
- EventHandler.notify(TestEvent.Mute(ignoreExceptions))
- Cluster.node.start()
- }
-
- barrier("actor-creation", NrOfNodes) {
-        actor = Actor.actorOf(Props[SomeActor]("service-hello"))
- actor.isInstanceOf[ClusterActorRef] must be(true)
- }
-
- val timer = Timer(30.seconds, true)
- while (timer.isTicking &&
- !Cluster.node.isInUseOnNode("service-hello", "node1") &&
- !Cluster.node.isInUseOnNode("service-hello", "node3")) {}
- //Thread.sleep(5000) // wait for all actors to start up on other nodes
-
- barrier("actor-usage", NrOfNodes) {
- Cluster.node.isInUseOnNode("service-hello") must be(true)
- oldFoundConnections = identifyConnections(actor)
-
- //since we have replication factor 2
- oldFoundConnections.size() must be(2)
- }
-
- Thread.sleep(5000) // wait for fail-over from node3
-
- barrier("verify-fail-over", NrOfNodes - 1) {
- val timer = Timer(30.seconds, true)
- while (timer.isTicking &&
- !Cluster.node.isInUseOnNode("service-hello", "node1") &&
- !Cluster.node.isInUseOnNode("service-hello", "node2")) {}
-
- val newFoundConnections = identifyConnections(actor)
-
-        //it must still be 2, since a different node should have been used to fail over to
-        newFoundConnections.size() must be(2)
-
-        //they are not disjoint, since there must be at least one element that is in both
-        Collections.disjoint(newFoundConnections, oldFoundConnections) must be(false)
-
-        //but they should not be equal, since the shut-down node has been replaced by another one
-        newFoundConnections.equals(oldFoundConnections) must be(false)
- }
-
- Cluster.node.shutdown()
- }
- }
-
- def identifyConnections(actor: ActorRef): JSet[String] = {
- val set = new java.util.HashSet[String]
- for (i ← 0 until 100) {
- val value = Await.result(actor ? "identify", timeout.duration).asInstanceOf[String]
- set.add(value)
- }
- set
- }
-}
-
-class RoundRobinFailoverMultiJvmNode2 extends ClusterTestNode {
-
- import RoundRobinFailoverMultiJvmSpec._
-
- "___" must {
- "___" ignore {
- barrier("node-start", NrOfNodes) {
- Cluster.node.start()
- }
-
- barrier("actor-creation", NrOfNodes).await()
- barrier("actor-usage", NrOfNodes).await()
-
- Cluster.node.isInUseOnNode("service-hello") must be(false)
-
- Thread.sleep(5000) // wait for fail-over from node3
-
- barrier("verify-fail-over", NrOfNodes - 1).await()
- }
- }
-}
-
-class RoundRobinFailoverMultiJvmNode3 extends ClusterTestNode {
-
- import RoundRobinFailoverMultiJvmSpec._
-
- "___" must {
- "___" ignore {
- barrier("node-start", NrOfNodes) {
- Cluster.node.start()
- }
-
- barrier("actor-creation", NrOfNodes).await()
- barrier("actor-usage", NrOfNodes).await()
-
- Cluster.node.isInUseOnNode("service-hello") must be(true)
-
- Cluster.node.shutdown()
- }
- }
-}
-
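Both failover specs wait for cluster state with a busy loop, `while (timer.isTicking && !cond) {}`, which spins a core for up to 30 seconds. A small, self-contained variant of the same wait-with-deadline pattern that sleeps between probes (awaitCondition is a name chosen here, not part of the deleted harness):

import scala.concurrent.duration._

object AwaitCondition {
  // Polls `cond` every `interval` until it holds or `timeout` elapses;
  // returns whether the condition became true in time.
  def awaitCondition(timeout: FiniteDuration, interval: FiniteDuration = 100.millis)(cond: => Boolean): Boolean = {
    val deadline = timeout.fromNow
    while (deadline.hasTimeLeft()) {
      if (cond) return true
      Thread.sleep(interval.toMillis)
    }
    cond
  }
}

// Usage, mirroring the specs above (isInUseOnNode stands in for Cluster.node.isInUseOnNode):
//   awaitCondition(30.seconds)(isInUseOnNode("service-hello", "node1"))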
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/homenode/HomeNodeMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/homenode/HomeNodeMultiJvmNode1.conf
deleted file mode 100644
index 85536cd656..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/homenode/HomeNodeMultiJvmNode1.conf
+++ /dev/null
@@ -1,8 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handler-level = "WARNING"
-akka.actor.deployment.service-node1.router = "round-robin"
-akka.actor.deployment.service-node1.cluster.preferred-nodes = ["node:node1"]
-akka.actor.deployment.service-node1.nr-of-instances = 1
-akka.actor.deployment.service-node2.router = "round-robin"
-akka.actor.deployment.service-node2.cluster.preferred-nodes = ["node:node2"]
-akka.actor.deployment.service-node2.nr-of-instances = 1
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/homenode/HomeNodeMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/homenode/HomeNodeMultiJvmNode1.opts
deleted file mode 100644
index dc86c1c9c0..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/homenode/HomeNodeMultiJvmNode1.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node1 -Dakka.remote.port=9991
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/homenode/HomeNodeMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/homenode/HomeNodeMultiJvmNode2.conf
deleted file mode 100644
index 99c85fd1a8..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/homenode/HomeNodeMultiJvmNode2.conf
+++ /dev/null
@@ -1,5 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handler-level = "WARNING"
-akka.actor.deployment.service-hello.router = "round-robin"
-akka.actor.deployment.service-hello.cluster.preferred-nodes = ["node:node1"]
-akka.actor.deployment.service-hello.nr-of-instances = 1
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/homenode/HomeNodeMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/homenode/HomeNodeMultiJvmNode2.opts
deleted file mode 100644
index bb140941a5..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/homenode/HomeNodeMultiJvmNode2.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node2 -Dakka.remote.port=9992
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/homenode/HomeNodeMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/homenode/HomeNodeMultiJvmSpec.scala
deleted file mode 100644
index 4dc9e96429..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/homenode/HomeNodeMultiJvmSpec.scala
+++ /dev/null
@@ -1,63 +0,0 @@
-package akka.cluster.routing.roundrobin.homenode
-
-import akka.config.Config
-import akka.actor.Actor
-import akka.cluster.{ ClusterTestNode, MasterClusterTestNode, Cluster }
-import Cluster._
-import akka.cluster.LocalCluster._
-
-object HomeNodeMultiJvmSpec {
-
- val NrOfNodes = 2
-
- class SomeActor extends Actor with Serializable {
- def receive = {
- case "identify" ⇒ {
- reply(Config.nodename)
- }
- }
- }
-
-}
-
-class HomeNodeMultiJvmNode1 extends MasterClusterTestNode {
-
- import HomeNodeMultiJvmSpec._
-
- val testNodes = NrOfNodes
-
- "___" must {
- "___" in {
-
- Cluster.node.start()
- barrier("waiting-for-begin", NrOfNodes).await()
- barrier("waiting-for-end", NrOfNodes).await()
-
- node.shutdown()
- }
- }
-}
-
-class HomeNodeMultiJvmNode2 extends ClusterTestNode {
-
- import HomeNodeMultiJvmSpec._
-
- "Round Robin: A Router" must {
-    "obey 'home-node' config option when instantiating an actor in the cluster" in {
-
- Cluster.node.start()
- barrier("waiting-for-begin", NrOfNodes).await()
-
-      val actorNode1 = Actor.actorOf(Props[SomeActor]("service-node1"))
- val name1 = (actorNode1 ? "identify").get.asInstanceOf[String]
- name1 must equal("node1")
-
-      val actorNode2 = Actor.actorOf(Props[SomeActor]("service-node2"))
- val name2 = (actorNode2 ? "identify").get.asInstanceOf[String]
- name2 must equal("node2")
-
- barrier("waiting-for-end", NrOfNodes).await()
- node.shutdown()
- }
- }
-}
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_1/RoundRobin1ReplicaMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_1/RoundRobin1ReplicaMultiJvmNode1.conf
deleted file mode 100644
index 88df1a6421..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_1/RoundRobin1ReplicaMultiJvmNode1.conf
+++ /dev/null
@@ -1,4 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handler-level = "WARNING"
-akka.actor.deployment.service-hello.router = "round-robin"
-akka.actor.deployment.service-hello.nr-of-instances = 1
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_1/RoundRobin1ReplicaMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_1/RoundRobin1ReplicaMultiJvmNode1.opts
deleted file mode 100644
index dc86c1c9c0..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_1/RoundRobin1ReplicaMultiJvmNode1.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node1 -Dakka.remote.port=9991
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_1/RoundRobin1ReplicaMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_1/RoundRobin1ReplicaMultiJvmSpec.scala
deleted file mode 100644
index f8fd41b0cf..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_1/RoundRobin1ReplicaMultiJvmSpec.scala
+++ /dev/null
@@ -1,49 +0,0 @@
-/**
- * Copyright (C) 2009-2012 Typesafe Inc.
- */
-
-package akka.cluster.routing.roundrobin.replicationfactor_1
-
-import akka.cluster._
-import Cluster._
-import akka.actor._
-import akka.config.Config
-import akka.cluster.LocalCluster._
-
-/**
- * Test that if a single node is used with a round robin router with replication factor 1, then the actor is instantiated on the single node.
- */
-object RoundRobin1ReplicaMultiJvmSpec {
-
- class HelloWorld extends Actor with Serializable {
- def receive = {
- case "Hello" ⇒ reply("World from node [" + Config.nodename + "]")
- }
- }
-
-}
-
-class RoundRobin1ReplicaMultiJvmNode1 extends MasterClusterTestNode {
-
- import RoundRobin1ReplicaMultiJvmSpec._
-
- val testNodes = 1
-
- "Round Robin: A cluster" must {
-
- "create clustered actor, get a 'local' actor on 'home' node and a 'ref' to actor on remote node" in {
- Cluster.node.start()
-
-      val hello = Actor.actorOf(Props[HelloWorld]("service-hello"))
-      hello must not equal (null)
-      hello.address must equal("service-hello")
-      hello.isInstanceOf[ClusterActorRef] must be(true)
-
-      val reply = (hello ? "Hello").as[String].getOrElse(fail("Should have received reply from node1"))
- reply must equal("World from node [node1]")
-
- node.shutdown()
- }
- }
-}
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_2/RoundRobin2ReplicasMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_2/RoundRobin2ReplicasMultiJvmNode1.conf
deleted file mode 100644
index a763b66792..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_2/RoundRobin2ReplicasMultiJvmNode1.conf
+++ /dev/null
@@ -1,4 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handler-level = "WARNING"
-akka.actor.deployment.service-hello.router = "round-robin"
-akka.actor.deployment.service-hello.nr-of-instances = 2
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_2/RoundRobin2ReplicasMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_2/RoundRobin2ReplicasMultiJvmNode1.opts
deleted file mode 100644
index dc86c1c9c0..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_2/RoundRobin2ReplicasMultiJvmNode1.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node1 -Dakka.remote.port=9991
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_2/RoundRobin2ReplicasMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_2/RoundRobin2ReplicasMultiJvmNode2.conf
deleted file mode 100644
index a763b66792..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_2/RoundRobin2ReplicasMultiJvmNode2.conf
+++ /dev/null
@@ -1,4 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handler-level = "WARNING"
-akka.actor.deployment.service-hello.router = "round-robin"
-akka.actor.deployment.service-hello.nr-of-instances = 2
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_2/RoundRobin2ReplicasMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_2/RoundRobin2ReplicasMultiJvmNode2.opts
deleted file mode 100644
index bb140941a5..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_2/RoundRobin2ReplicasMultiJvmNode2.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node2 -Dakka.remote.port=9992
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_2/RoundRobin2ReplicasMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_2/RoundRobin2ReplicasMultiJvmSpec.scala
deleted file mode 100644
index b101a06f81..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_2/RoundRobin2ReplicasMultiJvmSpec.scala
+++ /dev/null
@@ -1,121 +0,0 @@
-/**
- * Copyright (C) 2009-2012 Typesafe Inc.
- */
-
-package akka.cluster.routing.roundrobin.replicationfactor_2
-
-import org.scalatest.WordSpec
-import org.scalatest.matchers.MustMatchers
-import org.scalatest.BeforeAndAfterAll
-
-import akka.cluster._
-import Cluster._
-import akka.cluster.LocalCluster._
-import akka.actor._
-import akka.actor.Actor._
-import akka.config.Config
-import akka.util.duration._
-import akka.util.{ Duration, Timer, Timeout }
-
-import java.util.concurrent.atomic.AtomicInteger
-import java.util.concurrent.ConcurrentHashMap
-import akka.dispatch.Await
-
-/**
- * When a MultiJvmNode is started, will it automatically be part of the cluster (and thus automatically be eligible
- * for running actors), or will it just be a 'client' talking to the cluster?
- */
-object RoundRobin2ReplicasMultiJvmSpec {
- val NrOfNodes = 2
-
- class HelloWorld extends Actor with Serializable {
- def receive = {
- case "Hello" ⇒
- reply("World from node [" + Config.nodename + "]")
- }
- }
-}
-
-class RoundRobin2ReplicasMultiJvmNode1 extends MasterClusterTestNode {
- import RoundRobin2ReplicasMultiJvmSpec._
-
- val testNodes = NrOfNodes
-
- "Round Robin: A cluster" must {
-
- "create clustered actor, get a 'local' actor on 'home' node and a 'ref' to actor on remote node" in {
- System.getProperty("akka.cluster.nodename", "") must be("node1")
- System.getProperty("akka.remote.port", "") must be("9991")
-
- //wait till node 1 has started.
- barrier("start-node1", NrOfNodes) {
- Cluster.node.start()
- }
-
-      //wait till node 2 has started.
- barrier("start-node2", NrOfNodes).await()
-
- //wait till an actor reference on node 2 has become available.
- barrier("get-ref-to-actor-on-node2", NrOfNodes) {
- val timer = Timer(30.seconds, true)
- while (timer.isTicking && !node.isInUseOnNode("service-hello")) {}
- }
-
-      //wait till node 2 has sent a message to the replicas.
- barrier("send-message-from-node2-to-replicas", NrOfNodes).await()
-
- node.shutdown()
- }
- }
-}
-
-class RoundRobin2ReplicasMultiJvmNode2 extends ClusterTestNode {
- import RoundRobin2ReplicasMultiJvmSpec._
-
- "Round Robin: A cluster" must {
-
- "create clustered actor, get a 'local' actor on 'home' node and a 'ref' to actor on remote node" in {
- System.getProperty("akka.cluster.nodename", "") must be("node2")
- System.getProperty("akka.remote.port", "") must be("9992")
-
- //wait till node 1 has started.
- barrier("start-node1", NrOfNodes).await()
-
- //wait till node 2 has started.
- barrier("start-node2", NrOfNodes) {
- Cluster.node.start()
- }
-
- //check if the actorRef is the expected remoteActorRef.
- var hello: ActorRef = null
- barrier("get-ref-to-actor-on-node2", NrOfNodes) {
-        hello = Actor.actorOf(Props[HelloWorld]("service-hello"))
- hello must not equal (null)
- hello.address must equal("service-hello")
- hello.isInstanceOf[ClusterActorRef] must be(true)
- }
-
- barrier("send-message-from-node2-to-replicas", NrOfNodes) {
-        //todo: is there a reason to check for null again, since it has already been done in the previous block?
- hello must not equal (null)
-
- val replies = new ConcurrentHashMap[String, AtomicInteger]()
- def count(reply: String) = {
- val counter = new AtomicInteger(0)
- Option(replies.putIfAbsent(reply, counter)).getOrElse(counter).incrementAndGet()
- }
-
- implicit val timeout = Timeout(Duration(20, "seconds"))
-
-      for (i ← 1 to 8)
- count(Await.result((hello ? "Hello").mapTo[String], timeout.duration))
-
- replies.get("World from node [node1]").get must equal(4)
- replies.get("World from node [node2]").get must equal(4)
- }
-
- node.shutdown()
- }
- }
-}
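The reply counting in the spec above relies on the standard lock-free counter idiom over ConcurrentHashMap: putIfAbsent returns the previously installed counter, or null when the fresh one won the race, so Option(...).getOrElse(fresh) always yields the one shared AtomicInteger for the key. The idiom stand-alone, as a runnable sketch:

import java.util.concurrent.ConcurrentHashMap
import java.util.concurrent.atomic.AtomicInteger

object ReplyCounter extends App {
  val replies = new ConcurrentHashMap[String, AtomicInteger]()

  def count(reply: String): Int = {
    val fresh = new AtomicInteger(0)
    // putIfAbsent returns null if `fresh` was installed, else the existing counter.
    Option(replies.putIfAbsent(reply, fresh)).getOrElse(fresh).incrementAndGet()
  }

  val answers = Seq.fill(4)("World from node [node1]") ++ Seq.fill(4)("World from node [node2]")
  answers.foreach(count)
  assert(replies.get("World from node [node1]").get == 4)
  assert(replies.get("World from node [node2]").get == 4)
  println("round robin over 2 replicas splits 8 asks 4/4")
}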
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmNode1.conf
deleted file mode 100644
index 8592b46c85..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmNode1.conf
+++ /dev/null
@@ -1,4 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handler-level = "WARNING"
-akka.actor.deployment.service-hello.router = "round-robin"
-akka.actor.deployment.service-hello.nr-of-instances = 3
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmNode1.opts
deleted file mode 100644
index dc86c1c9c0..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmNode1.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node1 -Dakka.remote.port=9991
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmNode2.conf
deleted file mode 100644
index 92bafcfe8b..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmNode2.conf
+++ /dev/null
@@ -1,4 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handler-level = "WARNING"
-akka.actor.deployment.service-hello.router = "round-robin"
-akka.actor.deployment.service-hello.cluster.replication-factor = 3
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmNode2.opts
deleted file mode 100644
index bb140941a5..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmNode2.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node2 -Dakka.remote.port=9992
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmNode3.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmNode3.conf
deleted file mode 100644
index 8592b46c85..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmNode3.conf
+++ /dev/null
@@ -1,4 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handler-level = "WARNING"
-akka.actor.deployment.service-hello.router = "round-robin"
-akka.actor.deployment.service-hello.nr-of-instances = 3
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmNode3.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmNode3.opts
deleted file mode 100644
index 089e3b7776..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmNode3.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node3 -Dakka.remote.port=9993
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmSpec.scala
deleted file mode 100644
index f62b7d3e74..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmSpec.scala
+++ /dev/null
@@ -1,158 +0,0 @@
-// /**
-// * Copyright (C) 2009-2012 Typesafe Inc.
-// */
-
-// package akka.cluster.routing.roundrobin.replicationfactor_3
-
-// import org.scalatest.WordSpec
-// import org.scalatest.matchers.MustMatchers
-// import org.scalatest.BeforeAndAfterAll
-
-// import akka.cluster._
-// import akka.actor._
-// import akka.actor.Actor._
-// import akka.util.duration._
-// import akka.util.{ Duration, Timer }
-// import akka.config.Config
-// import akka.cluster.LocalCluster._
-// import Cluster._
-
-// /**
-// * When a MultiJvmNode is started, will it automatically be part of the cluster (so will it automatically be eligible
-// * for running actors), or will it be just a 'client' talking to the cluster?
-// */
-// object RoundRobin3ReplicasMultiJvmSpec {
-// val NrOfNodes = 3
-
-// class HelloWorld extends Actor with Serializable {
-// def receive = {
-// case "Hello" ⇒
-// reply("World from node [" + Config.nodename + "]")
-// }
-// }
-// }
-
-// /**
-// * What is the purpose of this node? Is this just a node for the cluster to make use of?
-// */
-// class RoundRobin3ReplicasMultiJvmNode1 extends MasterClusterTestNode {
-// import RoundRobin3ReplicasMultiJvmSpec._
-
-// val testNodes = NrOfNodes
-
-// "Round Robin: A cluster" must {
-
-// "create clustered actor, get a 'local' actor on 'home' node and a 'ref' to actor on remote node" in {
-
-// //wait till node 1 has started.
-// barrier("start-node1", NrOfNodes) {
-// Cluster.node.boot()
-// }
-
-// //wait till node 2 has started.
-// barrier("start-node2", NrOfNodes).await()
-
-// //wait till node 3 has started.
-// barrier("start-node3", NrOfNodes).await()
-
-// //wait till an actor reference on node 2 has become available.
-// barrier("get-ref-to-actor-on-node2", NrOfNodes) {
-// val timer = Timer(30.seconds, true)
-// while (timer.isTicking && !node.isInUseOnNode("service-hello")) {}
-// }
-
-// //wait till node 2 has sent a message to the replicas.
-// barrier("send-message-from-node2-to-replicas", NrOfNodes).await()
-
-// node.shutdown()
-// }
-// }
-// }
-
-// class RoundRobin3ReplicasMultiJvmNode2 extends ClusterTestNode {
-// import RoundRobin3ReplicasMultiJvmSpec._
-// import Cluster._
-
-// "Round Robin: A cluster" must {
-
-// "create clustered actor, get a 'local' actor on 'home' node and a 'ref' to actor on remote node" in {
-
-// //wait till node 1 has started.
-// barrier("start-node1", NrOfNodes).await()
-
-// //wait till node 2 has started.
-// barrier("start-node2", NrOfNodes) {
-// Cluster.node.start()
-// }
-
-// //wait till node 3 has started.
-// barrier("start-node3", NrOfNodes).await()
-
-// //check if the actorRef is the expected remoteActorRef.
-// var hello: ActorRef = null
-// barrier("get-ref-to-actor-on-node2", NrOfNodes) {
-// hello = Actor.actorOf(Props[HelloWorld]("service-hello"))
-// hello must not equal (null)
-// hello.address must equal("service-hello")
-// hello.isInstanceOf[ClusterActorRef] must be(true)
-// }
-
-// barrier("send-message-from-node2-to-replicas", NrOfNodes) {
-// //'hello' was already null-checked in the previous barrier block; this is just a cheap sanity check.
-// hello must not equal (null)
-
-// val replies = collection.mutable.Map.empty[String, Int]
-// def count(reply: String) = {
-// if (replies.get(reply).isEmpty) replies.put(reply, 1)
-// else replies.put(reply, replies(reply) + 1)
-// }
-
-// count((hello ? "Hello").as[String].getOrElse(fail("Should have recieved reply from node1")))
-// count((hello ? "Hello").as[String].getOrElse(fail("Should have recieved reply from node2")))
-// count((hello ? "Hello").as[String].getOrElse(fail("Should have recieved reply from node3")))
-// count((hello ? "Hello").as[String].getOrElse(fail("Should have recieved reply from node1")))
-// count((hello ? "Hello").as[String].getOrElse(fail("Should have recieved reply from node2")))
-// count((hello ? "Hello").as[String].getOrElse(fail("Should have recieved reply from node3")))
-// count((hello ? "Hello").as[String].getOrElse(fail("Should have recieved reply from node1")))
-// count((hello ? "Hello").as[String].getOrElse(fail("Should have recieved reply from node2")))
-// count((hello ? "Hello").as[String].getOrElse(fail("Should have recieved reply from node3")))
-// count((hello ? "Hello").as[String].getOrElse(fail("Should have recieved reply from node1")))
-// count((hello ? "Hello").as[String].getOrElse(fail("Should have recieved reply from node2")))
-// count((hello ? "Hello").as[String].getOrElse(fail("Should have recieved reply from node3")))
-
-// replies("World from node [node1]") must equal(4)
-// replies("World from node [node2]") must equal(4)
-// replies("World from node [node3]") must equal(4)
-// }
-
-// node.shutdown()
-// }
-// }
-// }
-
-// class RoundRobin3ReplicasMultiJvmNode3 extends ClusterTestNode {
-// import RoundRobin3ReplicasMultiJvmSpec._
-// import Cluster._
-
-// "Round Robin: A cluster" must {
-
-// "create clustered actor, get a 'local' actor on 'home' node and a 'ref' to actor on remote node" in {
-// barrier("start-node1", NrOfNodes).await()
-
-// barrier("start-node2", NrOfNodes).await()
-
-// barrier("start-node3", NrOfNodes) {
-// Cluster.node.start()
-// }
-
-// barrier("get-ref-to-actor-on-node2", NrOfNodes) {
-// val timer = Timer(30.seconds, true)
-// while (timer.isTicking && !node.isInUseOnNode("service-hello")) {}
-// }
-
-// barrier("send-message-from-node2-to-replicas", NrOfNodes).await()
-
-// node.shutdown()
-// }
-// }
-// }
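These multi-JVM specs all lean on the same barrier pattern: every node enters a named barrier, the body (where one is given) runs on the node that owns that step, and nobody proceeds until all NrOfNodes parties have arrived. A single-process stand-in using CyclicBarrier conveys the shape; the real test kit coordinates across JVMs (via ZooKeeper in this code base), so this is illustrative only:

import java.util.concurrent.CyclicBarrier

final class StepBarrier(parties: Int) {
  private val barrier = new CyclicBarrier(parties)
  // this node owns the step: do the work, then wait for everyone else
  def apply(body: => Unit): Unit = { body; barrier.await() }
  // nothing to do in this step: just wait until the owning node is done
  def await(): Unit = barrier.await()
}

With two threads sharing a StepBarrier(2), one calls stepBarrier { startNode() } while the other calls stepBarrier.await(), mirroring how node1 and node2 stay in lock-step above.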
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/scattergather/failover/ScatterGatherFailoverMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/scattergather/failover/ScatterGatherFailoverMultiJvmNode1.conf
deleted file mode 100644
index fd2babf3a9..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/scattergather/failover/ScatterGatherFailoverMultiJvmNode1.conf
+++ /dev/null
@@ -1,6 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handlers = ["akka.testkit.TestEventListener"]
-akka.event-handler-level = "WARNING"
-akka.actor.deployment.service-hello.router = "akka.routing.ScatterGatherFirstCompletedRouter"
-akka.actor.deployment.service-hello.nr-of-instances = 2
-akka.actor.timeout = 30
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/scattergather/failover/ScatterGatherFailoverMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/scattergather/failover/ScatterGatherFailoverMultiJvmNode1.opts
deleted file mode 100644
index dc86c1c9c0..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/scattergather/failover/ScatterGatherFailoverMultiJvmNode1.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node1 -Dakka.remote.port=9991
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/scattergather/failover/ScatterGatherFailoverMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/scattergather/failover/ScatterGatherFailoverMultiJvmNode2.conf
deleted file mode 100644
index fd2babf3a9..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/scattergather/failover/ScatterGatherFailoverMultiJvmNode2.conf
+++ /dev/null
@@ -1,6 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handlers = ["akka.testkit.TestEventListener"]
-akka.event-handler-level = "WARNING"
-akka.actor.deployment.service-hello.router = "akka.routing.ScatterGatherFirstCompletedRouter"
-akka.actor.deployment.service-hello.nr-of-instances = 2
-akka.actor.timeout = 30
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/scattergather/failover/ScatterGatherFailoverMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/scattergather/failover/ScatterGatherFailoverMultiJvmNode2.opts
deleted file mode 100644
index bb140941a5..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/scattergather/failover/ScatterGatherFailoverMultiJvmNode2.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node2 -Dakka.remote.port=9992
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/scattergather/failover/ScatterGatherFailoverMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/scattergather/failover/ScatterGatherFailoverMultiJvmSpec.scala
deleted file mode 100644
index e8cc4f7d68..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/scattergather/failover/ScatterGatherFailoverMultiJvmSpec.scala
+++ /dev/null
@@ -1,114 +0,0 @@
-package akka.cluster.routing.scattergather.failover
-
-import akka.config.Config
-import akka.cluster._
-import akka.actor.{ ActorRef, Actor }
-import akka.event.EventHandler
-import akka.testkit.{ EventFilter, TestEvent }
-import java.util.{ Collections, Set ⇒ JSet }
-import java.net.ConnectException
-import java.nio.channels.NotYetConnectedException
-import java.lang.Thread
-import akka.routing.Routing.Broadcast
-import akka.cluster.LocalCluster._
-import akka.dispatch.Await
-
-object ScatterGatherFailoverMultiJvmSpec {
-
- val NrOfNodes = 2
-
- case class Shutdown(node: Option[String] = None)
- case class Sleep(node: String)
-
- class TestActor extends Actor with Serializable {
-
- def shutdownNode = new Thread() {
- override def run() {
- Thread.sleep(2000)
- Cluster.node.shutdown()
- }
- }.start() // the thread must actually be started, otherwise run() never executes
-
- def receive = {
- case Shutdown(None) ⇒ shutdownNode
- case Sleep(node) if node.equals(Config.nodename) ⇒
- Thread sleep 100
- reply(Config.nodename)
- case Shutdown(Some(node)) if node.equals(Config.nodename) ⇒ shutdownNode
- case _ ⇒
- Thread sleep 100
- reply(Config.nodename)
- }
- }
-
-}
-
-class ScatterGatherFailoverMultiJvmNode1 extends MasterClusterTestNode {
-
- import ScatterGatherFailoverMultiJvmSpec._
-
- def testNodes = NrOfNodes
-
- "When the message is sent with ?, and all connections are up, router" must {
- "return the first came reponse" ignore {
- val ignoreExceptions = Seq(
- EventFilter[NotYetConnectedException],
- EventFilter[ConnectException],
- EventFilter[ClusterException])
-
- EventHandler.notify(TestEvent.Mute(ignoreExceptions))
-
- Cluster.node.start()
- LocalCluster.barrier("waiting-for-begin", NrOfNodes).await()
-
- /*
- FIXME: Uncomment, when custom routers will be fully supported (ticket #1109)
-
- val actor = Actor.actorOf(Props[TestActor]("service-hello")).asInstanceOf[ClusterActorRef]
-
- identifyConnections(actor).size() must be(2)
-
- // since node1 is falling asleep, response from node2 is gathered
- (actor ? Broadcast(Sleep("node1"))).get.asInstanceOf[String] must be("node2")
-
- Thread sleep 100
-
- // since node2 shuts down while processing the message, the response from node1 is gathered
- (actor ? Broadcast(Shutdown(Some("node2")))).get.asInstanceOf[String] must be("node1")
-
- */
- LocalCluster.barrier("waiting-for-end", NrOfNodes).await()
- Cluster.node.shutdown()
- }
- }
-
- def identifyConnections(actor: ActorRef): JSet[String] = {
- val set = new java.util.HashSet[String]
- for (i ← 0 until NrOfNodes * 2) {
- val value = Await.result(actor ? "foo", timeout.duration).asInstanceOf[String]
- set.add(value)
- }
- set
- }
-}
-
-class ScatterGatherFailoverMultiJvmNode2 extends ClusterTestNode {
-
- import ScatterGatherFailoverMultiJvmSpec._
-
- "___" must {
- "___" ignore {
-
- Cluster.node.start()
- LocalCluster.barrier("waiting-for-begin", NrOfNodes).await()
-
- /*
- FIXME: Uncomment, when custom routers will be fully supported (ticket #1109)
- Thread.sleep(30 *1000)
- */
-
- LocalCluster.barrier("waiting-for-end", NrOfNodes).await()
- Cluster.node.shutdown()
- }
- }
-}
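The router named in these configs, ScatterGatherFirstCompletedRouter, fans one message out to every connection and answers with whichever reply arrives first. A minimal single-process sketch of that semantics, using plain scala.concurrent stand-ins rather than the 2012-era Akka API:

import scala.concurrent.{ Await, Future }
import scala.concurrent.duration._
import scala.concurrent.ExecutionContext.Implicits.global

object ScatterGatherSketch extends App {
  // stand-in for asking one node: reply after an artificial delay
  def ask(node: String, delayMs: Long): Future[String] =
    Future { Thread.sleep(delayMs); "reply from " + node }

  // scatter the same question to both nodes, gather only the first reply
  val first = Future.firstCompletedOf(Seq(ask("node1", 200L), ask("node2", 50L)))
  println(Await.result(first, 1.second)) // prints: reply from node2
}

That is also why the commented-out expectations above gather node2's answer when node1 sleeps, and node1's answer when node2 shuts down mid-request.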
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/sample/PingPongMultiJvmExample.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/sample/PingPongMultiJvmExample.scala
deleted file mode 100644
index c7e9aceaf1..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/sample/PingPongMultiJvmExample.scala
+++ /dev/null
@@ -1,227 +0,0 @@
-/**
- * Copyright (C) 2009-2012 Typesafe Inc.
- */
-
-package akka.cluster.sample
-
-import akka.cluster._
-
-import akka.actor._
-import akka.util.duration._
-
-object PingPongMultiJvmExample {
- val PING_ADDRESS = "ping"
- val PONG_ADDRESS = "pong"
-
- val ClusterName = "ping-pong-cluster"
- val NrOfNodes = 5
- val Pause = true
- val PauseTimeout = 5 minutes
-
- // -----------------------------------------------
- // Messages
- // -----------------------------------------------
-
- sealed trait PingPong extends Serializable
- case object Ping extends PingPong
- case object Pong extends PingPong
- case object Stop extends PingPong
-
- case class Serve(player: ActorRef)
-
- // -----------------------------------------------
- // Actors
- // -----------------------------------------------
-
- class PingActor extends Actor with Serializable {
- var pong: ActorRef = _
- var play = true
-
- def receive = {
- case Pong ⇒
- if (play) {
- println("---->> PING")
- pong ! Ping
- } else {
- println("---->> GAME OVER")
- }
- case Serve(player) ⇒
- pong = player
- println("---->> SERVE")
- pong ! Ping
- case Stop ⇒
- play = false
- }
- }
-
- class PongActor extends Actor with Serializable {
- def receive = {
- case Ping ⇒
- println("---->> PONG")
- reply(Pong)
- }
- }
-}
-
-/*
-object PingPongMultiJvmNode1 {
- import PingPong._
- import BinaryFormats._
-
- val PingService = classOf[PingActor].getName
- val PongService = classOf[PongActor].getName
-
- def main(args: Array[String]) { run }
-
- def run = {
- // -----------------------------------------------
- // Start monitoring
- // -----------------------------------------------
-
- //MonitoringServer.start
- //Monitoring.startLocalDaemons
-
- // -----------------------------------------------
- // Start cluster
- // -----------------------------------------------
-
- Cluster.startLocalCluster()
-
- // create node
- val node = Cluster.newNode(NodeAddress(ClusterName, "node1", port = 9991))
-
- def pause(name: String, message: String) = {
- node.barrier("user-prompt-" + name, NrOfNodes, PauseTimeout) {
- println(message)
- if (Pause) {
- println("Press enter to continue (timeout of %s) ..." format PauseTimeout)
- System.in.read
- }
- }
- }
-
- pause("start", "Ready to start all nodes")
- println("Starting nodes ...")
-
- Cluster.node.start()
-
- node.barrier("start", NrOfNodes) {
- // wait for others to start
- }
-
- // -----------------------------------------------
- // Store pong actors in the cluster
- // -----------------------------------------------
-
- pause("create", "Ready to create all actors")
- println("Creating actors ...")
-
- // store the ping actor in the cluster, but do not deploy it anywhere
- node.store(classOf[PingActor], PING_ADDRESS)
-
- // store the pong actor in the cluster and replicate it on all nodes
- node.store(classOf[PongActor], PONG_ADDRESS, NrOfNodes)
-
- // give some time for the deployment
- Thread.sleep(3000)
-
- // -----------------------------------------------
- // Get actor references
- // -----------------------------------------------
-
- // check out a local ping actor
- val ping = node.use[PingActor](PING_ADDRESS).head
-
- // get a reference to all the pong actors through a round-robin router actor ref
- val pong = node.ref(PONG_ADDRESS, router = Router.RoundRobin)
-
- // -----------------------------------------------
- // Play the game
- // -----------------------------------------------
-
- pause("play", "Ready to play ping pong")
-
- ping ! Serve(pong)
-
- // let them play for 3 seconds
- Thread.sleep(3000)
-
- ping ! Stop
-
- // give some time for the game to finish
- Thread.sleep(3000)
-
- // -----------------------------------------------
- // Stop actors
- // -----------------------------------------------
-
- pause("stop", "Ready to stop actors")
- println("Stopping actors ...")
-
- ping.stop
- pong.stop
-
- // give remote actors time to stop
- Thread.sleep(5000)
-
- // -----------------------------------------------
- // Stop everything
- // -----------------------------------------------
-
- pause("shutdown", "Ready to shutdown")
- println("Stopping everything ...")
-
- //Monitoring.stopLocalDaemons
- //MonitoringServer.stop
-
- Actor.remote.shutdown
- Actor.registry.local.shutdownAll
-
- node.stop
-
- Cluster.shutdownLocalCluster
- }
-}
-
-object PingPongMultiJvmNode2 extends PongNode(2)
-object PingPongMultiJvmNode3 extends PongNode(3)
-object PingPongMultiJvmNode4 extends PongNode(4)
-object PingPongMultiJvmNode5 extends PongNode(5)
-
-class PongNode(number: Int) {
- import PingPong._
-
- def main(args: Array[String]) { run }
-
- def run = {
- val node = Cluster.newNode(NodeAddress(ClusterName, "node" + number, port = 9990 + number))
-
- def pause(name: String) = {
- node.barrier("user-prompt-" + name, NrOfNodes, PauseTimeout) {
- // wait for user prompt
- }
- }
-
- pause("start")
-
- node.barrier("start", NrOfNodes) {
- Cluster.node.start()
- }
-
- pause("create")
-
- pause("play")
-
- pause("stop")
-
- pause("shutdown")
-
- // clean up and stop
-
- Actor.remote.shutdown
- Actor.registry.local.shutdownAll
-
- node.stop
- }
-}
-*/
diff --git a/akka-remote/src/test/scala/akka/remote/AccrualFailureDetectorSpec.scala b/akka-cluster/src/test/scala/akka/cluster/AccrualFailureDetectorSpec.scala
similarity index 99%
rename from akka-remote/src/test/scala/akka/remote/AccrualFailureDetectorSpec.scala
rename to akka-cluster/src/test/scala/akka/cluster/AccrualFailureDetectorSpec.scala
index cffc424408..d02199f703 100644
--- a/akka-remote/src/test/scala/akka/remote/AccrualFailureDetectorSpec.scala
+++ b/akka-cluster/src/test/scala/akka/cluster/AccrualFailureDetectorSpec.scala
@@ -1,4 +1,4 @@
-package akka.remote
+package akka.cluster
import java.net.InetSocketAddress
import akka.testkit.AkkaSpec
diff --git a/akka-cluster/src/test/scala/akka/cluster/AsynchronousTransactionLogSpec.scala b/akka-cluster/src/test/scala/akka/cluster/AsynchronousTransactionLogSpec.scala
deleted file mode 100644
index 0d26befc4e..0000000000
--- a/akka-cluster/src/test/scala/akka/cluster/AsynchronousTransactionLogSpec.scala
+++ /dev/null
@@ -1,230 +0,0 @@
-/**
- * Copyright (C) 2009-2012 Typesafe Inc.
- */
-package akka.cluster
-
-import org.apache.bookkeeper.client.BookKeeper
-import org.scalatest.WordSpec
-import org.scalatest.matchers.MustMatchers
-import org.scalatest.BeforeAndAfterAll
-
-import akka.actor._
-import akka.event.EventHandler
-import akka.testkit.{ EventFilter, TestEvent }
-
-import com.eaio.uuid.UUID
-
-class AsynchronousTransactionLogSpec extends WordSpec with MustMatchers with BeforeAndAfterAll {
- private var bookKeeper: BookKeeper = _
- private var localBookKeeper: LocalBookKeeper = _
-
- "An asynchronous Transaction Log" should {
- "be able to record entries - asynchronous" in {
- val uuid = (new UUID).toString
- val txlog = TransactionLog.newLogFor(uuid, true, null)
- val entry = "hello".getBytes("UTF-8")
- txlog.recordEntry(entry)
- Thread.sleep(200)
- txlog.close
- }
-
- "be able to be deleted - asynchronous" in {
- val uuid = (new UUID).toString
- val txlog = TransactionLog.newLogFor(uuid, true, null)
- val entry = "hello".getBytes("UTF-8")
- txlog.recordEntry(entry)
-
- txlog.delete()
- txlog.close()
-
- val zkClient = TransactionLog.zkClient
- assert(zkClient.readData(txlog.snapshotPath, true) == null)
- assert(zkClient.readData(txlog.txLogPath, true) == null)
- }
-
- "be able to be checked for existence - asynchronous" in {
- val uuid = (new UUID).toString
- TransactionLog.exists(uuid) must be(false)
-
- TransactionLog.newLogFor(uuid, true, null)
- TransactionLog.exists(uuid) must be(true)
- }
-
- "fail to be opened if non existing - asynchronous" in {
- EventHandler.notify(TestEvent.Mute(EventFilter[ReplicationException]))
- val uuid = (new UUID).toString
- intercept[ReplicationException](TransactionLog.logFor(uuid, true, null))
- EventHandler.notify(TestEvent.UnMuteAll)
- }
-
- "be able to overweite an existing txlog if one already exists - asynchronous" in {
- val uuid = (new UUID).toString
- val txlog1 = TransactionLog.newLogFor(uuid, true, null)
- val entry = "hello".getBytes("UTF-8")
- txlog1.recordEntry(entry)
- txlog1.recordEntry(entry)
- txlog1.close
-
- val txLog2 = TransactionLog.newLogFor(uuid, true, null)
- txLog2.latestSnapshotId.isDefined must be(false)
- txLog2.latestEntryId must be(-1)
- }
-
- "be able to record and delete entries - asynchronous" in {
- EventHandler.notify(TestEvent.Mute(EventFilter[ReplicationException]))
- val uuid = (new UUID).toString
- val txlog1 = TransactionLog.newLogFor(uuid, true, null)
- Thread.sleep(200)
- val entry = "hello".getBytes("UTF-8")
- txlog1.recordEntry(entry)
- Thread.sleep(200)
- txlog1.recordEntry(entry)
- Thread.sleep(200)
- txlog1.delete
- Thread.sleep(200)
- intercept[ReplicationException](TransactionLog.logFor(uuid, true, null))
- EventHandler.notify(TestEvent.UnMuteAll)
- }
-
- "be able to record entries and read entries with 'entriesInRange' - asynchronous" in {
- val uuid = (new UUID).toString
- val txlog1 = TransactionLog.newLogFor(uuid, true, null)
- Thread.sleep(200)
- val entry = "hello".getBytes("UTF-8")
- txlog1.recordEntry(entry)
- Thread.sleep(200)
- txlog1.recordEntry(entry)
- Thread.sleep(200)
- txlog1.close
-
- val txlog2 = TransactionLog.logFor(uuid, true, null)
- Thread.sleep(200)
- val entries = txlog2.entriesInRange(0, 1).map(bytes ⇒ new String(bytes, "UTF-8"))
- Thread.sleep(200)
- entries.size must equal(2)
- entries(0) must equal("hello")
- entries(1) must equal("hello")
- Thread.sleep(200)
- txlog2.close
- }
-
- "be able to record entries and read entries with 'entries' - asynchronous" in {
- val uuid = (new UUID).toString
- val txlog1 = TransactionLog.newLogFor(uuid, true, null)
- Thread.sleep(200)
- val entry = "hello".getBytes("UTF-8")
- txlog1.recordEntry(entry)
- Thread.sleep(200)
- txlog1.recordEntry(entry)
- Thread.sleep(200)
- txlog1.recordEntry(entry)
- Thread.sleep(200)
- txlog1.recordEntry(entry)
- Thread.sleep(200)
- txlog1.close
-
- val txlog2 = TransactionLog.logFor(uuid, true, null)
- val entries = txlog2.entries.map(bytes ⇒ new String(bytes, "UTF-8"))
- Thread.sleep(200)
- entries.size must equal(4)
- entries(0) must equal("hello")
- entries(1) must equal("hello")
- entries(2) must equal("hello")
- entries(3) must equal("hello")
- Thread.sleep(200)
- txlog2.close
- }
-
- "be able to record a snapshot - asynchronous" in {
- val uuid = (new UUID).toString
- val txlog1 = TransactionLog.newLogFor(uuid, true, null)
- Thread.sleep(200)
- val snapshot = "snapshot".getBytes("UTF-8")
- txlog1.recordSnapshot(snapshot)
- Thread.sleep(200)
- txlog1.close
- }
-
- "be able to record and read a snapshot and following entries - asynchronous" in {
- val uuid = (new UUID).toString
- val txlog1 = TransactionLog.newLogFor(uuid, true, null)
- Thread.sleep(200)
- val snapshot = "snapshot".getBytes("UTF-8")
- txlog1.recordSnapshot(snapshot)
- Thread.sleep(200)
-
- val entry = "hello".getBytes("UTF-8")
- txlog1.recordEntry(entry)
- Thread.sleep(200)
- txlog1.recordEntry(entry)
- Thread.sleep(200)
- txlog1.recordEntry(entry)
- Thread.sleep(200)
- txlog1.recordEntry(entry)
- Thread.sleep(200)
- txlog1.close
-
- val txlog2 = TransactionLog.logFor(uuid, true, null)
- Thread.sleep(200)
- val (snapshotAsBytes, entriesAsBytes) = txlog2.latestSnapshotAndSubsequentEntries
- Thread.sleep(200)
- new String(snapshotAsBytes.getOrElse(fail("No snapshot")), "UTF-8") must equal("snapshot")
-
- val entries = entriesAsBytes.map(bytes ⇒ new String(bytes, "UTF-8"))
- Thread.sleep(200)
- entries.size must equal(4)
- entries(0) must equal("hello")
- entries(1) must equal("hello")
- entries(2) must equal("hello")
- entries(3) must equal("hello")
- Thread.sleep(200)
- txlog2.close
- }
-
- "be able to record entries then a snapshot then more entries - and then read from the snapshot and the following entries - asynchronous" in {
- val uuid = (new UUID).toString
- val txlog1 = TransactionLog.newLogFor(uuid, true, null)
- Thread.sleep(200)
-
- val entry = "hello".getBytes("UTF-8")
- txlog1.recordEntry(entry)
- Thread.sleep(200)
- txlog1.recordEntry(entry)
- Thread.sleep(200)
- txlog1.recordEntry(entry)
- Thread.sleep(200)
-
- val snapshot = "snapshot".getBytes("UTF-8")
- txlog1.recordSnapshot(snapshot)
- Thread.sleep(200)
- txlog1.recordEntry(entry)
- Thread.sleep(200)
- txlog1.recordEntry(entry)
- Thread.sleep(200)
- txlog1.close
-
- val txlog2 = TransactionLog.logFor(uuid, true, null)
- Thread.sleep(200)
- val (snapshotAsBytes, entriesAsBytes) = txlog2.latestSnapshotAndSubsequentEntries
- Thread.sleep(200)
- new String(snapshotAsBytes.getOrElse(fail("No snapshot")), "UTF-8") must equal("snapshot")
- val entries = entriesAsBytes.map(bytes ⇒ new String(bytes, "UTF-8"))
- Thread.sleep(200)
- entries.size must equal(2)
- entries(0) must equal("hello")
- entries(1) must equal("hello")
- Thread.sleep(200)
- txlog2.close
- }
- }
-
- override def beforeAll() = {
- LocalBookKeeperEnsemble.start()
- TransactionLog.start()
- }
-
- override def afterAll() = {
- TransactionLog.shutdown()
- LocalBookKeeperEnsemble.shutdown()
- }
-}
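Stripped of the Thread.sleep scaffolding, the contract these tests pin down is simple: recovery hands back the newest snapshot plus only the entries recorded after it (or all entries when no snapshot exists). A hedged in-memory sketch of that contract; the names mirror the spec, not the real BookKeeper-backed TransactionLog:

import scala.collection.mutable.ArrayBuffer

final class InMemoryTxLog {
  private val entries = ArrayBuffer.empty[Array[Byte]]
  private var snapshot: Option[(Array[Byte], Int)] = None // data + entry count at snapshot time

  def recordEntry(entry: Array[Byte]): Unit = entries += entry
  def recordSnapshot(data: Array[Byte]): Unit = snapshot = Some((data, entries.size))

  // replay = latest snapshot, then only the entries recorded after it
  def latestSnapshotAndSubsequentEntries: (Option[Array[Byte]], Seq[Array[Byte]]) =
    snapshot match {
      case Some((data, n)) => (Some(data), entries.drop(n).toList)
      case None            => (None, entries.toList)
    }
}

Run against the last scenario above (three entries, a snapshot, two more entries), it yields the snapshot and exactly two trailing entries, matching the assertions.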
diff --git a/akka-remote/src/test/scala/akka/remote/GossipingAccrualFailureDetectorSpec.scala b/akka-cluster/src/test/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala
similarity index 99%
rename from akka-remote/src/test/scala/akka/remote/GossipingAccrualFailureDetectorSpec.scala
rename to akka-cluster/src/test/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala
index 1e954b34fb..6366a9f65e 100644
--- a/akka-remote/src/test/scala/akka/remote/GossipingAccrualFailureDetectorSpec.scala
+++ b/akka-cluster/src/test/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala
@@ -1,7 +1,7 @@
// /**
// * Copyright (C) 2009-2011 Typesafe Inc.
// */
-// package akka.remote
+// package akka.cluster
// import java.net.InetSocketAddress
diff --git a/akka-cluster/src/test/scala/akka/cluster/SynchronousTransactionLogSpec.scala b/akka-cluster/src/test/scala/akka/cluster/SynchronousTransactionLogSpec.scala
deleted file mode 100644
index 3dc58d6c9a..0000000000
--- a/akka-cluster/src/test/scala/akka/cluster/SynchronousTransactionLogSpec.scala
+++ /dev/null
@@ -1,190 +0,0 @@
-/**
- * Copyright (C) 2009-2012 Typesafe Inc.
- */
-package akka.cluster
-
-import org.apache.bookkeeper.client.BookKeeper
-import org.scalatest.WordSpec
-import org.scalatest.matchers.MustMatchers
-import org.scalatest.BeforeAndAfterAll
-
-import akka.actor._
-import akka.event.EventHandler
-import akka.testkit.{ EventFilter, TestEvent }
-
-import com.eaio.uuid.UUID
-
-class SynchronousTransactionLogSpec extends WordSpec with MustMatchers with BeforeAndAfterAll {
- private var bookKeeper: BookKeeper = _
- private var localBookKeeper: LocalBookKeeper = _
-
- "A synchronous used Transaction Log" should {
-
- "be able to be deleted - synchronous" in {
- val uuid = (new UUID).toString
- val txlog = TransactionLog.newLogFor(uuid, false, null)
- val entry = "hello".getBytes("UTF-8")
- txlog.recordEntry(entry)
-
- txlog.delete()
- txlog.close()
-
- val zkClient = TransactionLog.zkClient
- assert(zkClient.readData(txlog.snapshotPath, true) == null)
- assert(zkClient.readData(txlog.txLogPath, true) == null)
- }
-
- "fail to be opened if non existing - synchronous" in {
- EventHandler.notify(TestEvent.Mute(EventFilter[ReplicationException]))
- val uuid = (new UUID).toString
- intercept[ReplicationException](TransactionLog.logFor(uuid, false, null))
- EventHandler.notify(TestEvent.UnMuteAll)
- }
-
- "be able to be checked for existence - synchronous" in {
- val uuid = (new UUID).toString
- TransactionLog.exists(uuid) must be(false)
-
- TransactionLog.newLogFor(uuid, false, null)
- TransactionLog.exists(uuid) must be(true)
- }
-
- "be able to record entries - synchronous" in {
- val uuid = (new UUID).toString
- val txlog = TransactionLog.newLogFor(uuid, false, null)
- val entry = "hello".getBytes("UTF-8")
- txlog.recordEntry(entry)
- }
-
- "be able to overweite an existing txlog if one already exists - synchronous" in {
- val uuid = (new UUID).toString
- val txlog1 = TransactionLog.newLogFor(uuid, false, null)
- val entry = "hello".getBytes("UTF-8")
- txlog1.recordEntry(entry)
- txlog1.recordEntry(entry)
- txlog1.close
-
- val txLog2 = TransactionLog.newLogFor(uuid, false, null)
- txLog2.latestSnapshotId.isDefined must be(false)
- txLog2.latestEntryId must be(-1)
- }
-
- "be able to record and delete entries - synchronous" in {
- val uuid = (new UUID).toString
- val txlog1 = TransactionLog.newLogFor(uuid, false, null)
- val entry = "hello".getBytes("UTF-8")
- txlog1.recordEntry(entry)
- txlog1.recordEntry(entry)
- txlog1.delete
- txlog1.close
- // intercept[ReplicationException](TransactionLog.logFor(uuid, false, null))
- }
-
- "be able to record entries and read entries with 'entriesInRange' - synchronous" in {
- val uuid = (new UUID).toString
- val txlog1 = TransactionLog.newLogFor(uuid, false, null)
- val entry = "hello".getBytes("UTF-8")
- txlog1.recordEntry(entry)
- txlog1.recordEntry(entry)
- txlog1.close
-
- val txlog2 = TransactionLog.logFor(uuid, false, null)
- val entries = txlog2.entriesInRange(0, 1).map(bytes ⇒ new String(bytes, "UTF-8"))
- entries.size must equal(2)
- entries(0) must equal("hello")
- entries(1) must equal("hello")
- txlog2.close
- }
-
- "be able to record entries and read entries with 'entries' - synchronous" in {
- val uuid = (new UUID).toString
- val txlog1 = TransactionLog.newLogFor(uuid, false, null)
- val entry = "hello".getBytes("UTF-8")
- txlog1.recordEntry(entry)
- txlog1.recordEntry(entry)
- txlog1.recordEntry(entry)
- txlog1.recordEntry(entry)
- txlog1.close // reading back should also work without this close
-
- val txlog2 = TransactionLog.logFor(uuid, false, null)
- val entries = txlog2.entries.map(bytes ⇒ new String(bytes, "UTF-8"))
- entries.size must equal(4)
- entries(0) must equal("hello")
- entries(1) must equal("hello")
- entries(2) must equal("hello")
- entries(3) must equal("hello")
- txlog2.close
- }
-
- "be able to record a snapshot - synchronous" in {
- val uuid = (new UUID).toString
- val txlog1 = TransactionLog.newLogFor(uuid, false, null)
- val snapshot = "snapshot".getBytes("UTF-8")
- txlog1.recordSnapshot(snapshot)
- txlog1.close
- }
-
- "be able to record and read a snapshot and following entries - synchronous" in {
- val uuid = (new UUID).toString
- val txlog1 = TransactionLog.newLogFor(uuid, false, null)
- val snapshot = "snapshot".getBytes("UTF-8")
- txlog1.recordSnapshot(snapshot)
-
- val entry = "hello".getBytes("UTF-8")
- txlog1.recordEntry(entry)
- txlog1.recordEntry(entry)
- txlog1.recordEntry(entry)
- txlog1.recordEntry(entry)
- txlog1.close
-
- val txlog2 = TransactionLog.logFor(uuid, false, null)
- val (snapshotAsBytes, entriesAsBytes) = txlog2.latestSnapshotAndSubsequentEntries
- new String(snapshotAsBytes.getOrElse(fail("No snapshot")), "UTF-8") must equal("snapshot")
-
- val entries = entriesAsBytes.map(bytes ⇒ new String(bytes, "UTF-8"))
- entries.size must equal(4)
- entries(0) must equal("hello")
- entries(1) must equal("hello")
- entries(2) must equal("hello")
- entries(3) must equal("hello")
- txlog2.close
- }
-
- "be able to record entries then a snapshot then more entries - and then read from the snapshot and the following entries - synchronous" in {
- val uuid = (new UUID).toString
- val txlog1 = TransactionLog.newLogFor(uuid, false, null)
-
- val entry = "hello".getBytes("UTF-8")
- txlog1.recordEntry(entry)
- txlog1.recordEntry(entry)
- txlog1.recordEntry(entry)
-
- val snapshot = "snapshot".getBytes("UTF-8")
- txlog1.recordSnapshot(snapshot)
-
- txlog1.recordEntry(entry)
- txlog1.recordEntry(entry)
- txlog1.close
-
- val txlog2 = TransactionLog.logFor(uuid, false, null)
- val (snapshotAsBytes, entriesAsBytes) = txlog2.latestSnapshotAndSubsequentEntries
- new String(snapshotAsBytes.getOrElse(fail("No snapshot")), "UTF-8") must equal("snapshot")
-
- val entries = entriesAsBytes.map(bytes ⇒ new String(bytes, "UTF-8"))
- entries.size must equal(2)
- entries(0) must equal("hello")
- entries(1) must equal("hello")
- txlog2.close
- }
- }
-
- override def beforeAll() = {
- LocalBookKeeperEnsemble.start()
- TransactionLog.start()
- }
-
- override def afterAll() = {
- TransactionLog.shutdown()
- LocalBookKeeperEnsemble.shutdown()
- }
-}
diff --git a/akka-remote/src/test/scala/akka/remote/VectorClockSpec.scala b/akka-cluster/src/test/scala/akka/cluster/VectorClockSpec.scala
similarity index 99%
rename from akka-remote/src/test/scala/akka/remote/VectorClockSpec.scala
rename to akka-cluster/src/test/scala/akka/cluster/VectorClockSpec.scala
index 03e4109423..df9cead7f8 100644
--- a/akka-remote/src/test/scala/akka/remote/VectorClockSpec.scala
+++ b/akka-cluster/src/test/scala/akka/cluster/VectorClockSpec.scala
@@ -1,4 +1,4 @@
-package akka.remote
+package akka.cluster
import java.net.InetSocketAddress
import akka.testkit.AkkaSpec
diff --git a/akka-cluster/src/test/scala/akka/cluster/sample/ClusteredPingPongSample.scala b/akka-cluster/src/test/scala/akka/cluster/sample/ClusteredPingPongSample.scala
deleted file mode 100644
index c242185450..0000000000
--- a/akka-cluster/src/test/scala/akka/cluster/sample/ClusteredPingPongSample.scala
+++ /dev/null
@@ -1,134 +0,0 @@
-/**
- * Copyright (C) 2009-2012 Typesafe Inc.
- */
-
-package akka.cluster.sample
-
-import akka.cluster._
-
-import akka.actor._
-import akka.actor.Actor._
-
-import java.util.concurrent.CountDownLatch
-
-object PingPong {
- val PING_ADDRESS = "ping"
- val PONG_ADDRESS = "pong"
-
- val NrOfPings = 5
-
- // ------------------------
- // Messages
- // ------------------------
-
- sealed trait PingPong extends Serializable
- case object Ball extends PingPong
- case object Stop extends PingPong
- case class Latch(latch: CountDownLatch) extends PingPong
-
- // ------------------------
- // Actors
- // ------------------------
-
- class PingActor extends Actor with Serializable {
- var count = 0
- var gameOverLatch: CountDownLatch = _
-
- def receive = {
- case Ball ⇒
- if (count < NrOfPings) {
- println("---->> PING (%s)" format count)
- count += 1
- reply(Ball)
- } else {
- sender.foreach(s ⇒ (s ? Stop).await)
- gameOverLatch.countDown
- self.stop
- }
- case Latch(latch) ⇒
- gameOverLatch = latch
- }
- }
-
- class PongActor extends Actor with Serializable {
- def receive = {
- case Ball ⇒
- reply(Ball)
- case Stop ⇒
- reply(Stop)
- self.stop
- }
- }
-}
-
-/*
-object ClusteredPingPongSample {
- import PingPong._
- import BinaryFormats._
-
- val CLUSTER_NAME = "test-cluster"
-
- def main(args: Array[String]) = run
-
- def run = {
-
- // ------------------------
- // Start cluster of 5 nodes
- // ------------------------
-
- Cluster.startLocalCluster()
- val localNode = Cluster.newNode(NodeAddress(CLUSTER_NAME, "node0", port = 9991)).start
- val remoteNodes = Cluster.newNode(NodeAddress(CLUSTER_NAME, "node1", port = 9992)).start ::
- Cluster.newNode(NodeAddress(CLUSTER_NAME, "node2", port = 9993)).start ::
- Cluster.newNode(NodeAddress(CLUSTER_NAME, "node3", port = 9994)).start ::
- Cluster.newNode(NodeAddress(CLUSTER_NAME, "node4", port = 9995)).start :: Nil
-
- // ------------------------
- // Store the actors in the cluster
- // ------------------------
-
- // Store the PingActor in the cluster, but do not deploy it anywhere
- localNode.store(classOf[PingActor], PING_ADDRESS)
-
- // Store the PongActor in the cluster and deploy it
- // to 5 (replication factor) nodes in the cluster
- localNode.store(classOf[PongActor], PONG_ADDRESS, 5)
-
- Thread.sleep(1000) // let the deployment finish
-
- // ------------------------
- // Get the actors from the cluster
- // ------------------------
-
- // Check out a local PingActor instance (not reference)
- val ping = localNode.use[PingActor](PING_ADDRESS).head
-
- // Get a reference to all the pong actors through a round-robin router ActorRef
- val pong = localNode.ref(PONG_ADDRESS, router = Router.RoundRobin)
-
- // ------------------------
- // Play the game
- // ------------------------
-
- val latch = new CountDownLatch(1)
- ping ! Latch(latch) // register latch for actor to know when to stop
-
- println("---->> SERVE")
-
- implicit val replyTo = Some(pong) // set the reply address to the PongActor
- ping ! Ball // serve
-
- latch.await // wait for game to finish
-
- println("---->> GAME OVER")
-
- // ------------------------
- // Clean up
- // ------------------------
-
- localNode.stop
- remoteNodes.foreach(_.stop)
- Cluster.shutdownLocalCluster()
- }
-}
-*/
diff --git a/akka-cluster/src/test/scala/akka/cluster/sample/ComputeGridSample.scala b/akka-cluster/src/test/scala/akka/cluster/sample/ComputeGridSample.scala
deleted file mode 100644
index daf817872e..0000000000
--- a/akka-cluster/src/test/scala/akka/cluster/sample/ComputeGridSample.scala
+++ /dev/null
@@ -1,91 +0,0 @@
-/**
- * Copyright (C) 2009-2012 Typesafe Inc.
- */
-
-package akka.cluster.sample
-
-import akka.cluster._
-import akka.dispatch.Futures
-
-object ComputeGridSample {
- //sample.cluster.ComputeGridSample.fun2
-
- // FIXME rewrite as multi-jvm test
-
- /*
- // run all
- def run {
- fun1
- fun2
- fun3
- fun4
- }
-
- // Send Function0[Unit]
- def fun1 = {
- Cluster.startLocalCluster()
- val node = Cluster newNode (NodeAddress("test", "local", port = 9991)) start
- val remote1 = Cluster newNode (NodeAddress("test", "remote1", port = 9992)) start
-
- Thread.sleep(100)
- val fun = () ⇒ println("=============>>> AKKA ROCKS <<<=============")
- node send (fun, 2) // send and invoke the function on two cluster nodes
-
- node.stop
- remote1.stop
- Cluster.shutdownLocalCluster()
- }
-
- // Send Function0[Any]
- def fun2 = {
- Cluster.startLocalCluster()
- val local = Cluster newNode (NodeAddress("test", "local", port = 9991)) start
- val remote1 = Cluster newNode (NodeAddress("test", "remote1", port = 9992)) start
-
- Thread.sleep(100)
- val fun = () ⇒ "AKKA ROCKS"
- val futures = local send (fun, 2) // send and invoke the function on two cluster nodes and get the results
-
- val result = Await.sync(Futures.fold("")(futures)(_ + " - " + _), timeout)
- println("===================>>> Cluster says [" + result + "]")
-
- local.stop
- remote1.stop
- Cluster.shutdownLocalCluster()
- }
-
- // Send Function1[Any, Unit]
- def fun3 = {
- Cluster.startLocalCluster()
- val local = Cluster newNode (NodeAddress("test", "local", port = 9991)) start
- val remote1 = Cluster newNode (NodeAddress("test", "remote1", port = 9992)) start
-
- val fun = ((s: String) ⇒ println("=============>>> " + s + " <<<=============")).asInstanceOf[Function1[Any, Unit]]
- local send (fun, "AKKA ROCKS", 2) // send and invoke function on to two cluster nodes
-
- local.stop
- remote1.stop
- Cluster.shutdownLocalCluster()
- }
-
- // Send Function1[Any, Any]
- def fun4 = {
- Cluster.startLocalCluster()
- val local = Cluster newNode (NodeAddress("test", "local", port = 9991)) start
- val remote1 = Cluster newNode (NodeAddress("test", "remote1", port = 9992)) start
-
- val fun = ((i: Int) ⇒ i * i).asInstanceOf[Function1[Any, Any]]
-
- val future1 = local send (fun, 2, 1) head // send and invoke function on one cluster node and get result
- val future2 = local send (fun, 2, 1) head // send and invoke function on one cluster node and get result
-
- // grab the result from the first one that returns
- val result = Await.sync(Futures.firstCompletedOf(List(future1, future2)), timeout)
- println("===================>>> Cluster says [" + result + "]")
-
- local.stop
- remote1.stop
- Cluster.shutdownLocalCluster()
- }
- */
-}
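fun2 above folds the per-node results into a single string once all futures complete. The modern scala.concurrent equivalent of that Futures.fold step (stand-in code, not the API this sample was written against):

import scala.concurrent.{ Await, Future }
import scala.concurrent.duration._
import scala.concurrent.ExecutionContext.Implicits.global

object FoldRepliesSketch extends App {
  // stand-ins for the closure results coming back from two nodes
  val futures = List(Future("AKKA ROCKS"), Future("AKKA ROCKS"))

  // fold the results once they complete, as Futures.fold("")(_ + " - " + _) did
  val folded = Future.foldLeft(futures)("")(_ + " - " + _)
  println(Await.result(folded, 1.second)) // " - AKKA ROCKS - AKKA ROCKS"
}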
diff --git a/akka-cluster/src/test/scala/akka/cluster/storage/InMemoryStorageSpec.scala b/akka-cluster/src/test/scala/akka/cluster/storage/InMemoryStorageSpec.scala
deleted file mode 100644
index 762b189bd2..0000000000
--- a/akka-cluster/src/test/scala/akka/cluster/storage/InMemoryStorageSpec.scala
+++ /dev/null
@@ -1,241 +0,0 @@
-package akka.cluster.storage
-
-import org.scalatest.matchers.MustMatchers
-import org.scalatest.WordSpec
-import akka.cluster.storage.StorageTestUtils._
-
-class InMemoryStorageSpec extends WordSpec with MustMatchers {
-
- "unversioned load" must {
- "throw MissingDataException if non existing key" in {
- val store = new InMemoryStorage()
-
- try {
- store.load("foo")
- fail()
- } catch {
- case e: MissingDataException ⇒
- }
- }
-
- "return VersionedData if key existing" in {
- val storage = new InMemoryStorage()
- val key = "somekey"
- val value = "somevalue".getBytes
- storage.insert(key, value)
-
- val result = storage.load(key)
- //todo: the implicit storage isn't resolved here, so it is passed explicitly
- assertContent(key, value, result.version)(storage)
- }
- }
-
- "exist" must {
- "return true if value exists" in {
- val store = new InMemoryStorage()
- val key = "somekey"
- store.insert(key, "somevalue".getBytes)
- store.exists(key) must be(true)
- }
-
- "return false if value not exists" in {
- val store = new InMemoryStorage()
- store.exists("somekey") must be(false)
- }
- }
-
- "versioned load" must {
- "throw MissingDataException if non existing key" in {
- val store = new InMemoryStorage()
-
- try {
- store.load("foo", 1)
- fail()
- } catch {
- case e: MissingDataException ⇒
- }
- }
-
- "return VersionedData if key existing and exact version match" in {
- val storage = new InMemoryStorage()
- val key = "somekey"
- val value = "somevalue".getBytes
- val storedVersion = storage.insert(key, value)
-
- val loaded = storage.load(key, storedVersion)
- assert(loaded.version == storedVersion)
- org.junit.Assert.assertArrayEquals(value, loaded.data)
- }
-
- "throw BadVersionException is version too new" in {
- val storage = new InMemoryStorage()
- val key = "somekey"
- val value = "somevalue".getBytes
- val version = storage.insert(key, value)
-
- try {
- storage.load(key, version + 1)
- fail()
- } catch {
- case e: BadVersionException ⇒
- }
- }
-
- "throw BadVersionException is version too old" in {
- val storage = new InMemoryStorage()
- val key = "somekey"
- val value = "somevalue".getBytes
- val version = storage.insert(key, value)
-
- try {
- storage.load(key, version - 1)
- fail()
- } catch {
- case e: BadVersionException ⇒
- }
- }
- }
-
- "insert" must {
-
- "place a new value when non previously existed" in {
- val storage = new InMemoryStorage()
- val key = "somekey"
- val oldValue = "oldvalue".getBytes
- storage.insert(key, oldValue)
-
- val result = storage.load(key)
- assertContent(key, oldValue)(storage)
- assert(InMemoryStorage.InitialVersion == result.version)
- }
-
- "throw MissingDataException when there already exists an entry with the same key" in {
- val storage = new InMemoryStorage()
- val key = "somekey"
- val initialValue = "oldvalue".getBytes
- val initialVersion = storage.insert(key, initialValue)
-
- val newValue = "newValue".getBytes
-
- try {
- storage.insert(key, newValue)
- fail()
- } catch {
- case e: DataExistsException ⇒
- }
-
- assertContent(key, initialValue, initialVersion)(storage)
- }
- }
-
- "update" must {
-
- "throw MissingDataException when no node exists" in {
- val storage = new InMemoryStorage()
-
- val key = "somekey"
-
- try {
- storage.update(key, "somevalue".getBytes, 1)
- fail()
- } catch {
- case e: MissingDataException ⇒
- }
- }
-
- "replace if previous value exists and no other updates have been done" in {
- val storage = new InMemoryStorage()
-
- //do the initial insert
- val key = "foo"
- val oldValue = "insert".getBytes
- val initialVersion = storage.insert(key, oldValue)
-
- //do the update that will be the cause of the conflict.
- val newValue: Array[Byte] = "update".getBytes
- val newVersion = storage.update(key, newValue, initialVersion)
-
- assertContent(key, newValue, newVersion)(storage)
- }
-
- "throw BadVersionException when already overwritten" in {
- val storage = new InMemoryStorage()
-
- //do the initial insert
- val key = "foo"
- val oldValue = "insert".getBytes
- val initialVersion = storage.insert(key, oldValue)
-
- //do the update that will be the cause of the conflict.
- val newValue = "otherupdate".getBytes
- val newVersion = storage.update(key, newValue, initialVersion)
-
- try {
- storage.update(key, "update".getBytes, initialVersion)
- fail()
- } catch {
- case e: BadVersionException ⇒
- }
-
- assertContent(key, newValue, newVersion)(storage)
- }
- }
-
- "overwrite" must {
-
- "throw MissingDataException when no node exists" in {
- val storage = new InMemoryStorage()
- val key = "somekey"
-
- try {
- storage.overwrite(key, "somevalue".getBytes)
- fail()
- } catch {
- case e: MissingDataException ⇒
- }
-
- storage.exists(key) must be(false)
- }
-
- "succeed if previous value exist" in {
- val storage = new InMemoryStorage()
- val key = "somekey"
- val oldValue = "oldvalue".getBytes
- val newValue = "somevalue".getBytes
-
- val initialVersion = storage.insert(key, oldValue)
- val overwriteVersion = storage.overwrite(key, newValue)
-
- assert(overwriteVersion == initialVersion + 1)
- assertContent(key, newValue, overwriteVersion)(storage)
- }
- }
-
- "insertOrOverwrite" must {
- "insert if nothing was inserted before" in {
- val storage = new InMemoryStorage()
- val key = "somekey"
- val value = "somevalue".getBytes
-
- val version = storage.insertOrOverwrite(key, value)
-
- assert(version == InMemoryStorage.InitialVersion)
- assertContent(key, value, version)(storage)
- }
-
- "overwrite of something existed before" in {
- val storage = new InMemoryStorage()
- val key = "somekey"
- val oldValue = "oldvalue".getBytes
- val newValue = "somevalue".getBytes
-
- val initialVersion = storage.insert(key, oldValue)
-
- val overwriteVersion = storage.insertOrOverwrite(key, newValue)
-
- assert(overwriteVersion == initialVersion + 1)
- assertContent(key, newValue, overwriteVersion)(storage)
- }
- }
-
-}
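Behind all of these cases sits one idea: optimistic concurrency over a version counter. Insert succeeds only when the key is absent; update succeeds only when the caller still holds the current version, and bumps it on success. A compact sketch under those assumptions (the exception names and the starting version of 1 are illustrative, not lifted from the deleted code):

import scala.collection.mutable

class MissingData(key: String) extends RuntimeException(key)
class DataExists(key: String) extends RuntimeException(key)
class BadVersion(msg: String) extends RuntimeException(msg)

object VersionedStore {
  val InitialVersion = 1L
  private val data = mutable.Map.empty[String, (Array[Byte], Long)]

  def insert(key: String, value: Array[Byte]): Long =
    if (data.contains(key)) throw new DataExists(key)
    else { data(key) = (value, InitialVersion); InitialVersion }

  // compare-and-swap on the version: stale writers get BadVersion
  def update(key: String, value: Array[Byte], expected: Long): Long =
    data.get(key) match {
      case None                          => throw new MissingData(key)
      case Some((_, v)) if v != expected => throw new BadVersion("expected " + expected + ", found " + v)
      case Some((_, v))                  => val next = v + 1; data(key) = (value, next); next
    }
}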
diff --git a/akka-cluster/src/test/scala/akka/cluster/storage/StorageTestUtils.scala b/akka-cluster/src/test/scala/akka/cluster/storage/StorageTestUtils.scala
deleted file mode 100644
index 71ad994356..0000000000
--- a/akka-cluster/src/test/scala/akka/cluster/storage/StorageTestUtils.scala
+++ /dev/null
@@ -1,15 +0,0 @@
-package akka.cluster.storage
-
-object StorageTestUtils {
-
- def assertContent(key: String, expectedData: Array[Byte], expectedVersion: Long)(implicit storage: Storage) {
- val found = storage.load(key)
- assert(found.version == expectedVersion, "versions should match, found[" + found.version + "], expected[" + expectedVersion + "]")
- org.junit.Assert.assertArrayEquals(expectedData, found.data)
- }
-
- def assertContent(key: String, expectedData: Array[Byte])(implicit storage: Storage) {
- val found = storage.load(key)
- org.junit.Assert.assertArrayEquals(expectedData, found.data)
- }
-}
diff --git a/akka-cluster/src/test/scala/akka/cluster/storage/ZooKeeperStorageSpec.scala b/akka-cluster/src/test/scala/akka/cluster/storage/ZooKeeperStorageSpec.scala
deleted file mode 100644
index 8767ccf88e..0000000000
--- a/akka-cluster/src/test/scala/akka/cluster/storage/ZooKeeperStorageSpec.scala
+++ /dev/null
@@ -1,132 +0,0 @@
-// package akka.cluster.storage
-
-// import org.scalatest.matchers.MustMatchers
-// import akka.actor.Actor
-// import org.scalatest.{ BeforeAndAfterEach, BeforeAndAfterAll, WordSpec }
-// import org.I0Itec.zkclient.ZkServer
-// //import zookeeper.AkkaZkClient
-// import akka.cluster.storage.StorageTestUtils._
-// import java.io.File
-// import java.util.concurrent.atomic.AtomicLong
-
-// class ZooKeeperStorageSpec extends WordSpec with MustMatchers with BeforeAndAfterAll with BeforeAndAfterEach {
-// val dataPath = "_akka_cluster/data"
-// val logPath = "_akka_cluster/log"
-// var zkServer: ZkServer = _
-// //var zkClient: AkkaZkClient = _
-// val idGenerator = new AtomicLong
-
-// def generateKey: String = {
-// "foo" + idGenerator.incrementAndGet()
-// }
-
-// override def beforeAll() {
-// /*new File(dataPath).delete()
-// new File(logPath).delete()
-
-// try {
-// zkServer = Cluster.startLocalCluster(dataPath, logPath)
-// Thread.sleep(5000)
-// Actor.cluster.start()
-// zkClient = Cluster.newZkClient()
-// } catch {
-// case e ⇒ e.printStackTrace()
-// }*/
-// }
-
-// override def afterAll() {
-// /*zkClient.close()
-// Actor.cluster.shutdown()
-// ClusterDeployer.shutdown()
-// Cluster.shutdownLocalCluster()
-// Actor.registry.local.shutdownAll() */
-// }
-
-// /*
-// "unversioned load" must {
-// "throw MissingDataException if non existing key" in {
-// val storage = new ZooKeeperStorage(zkClient)
-
-// try {
-// storage.load(generateKey)
-// fail()
-// } catch {
-// case e: MissingDataException ⇒
-// }
-// }
-
-// "return VersionedData if key existing" in {
-// val storage = new ZooKeeperStorage(zkClient)
-// val key = generateKey
-// val value = "somevalue".getBytes
-// storage.insert(key, value)
-
-// val result = storage.load(key)
-// //todo: the implicit storage isn't resolved here, so it is passed explicitly
-// assertContent(key, value, result.version)(storage)
-// }
-// } */
-
-// /*"overwrite" must {
-
-// "throw MissingDataException when there doesn't exist an entry to overwrite" in {
-// val storage = new ZooKeeperStorage(zkClient)
-// val key = generateKey
-// val value = "value".getBytes
-
-// try {
-// storage.overwrite(key, value)
-// fail()
-// } catch {
-// case e: MissingDataException ⇒
-// }
-
-// assert(!storage.exists(key))
-// }
-
-// "overwrite if there is an existing value" in {
-// val storage = new ZooKeeperStorage(zkClient)
-// val key = generateKey
-// val oldValue = "oldvalue".getBytes
-
-// storage.insert(key, oldValue)
-// val newValue = "newValue".getBytes
-
-// val result = storage.overwrite(key, newValue)
-// //assertContent(key, newValue, result.version)(storage)
-// }
-// }
-
-// "insert" must {
-
-// "place a new value when non previously existed" in {
-// val storage = new ZooKeeperStorage(zkClient)
-// val key = generateKey
-// val oldValue = "oldvalue".getBytes
-// storage.insert(key, oldValue)
-
-// val result = storage.load(key)
-// assertContent(key, oldValue)(storage)
-// assert(InMemoryStorage.InitialVersion == result.version)
-// }
-
-// "throw DataExistsException when there already exists an entry with the same key" in {
-// val storage = new ZooKeeperStorage(zkClient)
-// val key = generateKey
-// val oldValue = "oldvalue".getBytes
-
-// val initialVersion = storage.insert(key, oldValue)
-// val newValue = "newValue".getBytes
-
-// try {
-// storage.insert(key, newValue)
-// fail()
-// } catch {
-// case e: DataExistsException ⇒
-// }
-
-// assertContent(key, oldValue, initialVersion)(storage)
-// }
-// } */
-
-// }
diff --git a/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala b/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala
index a169f9e9b5..4ef079457a 100644
--- a/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala
+++ b/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala
@@ -57,10 +57,6 @@ class RemoteActorRefProvider(
def tempPath() = local.tempPath()
def tempContainer = local.tempContainer
- @volatile
- private var _failureDetector: AccrualFailureDetector = _
- def failureDetector: AccrualFailureDetector = _failureDetector
-
@volatile
private var _transport: RemoteTransport = _
def transport: RemoteTransport = _transport
@@ -80,8 +76,6 @@ class RemoteActorRefProvider(
def init(system: ActorSystemImpl) {
local.init(system)
- _failureDetector = new AccrualFailureDetector(remoteSettings.FailureDetectorThreshold, remoteSettings.FailureDetectorMaxSampleSize, system)
-
_remoteDaemon = new RemoteSystemDaemon(system, rootPath / "remote", rootGuardian, log)
local.registerExtraNames(Map(("remote", remoteDaemon)))
diff --git a/akka-remote/src/test/resources/log4j.properties b/akka-remote/src/test/resources/log4j.properties
deleted file mode 100644
index 2d07c8e051..0000000000
--- a/akka-remote/src/test/resources/log4j.properties
+++ /dev/null
@@ -1,58 +0,0 @@
-# Define some default values that can be overridden by system properties
-zookeeper.root.logger=INFO, CONSOLE
-zookeeper.console.threshold=OFF
-zookeeper.log.dir=.
-zookeeper.log.file=zookeeper.log
-zookeeper.log.threshold=DEBUG
-zookeeper.tracelog.dir=.
-zookeeper.tracelog.file=zookeeper_trace.log
-
-#
-# ZooKeeper Logging Configuration
-#
-
-# Format is " (, )+
-
-# DEFAULT: console appender only
-log4j.rootLogger=${zookeeper.root.logger}
-
-# Example with rolling log file
-#log4j.rootLogger=DEBUG, CONSOLE, ROLLINGFILE
-
-# Example with rolling log file and tracing
-#log4j.rootLogger=TRACE, CONSOLE, ROLLINGFILE, TRACEFILE
-
-#
-# Log INFO level and above messages to the console
-#
-log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender
-log4j.appender.CONSOLE.Threshold=${zookeeper.console.threshold}
-log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout
-log4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} [myid:%X{myid}] - %-5p [%t:%C{1}@%L] - %m%n
-
-#
-# Add ROLLINGFILE to rootLogger to get log file output
-# Log DEBUG level and above messages to a log file
-log4j.appender.ROLLINGFILE=org.apache.log4j.RollingFileAppender
-log4j.appender.ROLLINGFILE.Threshold=${zookeeper.log.threshold}
-log4j.appender.ROLLINGFILE.File=${zookeeper.log.dir}/${zookeeper.log.file}
-
-# Max log file size of 10MB
-log4j.appender.ROLLINGFILE.MaxFileSize=10MB
-# uncomment the next line to limit number of backup files
-#log4j.appender.ROLLINGFILE.MaxBackupIndex=10
-
-log4j.appender.ROLLINGFILE.layout=org.apache.log4j.PatternLayout
-log4j.appender.ROLLINGFILE.layout.ConversionPattern=%d{ISO8601} [myid:%X{myid}] - %-5p [%t:%C{1}@%L] - %m%n
-
-
-#
-# Add TRACEFILE to rootLogger to get log file output
-# Log DEBUG level and above messages to a log file
-log4j.appender.TRACEFILE=org.apache.log4j.FileAppender
-log4j.appender.TRACEFILE.Threshold=TRACE
-log4j.appender.TRACEFILE.File=${zookeeper.tracelog.dir}/${zookeeper.tracelog.file}
-
-log4j.appender.TRACEFILE.layout=org.apache.log4j.PatternLayout
-### Notice we are including log4j's NDC here (%x)
-log4j.appender.TRACEFILE.layout.ConversionPattern=%d{ISO8601} [myid:%X{myid}] - %-5p [%t:%C{1}@%L][%x] - %m%n
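
(As the first comment in the deleted file above notes, every ${...} placeholder is resolved from JVM system properties, so a run could redirect logging without editing the file -- illustrative values: java -Dzookeeper.root.logger=DEBUG,CONSOLE,ROLLINGFILE -Dzookeeper.log.dir=/var/log/zookeeper ...)
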
diff --git a/akka-remote/src/test/resources/logback-test.xml b/akka-remote/src/test/resources/logback-test.xml
deleted file mode 100644
index 240a412687..0000000000
--- a/akka-remote/src/test/resources/logback-test.xml
+++ /dev/null
@@ -1,26 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-
-<!--
-  Test logging configuration: route everything to the console.
--->
-
-<configuration>
-
-    <appender name="stdout" class="ch.qos.logback.core.ConsoleAppender">
-
-        <layout class="ch.qos.logback.classic.PatternLayout">
-
-            <pattern>
-
-                [%4p] [%d{ISO8601}] [%t] %c{1}: %m%n
-
-            </pattern>
-
-        </layout>
-
-    </appender>
-
-    <root level="DEBUG">
-        <appender-ref ref="stdout"/>
-    </root>
-</configuration>
diff --git a/akka-remote/src/test/resources/zoo.cfg b/akka-remote/src/test/resources/zoo.cfg
deleted file mode 100644
index b71eadcc33..0000000000
--- a/akka-remote/src/test/resources/zoo.cfg
+++ /dev/null
@@ -1,12 +0,0 @@
-# The number of milliseconds of each tick
-tickTime=2000
-# The number of ticks that the initial
-# synchronization phase can take
-initLimit=10
-# The number of ticks that can pass between
-# sending a request and getting an acknowledgement
-syncLimit=5
-# the directory where the snapshot is stored.
-dataDir=/export/crawlspace/mahadev/zookeeper/server1/data
-# the port at which the clients will connect
-clientPort=2181
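
The deleted zoo.cfg expresses its limits in ticks, so the effective wall-clock budgets follow from tickTime; a quick sketch of the arithmetic (not part of the patch):

    // Converts zoo.cfg's tick-based limits to milliseconds.
    object ZooCfgBudgets {
      def main(args: Array[String]) {
        val tickTimeMs = 2000 // tickTime
        val initLimit  = 10   // ticks allowed for the initial follower sync
        val syncLimit  = 5    // ticks allowed between a request and its ack
        println("initial sync budget: " + (initLimit * tickTimeMs) + " ms") // 20000 ms = 20 s
        println("request/ack budget:  " + (syncLimit * tickTimeMs) + " ms") // 10000 ms = 10 s
      }
    }
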
diff --git a/project/AkkaBuild.scala b/project/AkkaBuild.scala
index 9dada98416..a5c257ca84 100644
--- a/project/AkkaBuild.scala
+++ b/project/AkkaBuild.scala
@@ -31,7 +31,7 @@ object AkkaBuild extends Build {
Unidoc.unidocExclude := Seq(samples.id, tutorials.id),
Dist.distExclude := Seq(actorTests.id, akkaSbtPlugin.id, docs.id)
),
- aggregate = Seq(actor, testkit, actorTests, remote, slf4j, agent, transactor, mailboxes, zeroMQ, kernel, akkaSbtPlugin, actorMigration, samples, tutorials, docs)
+ aggregate = Seq(actor, testkit, actorTests, remote, cluster, slf4j, agent, transactor, mailboxes, zeroMQ, kernel, akkaSbtPlugin, actorMigration, samples, tutorials, docs)
)
lazy val actor = Project(
@@ -86,6 +86,25 @@ object AkkaBuild extends Build {
)
) configs (MultiJvm)
+ lazy val cluster = Project(
+ id = "akka-cluster",
+ base = file("akka-cluster"),
+ dependencies = Seq(remote, remote % "test->test", testkit % "test->test"),
+ settings = defaultSettings ++ multiJvmSettings ++ schoirSettings ++ Seq(
+ libraryDependencies ++= Dependencies.cluster,
+ // disable parallel tests
+ parallelExecution in Test := false,
+ extraOptions in MultiJvm <<= (sourceDirectory in MultiJvm) { src =>
+ (name: String) => (src ** (name + ".conf")).get.headOption.map("-Dakka.config=" + _.absolutePath).toSeq
+ },
+ scalatestOptions in MultiJvm := Seq("-r", "org.scalatest.akka.QuietReporter"),
+ jvmOptions in MultiJvm := {
+ if (getBoolean("sbt.log.noformat")) Seq("-Dakka.test.nocolor=true") else Nil
+ },
+ test in Test <<= (test in Test) dependsOn (test in MultiJvm)
+ )
+ ) configs (MultiJvm)
+
lazy val slf4j = Project(
id = "akka-slf4j",
base = file("akka-slf4j"),
@@ -301,7 +320,7 @@ object AkkaBuild extends Build {
lazy val docs = Project(
id = "akka-docs",
base = file("akka-docs"),
- dependencies = Seq(actor, testkit % "test->test", remote, slf4j, agent, transactor, fileMailbox, mongoMailbox, redisMailbox, beanstalkMailbox, zookeeperMailbox),
+ dependencies = Seq(actor, testkit % "test->test", remote, cluster, slf4j, agent, transactor, fileMailbox, mongoMailbox, redisMailbox, beanstalkMailbox, zookeeperMailbox),
settings = defaultSettings ++ Seq(
unmanagedSourceDirectories in Test <<= baseDirectory { _ ** "code" get },
libraryDependencies ++= Dependencies.docs,
@@ -410,10 +429,7 @@ object Dependencies {
Test.zookeeper, Test.log4j // needed for ZkBarrier in multi-jvm tests
)
-// val cluster = Seq(
-// bookkeeper, commonsCodec, commonsIo, guice, h2Lzf, jacksonCore, jacksonMapper, log4j, netty,
-// protobuf, sjson, zkClient, zookeeper, zookeeperLock, Test.junit, Test.scalatest
-// )
+ val cluster = Seq(Test.junit, Test.scalatest)
val slf4j = Seq(slf4jApi)