Fixed misc issues after review.

Signed-off-by: Jonas Bonér <jonas@jonasboner.com>
Jonas Bonér 2012-03-02 09:55:54 +01:00
parent 06ec519c7c
commit a3026b3316
11 changed files with 101 additions and 226 deletions


@@ -107,18 +107,6 @@ object MemberStatus {
case object Removed extends MemberStatus
}
// sealed trait PartitioningStatus
// object PartitioningStatus {
// case object Complete extends PartitioningStatus
// case object Awaiting extends PartitioningStatus
// }
// case class PartitioningChange(
// from: Address,
// to: Address,
// path: PartitionPath,
// status: PartitioningStatus)
/**
* Represents the overview of the cluster, holds the cluster convergence table and the set of unreachable nodes.
*/
@@ -138,8 +126,6 @@ case class GossipOverview(
case class Gossip(
overview: GossipOverview = GossipOverview(),
members: SortedSet[Member], // sorted set of members with their status, sorted by name
//partitions: Tree[PartitionPath, Node] = Tree.empty[PartitionPath, Node], // name/partition service
//pending: Set[PartitioningChange] = Set.empty[PartitioningChange],
meta: Map[String, Array[Byte]] = Map.empty[String, Array[Byte]],
version: VectorClock = VectorClock()) // vector clock version
extends ClusterMessage // is a serializable cluster message
@@ -159,8 +145,10 @@ case class Gossip(
* Marks the gossip as seen by this node (remoteAddress) by updating the address entry in the 'gossip.overview.seen'
* Map with the VectorClock for the new gossip.
*/
def seen(address: Address): Gossip =
this copy (overview = overview copy (seen = overview.seen + (address -> version)))
def seen(address: Address): Gossip = {
if (overview.seen.contains(address) && overview.seen(address) == version) this
else this copy (overview = overview copy (seen = overview.seen + (address -> version)))
}
override def toString =
"Gossip(" +
@@ -269,34 +257,26 @@ final class ClusterGossipDaemon(system: ActorSystem, node: Node) extends Actor {
def receive = {
case GossipEnvelope(sender, gossip) ⇒ node.receive(sender, gossip)
case unknown ⇒ log.error("Unknown message sent to cluster daemon [" + unknown + "]")
}
override def unhandled(unknown: Any) = log.error("Unknown message sent to cluster daemon [" + unknown + "]")
}
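The change above routes unmatched messages through Actor.unhandled instead of a catch-all case, keeping Akka's own unhandled-message machinery in the loop. A minimal sketch of the same override, with an illustrative actor that is not part of this commit:

import akka.actor.Actor
import akka.event.Logging

class LoggingActor extends Actor {
  val log = Logging(context.system, this)

  def receive = {
    case "ping" ⇒ sender ! "pong" // handle only what we understand
  }

  // invoked by Akka for every message that receive does not match
  override def unhandled(message: Any) {
    log.error("Unknown message [" + message + "]")
  }
}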
/**
* Node Extension Id and factory for creating Node extension.
* Example:
* {{{
* val node = NodeExtension(system)
*
* if (node.isLeader) { ... }
* }}}
*
* Example:
* {{{
* import akka.cluster._
*
* val node = system.node // implicit conversion adds 'node' method
* val node = Node(system)
*
* if (node.isLeader) { ... }
* }}}
*/
object NodeExtension extends ExtensionId[Node] with ExtensionIdProvider {
object Node extends ExtensionId[Node] with ExtensionIdProvider {
override def get(system: ActorSystem): Node = super.get(system)
override def lookup = NodeExtension
override def lookup = Node
override def createExtension(system: ExtendedActorSystem): Node = new Node(system.asInstanceOf[ActorSystemImpl]) // not nice but need API in ActorSystemImpl inside Node
override def createExtension(system: ExtendedActorSystem): Node = new Node(system)
}
/**
@@ -316,21 +296,12 @@ object NodeExtension extends ExtensionId[Node] with ExtensionIdProvider {
*
* Example:
* {{{
* val node = NodeExtension(system)
*
* if (node.isLeader) { ... }
* }}}
*
* Example:
* {{{
* import akka.cluster._
*
* val node = system.node // implicit conversion adds 'node' method
* val node = Node(system)
*
* if (node.isLeader) { ... }
* }}}
*/
class Node(system: ActorSystemImpl) extends Extension {
class Node(system: ExtendedActorSystem) extends Extension {
/**
* Represents the state for this Node. Implemented using optimistic lockless concurrency,
@@ -372,10 +343,10 @@ class Node(system: ActorSystemImpl) extends Extension {
private val log = Logging(system, "Node")
private val random = SecureRandom.getInstance("SHA1PRNG")
private val clusterCommandDaemon = system.systemActorOf(
private val clusterCommandDaemon = systemActorOf(
Props(new ClusterCommandDaemon(system, this)), "clusterCommand")
private val clusterGossipDaemon = system.systemActorOf(
private val clusterGossipDaemon = systemActorOf(
Props(new ClusterGossipDaemon(system, this)).withRouter(RoundRobinRouter(nrOfGossipDaemons)), "clusterGossip")
private val state = {
@@ -439,21 +410,13 @@ class Node(system: ActorSystemImpl) extends Extension {
* Shuts down all connections to other members, the cluster daemon and the periodic gossip and cleanup tasks.
*/
def shutdown() {
// FIXME Cheating for now. Can't just shut down. Node must first gossip a Leave command, wait for the Leader to do a proper Handoff and then await an Exit command before switching to Removed
if (isRunning.compareAndSet(true, false)) {
log.info("Node [{}] - Shutting down Node and ClusterDaemon...", remoteAddress)
try system.stop(clusterCommandDaemon) finally {
try system.stop(clusterGossipDaemon) finally {
try gossipCanceller.cancel() finally {
try scrutinizeCanceller.cancel() finally {
log.info("Node [{}] - Node and ClusterDaemon shut down successfully", remoteAddress)
}
}
}
}
gossipCanceller.cancel()
scrutinizeCanceller.cancel()
system.stop(clusterCommandDaemon)
system.stop(clusterGossipDaemon)
}
}
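The nested try/finally chain removed above guaranteed that every cleanup step ran even if an earlier one threw, at the cost of deep nesting; the simplified version trades that guarantee for readability. The idiom in isolation, as a sketch (shutdownAll is a hypothetical helper, not part of the commit):

// runs every step even if an earlier one throws
// (note: an exception from a later step supersedes an earlier one)
def shutdownAll(steps: (() ⇒ Unit)*) {
  steps match {
    case Seq() ⇒
    case Seq(head, tail @ _*) ⇒ try head() finally shutdownAll(tail: _*)
  }
}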
@@ -519,8 +482,6 @@ class Node(system: ActorSystemImpl) extends Extension {
private[cluster] final def joining(node: Address) {
log.info("Node [{}] - Node [{}] is joining", remoteAddress, node)
failureDetector heartbeat node // update heartbeat in failure detector
val localState = state.get
val localGossip = localState.latestGossip
val localMembers = localGossip.members
@@ -535,8 +496,9 @@ class Node(system: ActorSystemImpl) extends Extension {
if (!state.compareAndSet(localState, newState)) joining(node) // recur if we fail the update
else {
failureDetector heartbeat node // update heartbeat in failure detector
if (convergence(newState.latestGossip).isDefined) {
newState.memberMembershipChangeListeners map { _ notify newMembers }
newState.memberMembershipChangeListeners foreach { _ notify newMembers }
}
}
}
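joining is typical of the optimistic lockless concurrency the Node state machine uses: read the current state, derive a new one, compareAndSet, and recurse on contention. A self-contained sketch of the idiom (State and add are stand-ins, not the cluster's real types):

import java.util.concurrent.atomic.AtomicReference
import scala.annotation.tailrec

object CasRetryExample {
  case class State(members: Set[String])
  private val state = new AtomicReference(State(Set.empty))

  @tailrec
  final def add(member: String) {
    val localState = state.get
    val newState = localState copy (members = localState.members + member)
    // if we won the race we are done, else somebody else updated in between - retry
    if (!state.compareAndSet(localState, newState)) add(member)
  }
}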
@@ -571,10 +533,6 @@ class Node(system: ActorSystemImpl) extends Extension {
*/
@tailrec
private[cluster] final def receive(sender: Member, remoteGossip: Gossip) {
log.debug("Node [{}] - Receiving gossip from [{}]", remoteAddress, sender.address)
failureDetector heartbeat sender.address // update heartbeat in failure detector
val localState = state.get
val localGossip = localState.latestGossip
@@ -604,8 +562,12 @@ class Node(system: ActorSystemImpl) extends Extension {
// if we won the race then update else try again
if (!state.compareAndSet(localState, newState)) receive(sender, remoteGossip) // recur if we fail the update
else {
log.debug("Node [{}] - Receiving gossip from [{}]", remoteAddress, sender.address)
failureDetector heartbeat sender.address // update heartbeat in failure detector
if (convergence(newState.latestGossip).isDefined) {
newState.memberMembershipChangeListeners map { _ notify newState.latestGossip.members }
newState.memberMembershipChangeListeners foreach { _ notify newState.latestGossip.members }
}
}
}
@@ -639,12 +601,12 @@ class Node(system: ActorSystemImpl) extends Extension {
val localUnreachableSize = localUnreachableAddresses.size
// 1. gossip to alive members
val gossipedToDeputy = gossipToRandomNodeOf(localMembers.toList map { _.address })
val gossipedToDeputy = gossipToRandomNodeOf(localMembers map { _.address })
// 2. gossip to unreachable members
if (localUnreachableSize > 0) {
val probability: Double = localUnreachableSize.toDouble / (localMembersSize + 1)
if (random.nextDouble() < probability) gossipToRandomNodeOf(localUnreachableAddresses.toList)
if (random.nextDouble() < probability) gossipToRandomNodeOf(localUnreachableAddresses)
}
// 3. gossip to deputy nodes for facilitating partition healing
@@ -714,8 +676,8 @@ class Node(system: ActorSystemImpl) extends Extension {
*
* @return 'true' if it gossiped to a "deputy" member.
*/
private def gossipToRandomNodeOf(addresses: Seq[Address]): Boolean = {
val peers = addresses filter (_ != remoteAddress) // filter out myself
private def gossipToRandomNodeOf(addresses: Iterable[Address]): Boolean = {
val peers = addresses filterNot (_ == remoteAddress) // filter out myself
val peer = selectRandomNode(peers)
gossipTo(peer)
deputyNodes exists (peer == _)
@@ -744,8 +706,6 @@ class Node(system: ActorSystemImpl) extends Extension {
val newMembers = localMembers diff newlyDetectedUnreachableMembers
val newUnreachableAddresses: Set[Address] = localUnreachableAddresses ++ newlyDetectedUnreachableAddresses
log.info("Node [{}] - Marking node(s) an unreachable [{}]", remoteAddress, newlyDetectedUnreachableAddresses.mkString(", "))
val newSeen = newUnreachableAddresses.foldLeft(localSeen)((currentSeen, address) ⇒ currentSeen - address)
val newOverview = localOverview copy (seen = newSeen, unreachable = newUnreachableAddresses)
@@ -759,8 +719,10 @@ class Node(system: ActorSystemImpl) extends Extension {
// if we won the race then update else try again
if (!state.compareAndSet(localState, newState)) scrutinize() // recur
else {
log.info("Node [{}] - Marking node(s) an unreachable [{}]", remoteAddress, newlyDetectedUnreachableAddresses.mkString(", "))
if (convergence(newState.latestGossip).isDefined) {
newState.memberMembershipChangeListeners map { _ notify newMembers }
newState.memberMembershipChangeListeners foreach { _ notify newMembers }
}
}
}
@@ -777,7 +739,7 @@ class Node(system: ActorSystemImpl) extends Extension {
// if (overview.unreachable.isEmpty) { // if there are any unreachable nodes then we can't have a convergence -
// waiting for user to act (issuing DOWN) or leader to act (issuing DOWN through auto-down)
val seen = gossip.overview.seen
val views = Set.empty[VectorClock] ++ seen.values
val views = seen.values.toSet
if (views.size == 1) {
log.debug("Node [{}] - Cluster convergence reached", remoteAddress)
Some(gossip)
@@ -785,6 +747,13 @@ class Node(system: ActorSystemImpl) extends Extension {
// } else None
}
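With the refactoring above, convergence is simply the observation that every entry in the seen table carries one and the same vector clock. The check in isolation, sketched over plain strings instead of VectorClock:

// converged when all seen entries point at a single version
def converged(seen: Map[String, String]): Boolean =
  seen.nonEmpty && seen.values.toSet.size == 1

converged(Map("a" -> "v1", "b" -> "v1")) // true - a single shared view
converged(Map("a" -> "v1", "b" -> "v2")) // false - views still diverge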
private def systemActorOf(props: Props, name: String): ActorRef = {
Await.result(system.systemGuardian ? CreateChild(props, name), system.settings.CreationTimeout.duration) match {
case ref: ActorRef ⇒ ref
case ex: Exception ⇒ throw ex
}
}
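systemActorOf asks the system guardian to create the daemon and blocks for the reply. The generic ask idiom it leans on, as a sketch (askAndWait is a hypothetical helper; the import locations are Akka 2.0's):

import akka.actor.ActorRef
import akka.dispatch.Await
import akka.pattern.ask
import akka.util.Timeout
import akka.util.duration._

// hypothetical helper: ask an actor and block for its reply
def askAndWait(actor: ActorRef, message: Any): Any = {
  implicit val timeout = Timeout(5 seconds)
  Await.result(actor ? message, timeout.duration)
}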
/**
* Sets up cluster command connection.
*/
@ -795,9 +764,9 @@ class Node(system: ActorSystemImpl) extends Extension {
*/
private def clusterGossipConnectionFor(address: Address): ActorRef = system.actorFor(RootActorPath(address) / "system" / "clusterGossip")
private def deputyNodes: Seq[Address] = state.get.latestGossip.members.toSeq map (_.address) drop 1 take nrOfDeputyNodes filter (_ != remoteAddress)
private def deputyNodes: Iterable[Address] = state.get.latestGossip.members.toIterable map (_.address) drop 1 take nrOfDeputyNodes filter (_ != remoteAddress)
private def selectRandomNode(addresses: Seq[Address]): Address = addresses(random nextInt addresses.size)
private def selectRandomNode(addresses: Iterable[Address]): Address = addresses.toSeq(random nextInt addresses.size)
private def isSingletonCluster(currentState: State): Boolean = currentState.latestGossip.members.size == 1
}


@@ -72,25 +72,18 @@ object VectorClock {
/**
* Hash representation of a versioned node name.
*/
class Node private (val name: String) extends Serializable {
override def hashCode = 0 + name.##
override def equals(other: Any) = Node.unapply(this) == Node.unapply(other)
override def toString = name.mkString("Node(", "", ")")
}
sealed trait Node extends Serializable
object Node {
def apply(name: String): Node = new Node(hash(name))
def unapply(other: Any) = other match {
case x: Node ⇒ import x._; Some(name)
case _ ⇒ None
private case class NodeImpl(name: String) extends Node {
override def toString(): String = "Node(" + name + ")"
}
def apply(name: String): Node = NodeImpl(hash(name))
private def hash(name: String): String = {
val digester = MessageDigest.getInstance("MD5")
digester update name.getBytes
digester update name.getBytes("UTF-8")
digester.digest.map { h ⇒ "%02x".format(0xFF & h) }.mkString
}
}
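Node now keeps only the hashed form of the name: the MD5 digest of the UTF-8 bytes, rendered as lowercase hex. The same transformation in isolation:

import java.security.MessageDigest

def md5Hex(name: String): String = {
  val digester = MessageDigest.getInstance("MD5")
  digester update name.getBytes("UTF-8")
  digester.digest.map { h ⇒ "%02x".format(0xFF & h) }.mkString
}

md5Hex("node-a") // always 32 hex characters; equal names give equal hashes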
@@ -144,8 +137,6 @@ case class VectorClock(
versions: Map[VectorClock.Node, VectorClock.Timestamp] = Map.empty[VectorClock.Node, VectorClock.Timestamp])
extends PartiallyOrdered[VectorClock] {
// FIXME pruning of VectorClock history
import VectorClock._
/**

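VectorClock extends PartiallyOrdered because two clocks need not be comparable: one precedes another only when no per-node timestamp is greater and at least one is smaller; otherwise they are concurrent. A standalone sketch of that partial order over plain maps (not the Akka class itself):

// None means concurrent: neither clock happened before the other
def tryCompare(a: Map[String, Long], b: Map[String, Long]): Option[Int] = {
  val nodes = a.keySet ++ b.keySet
  val aLeB = nodes forall (n ⇒ a.getOrElse(n, 0L) <= b.getOrElse(n, 0L))
  val bLeA = nodes forall (n ⇒ b.getOrElse(n, 0L) <= a.getOrElse(n, 0L))
  (aLeB, bLeA) match {
    case (true, true)   ⇒ Some(0)  // identical
    case (true, false)  ⇒ Some(-1) // a happened before b
    case (false, true)  ⇒ Some(1)  // b happened before a
    case (false, false) ⇒ None     // concurrent
  }
}

tryCompare(Map("A" -> 1L), Map("A" -> 2L)) // Some(-1)
tryCompare(Map("A" -> 2L), Map("B" -> 1L)) // None - concurrent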

@@ -1,20 +0,0 @@
/**
* Copyright (C) 2009-2012 Typesafe Inc. <http://www.typesafe.com>
*/
package akka
import akka.actor.ActorSystem
package object cluster {
/**
* Implicitly creates an augmented [[akka.actor.ActorSystem]] with a method {{{def node: Node}}}.
*
* @param system
* @return An augmented [[akka.actor.ActorSystem]] with a method {{{def node: Node}}}.
*/
implicit def actorSystemWithNodeAccessor(system: ActorSystem) = new {
val node = NodeExtension(system)
}
}


@@ -19,19 +19,19 @@ class ClusterCommandDaemonFSMSpec extends AkkaSpec(
"A ClusterCommandDaemon FSM" must {
"start in Joining" in {
val fsm = TestFSMRef(new ClusterCommandDaemon(system, system.node))
val fsm = TestFSMRef(new ClusterCommandDaemon(system, Node(system)))
fsm.stateName must be(MemberStatus.Joining)
}
"be able to switch from Joining to Up" in {
val fsm = TestFSMRef(new ClusterCommandDaemon(system, system.node))
val fsm = TestFSMRef(new ClusterCommandDaemon(system, Node(system)))
fsm.stateName must be(MemberStatus.Joining)
fsm ! ClusterAction.Up
fsm.stateName must be(MemberStatus.Up)
}
"be able to switch from Up to Down" in {
val fsm = TestFSMRef(new ClusterCommandDaemon(system, system.node))
val fsm = TestFSMRef(new ClusterCommandDaemon(system, Node(system)))
fsm.stateName must be(MemberStatus.Joining)
fsm ! ClusterAction.Up
fsm.stateName must be(MemberStatus.Up)
@@ -40,7 +40,7 @@ class ClusterCommandDaemonFSMSpec extends AkkaSpec(
}
"be able to switch from Up to Leaving" in {
val fsm = TestFSMRef(new ClusterCommandDaemon(system, system.node))
val fsm = TestFSMRef(new ClusterCommandDaemon(system, Node(system)))
fsm.stateName must be(MemberStatus.Joining)
fsm ! ClusterAction.Up
fsm.stateName must be(MemberStatus.Up)
@@ -49,7 +49,7 @@ class ClusterCommandDaemonFSMSpec extends AkkaSpec(
}
"be able to switch from Up to Exiting" in {
val fsm = TestFSMRef(new ClusterCommandDaemon(system, system.node))
val fsm = TestFSMRef(new ClusterCommandDaemon(system, Node(system)))
fsm.stateName must be(MemberStatus.Joining)
fsm ! ClusterAction.Up
fsm.stateName must be(MemberStatus.Up)
@@ -58,7 +58,7 @@ class ClusterCommandDaemonFSMSpec extends AkkaSpec(
}
"be able to switch from Up to Removed" in {
val fsm = TestFSMRef(new ClusterCommandDaemon(system, system.node))
val fsm = TestFSMRef(new ClusterCommandDaemon(system, Node(system)))
fsm.stateName must be(MemberStatus.Joining)
fsm ! ClusterAction.Up
fsm.stateName must be(MemberStatus.Up)
@@ -67,7 +67,7 @@ class ClusterCommandDaemonFSMSpec extends AkkaSpec(
}
"be able to switch from Leaving to Down" in {
val fsm = TestFSMRef(new ClusterCommandDaemon(system, system.node))
val fsm = TestFSMRef(new ClusterCommandDaemon(system, Node(system)))
fsm.stateName must be(MemberStatus.Joining)
fsm ! ClusterAction.Up
fsm.stateName must be(MemberStatus.Up)
@@ -78,7 +78,7 @@ class ClusterCommandDaemonFSMSpec extends AkkaSpec(
}
"be able to switch from Leaving to Removed" in {
val fsm = TestFSMRef(new ClusterCommandDaemon(system, system.node))
val fsm = TestFSMRef(new ClusterCommandDaemon(system, Node(system)))
fsm.stateName must be(MemberStatus.Joining)
fsm ! ClusterAction.Up
fsm.stateName must be(MemberStatus.Up)
@@ -89,7 +89,7 @@ class ClusterCommandDaemonFSMSpec extends AkkaSpec(
}
"be able to switch from Exiting to Removed" in {
val fsm = TestFSMRef(new ClusterCommandDaemon(system, system.node))
val fsm = TestFSMRef(new ClusterCommandDaemon(system, Node(system)))
fsm.stateName must be(MemberStatus.Joining)
fsm ! ClusterAction.Up
fsm.stateName must be(MemberStatus.Up)
@@ -100,7 +100,7 @@ class ClusterCommandDaemonFSMSpec extends AkkaSpec(
}
"be able to switch from Down to Removed" in {
val fsm = TestFSMRef(new ClusterCommandDaemon(system, system.node))
val fsm = TestFSMRef(new ClusterCommandDaemon(system, Node(system)))
fsm.stateName must be(MemberStatus.Joining)
fsm ! ClusterAction.Up
fsm.stateName must be(MemberStatus.Up)
@@ -111,7 +111,7 @@ class ClusterCommandDaemonFSMSpec extends AkkaSpec(
}
"not be able to switch from Removed to any other state" in {
val fsm = TestFSMRef(new ClusterCommandDaemon(system, system.node))
val fsm = TestFSMRef(new ClusterCommandDaemon(system, Node(system)))
fsm.stateName must be(MemberStatus.Joining)
fsm ! ClusterAction.Up
fsm.stateName must be(MemberStatus.Up)
@@ -132,7 +132,7 @@ class ClusterCommandDaemonFSMSpec extends AkkaSpec(
"remain in the same state when receiving a Join command" in {
val address = Address("akka", system.name)
val fsm = TestFSMRef(new ClusterCommandDaemon(system, system.node))
val fsm = TestFSMRef(new ClusterCommandDaemon(system, Node(system)))
fsm.stateName must be(MemberStatus.Joining)
fsm ! ClusterAction.Join(address)
fsm.stateName must be(MemberStatus.Joining)

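Every test above builds the daemon with TestFSMRef and asserts on stateName; the daemon itself is an Akka FSM over the MemberStatus states. A minimal FSM of the same shape, reduced to two illustrative states (not the daemon's real transition table):

import akka.actor.{ Actor, FSM }

sealed trait ExampleState
case object Joining extends ExampleState
case object Up extends ExampleState
case object GoUp

class ExampleFSM extends Actor with FSM[ExampleState, Unit] {
  startWith(Joining, ())

  when(Joining) {
    case Event(GoUp, _) ⇒ goto(Up)
  }

  when(Up) {
    case Event(_, _) ⇒ stay
  }

  initialize
}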

@@ -16,9 +16,11 @@ import java.net.InetSocketAddress
class GossipingAccrualFailureDetectorSpec extends AkkaSpec("""
akka {
loglevel = "INFO"
cluster.failure-detector.threshold = 3
actor.debug.lifecycle = on
actor.debug.autoreceive = on
actor.provider = akka.remote.RemoteActorRefProvider
remote.netty.hostname = localhost
cluster.failure-detector.threshold = 3
}
""") with ImplicitSender {
@@ -35,18 +37,11 @@ class GossipingAccrualFailureDetectorSpec extends AkkaSpec("""
// ======= NODE 1 ========
system1 = ActorSystem("system1", ConfigFactory
.parseString("""
akka {
actor.provider = "akka.remote.RemoteActorRefProvider"
remote.netty {
hostname = localhost
port=5550
}
}""")
.parseString("akka.remote.netty.port=5550")
.withFallback(system.settings.config))
.asInstanceOf[ActorSystemImpl]
val remote1 = system1.provider.asInstanceOf[RemoteActorRefProvider]
node1 = new Node(system1)
node1 = Node(system1)
val fd1 = node1.failureDetector
val address1 = node1.self.address
@@ -54,17 +49,13 @@ class GossipingAccrualFailureDetectorSpec extends AkkaSpec("""
system2 = ActorSystem("system2", ConfigFactory
.parseString("""
akka {
actor.provider = "akka.remote.RemoteActorRefProvider"
remote.netty {
hostname = localhost
port = 5551
}
remote.netty.port=5551
cluster.node-to-join = "akka://system1@localhost:5550"
}""")
.withFallback(system.settings.config))
.asInstanceOf[ActorSystemImpl]
val remote2 = system2.provider.asInstanceOf[RemoteActorRefProvider]
node2 = new Node(system2)
node2 = Node(system2)
val fd2 = node2.failureDetector
val address2 = node2.self.address
@@ -72,17 +63,13 @@ class GossipingAccrualFailureDetectorSpec extends AkkaSpec("""
system3 = ActorSystem("system3", ConfigFactory
.parseString("""
akka {
actor.provider = "akka.remote.RemoteActorRefProvider"
remote.netty {
hostname = localhost
port=5552
}
remote.netty.port=5552
cluster.node-to-join = "akka://system1@localhost:5550"
}""")
.withFallback(system.settings.config))
.asInstanceOf[ActorSystemImpl]
val remote3 = system3.provider.asInstanceOf[RemoteActorRefProvider]
node3 = new Node(system3)
node3 = Node(system3)
val fd3 = node3.failureDetector
val address3 = node3.self.address


@@ -17,6 +17,7 @@ import java.net.InetSocketAddress
class LeaderElectionSpec extends AkkaSpec("""
akka {
loglevel = "INFO"
actor.provider = "akka.remote.RemoteActorRefProvider"
actor.debug.lifecycle = on
actor.debug.autoreceive = on
cluster.failure-detector.threshold = 3
@@ -38,7 +39,6 @@ class LeaderElectionSpec extends AkkaSpec("""
system1 = ActorSystem("system1", ConfigFactory
.parseString("""
akka {
actor.provider = "akka.remote.RemoteActorRefProvider"
remote.netty {
hostname = localhost
port=5550
@@ -47,7 +47,7 @@ class LeaderElectionSpec extends AkkaSpec("""
.withFallback(system.settings.config))
.asInstanceOf[ActorSystemImpl]
val remote1 = system1.provider.asInstanceOf[RemoteActorRefProvider]
node1 = new Node(system1)
node1 = Node(system1)
val fd1 = node1.failureDetector
val address1 = node1.self.address
@@ -55,7 +55,6 @@ class LeaderElectionSpec extends AkkaSpec("""
system2 = ActorSystem("system2", ConfigFactory
.parseString("""
akka {
actor.provider = "akka.remote.RemoteActorRefProvider"
remote.netty {
hostname = localhost
port = 5551
@@ -65,7 +64,7 @@ class LeaderElectionSpec extends AkkaSpec("""
.withFallback(system.settings.config))
.asInstanceOf[ActorSystemImpl]
val remote2 = system2.provider.asInstanceOf[RemoteActorRefProvider]
node2 = new Node(system2)
node2 = Node(system2)
val fd2 = node2.failureDetector
val address2 = node2.self.address
@@ -73,7 +72,6 @@ class LeaderElectionSpec extends AkkaSpec("""
system3 = ActorSystem("system3", ConfigFactory
.parseString("""
akka {
actor.provider = "akka.remote.RemoteActorRefProvider"
remote.netty {
hostname = localhost
port=5552
@@ -83,7 +81,7 @@ class LeaderElectionSpec extends AkkaSpec("""
.withFallback(system.settings.config))
.asInstanceOf[ActorSystemImpl]
val remote3 = system3.provider.asInstanceOf[RemoteActorRefProvider]
node3 = new Node(system3)
node3 = Node(system3)
val fd3 = node3.failureDetector
val address3 = node3.self.address


@@ -18,6 +18,8 @@ import com.typesafe.config._
class MembershipChangeListenerSpec extends AkkaSpec("""
akka {
actor.provider = akka.remote.RemoteActorRefProvider
remote.netty.hostname = localhost
loglevel = "INFO"
}
""") with ImplicitSender {
@@ -34,33 +36,22 @@ class MembershipChangeListenerSpec extends AkkaSpec("""
"A set of connected cluster systems" must {
"(when two systems) after cluster convergence updates the membership table then all MembershipChangeListeners should be triggered" taggedAs LongRunningTest in {
system0 = ActorSystem("system0", ConfigFactory
.parseString("""
akka {
actor.provider = "akka.remote.RemoteActorRefProvider"
remote.netty {
hostname = localhost
port=5550
}
}""")
.parseString("akka.remote.netty.port=5550")
.withFallback(system.settings.config))
.asInstanceOf[ActorSystemImpl]
val remote0 = system0.provider.asInstanceOf[RemoteActorRefProvider]
node0 = new Node(system0)
node0 = Node(system0)
system1 = ActorSystem("system1", ConfigFactory
.parseString("""
akka {
actor.provider = "akka.remote.RemoteActorRefProvider"
remote.netty {
hostname = localhost
port=5551
}
remote.netty.port=5551
cluster.node-to-join = "akka://system0@localhost:5550"
}""")
.withFallback(system.settings.config))
.asInstanceOf[ActorSystemImpl]
val remote1 = system1.provider.asInstanceOf[RemoteActorRefProvider]
node1 = new Node(system1)
node1 = Node(system1)
val latch = new CountDownLatch(2)
@@ -90,17 +81,13 @@ class MembershipChangeListenerSpec extends AkkaSpec("""
system2 = ActorSystem("system2", ConfigFactory
.parseString("""
akka {
actor.provider = "akka.remote.RemoteActorRefProvider"
remote.netty {
hostname = localhost
port=5552
}
remote.netty.port=5552
cluster.node-to-join = "akka://system0@localhost:5550"
}""")
.withFallback(system.settings.config))
.asInstanceOf[ActorSystemImpl]
val remote2 = system2.provider.asInstanceOf[RemoteActorRefProvider]
node2 = new Node(system2)
node2 = Node(system2)
val latch = new CountDownLatch(3)
node0.registerListener(new MembershipChangeListener {

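The registration above is cut off by the hunk; a listener of roughly this shape presumably completes it, counting down the latch on each converged membership change. The notify signature is inferred from how Node invokes its listeners, the await is illustrative, and SortedSet, Member and TimeUnit are assumed to be imported by the spec:

node0.registerListener(new MembershipChangeListener {
  def notify(members: SortedSet[Member]) {
    latch.countDown() // one count per converged membership update
  }
})
latch.await(30, TimeUnit.SECONDS) // illustrative timeout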

@@ -15,6 +15,8 @@ import com.typesafe.config._
class NodeMembershipSpec extends AkkaSpec("""
akka {
actor.provider = akka.remote.RemoteActorRefProvider
remote.netty.hostname = localhost
loglevel = "INFO"
}
""") with ImplicitSender {
@@ -33,34 +35,23 @@ class NodeMembershipSpec extends AkkaSpec("""
// ======= NODE 0 ========
system0 = ActorSystem("system0", ConfigFactory
.parseString("""
akka {
actor.provider = "akka.remote.RemoteActorRefProvider"
remote.netty {
hostname = localhost
port=5550
}
}""")
.parseString("akka.remote.netty.port=5550")
.withFallback(system.settings.config))
.asInstanceOf[ActorSystemImpl]
val remote0 = system0.provider.asInstanceOf[RemoteActorRefProvider]
node0 = system0.node
node0 = Node(system0)
// ======= NODE 1 ========
system1 = ActorSystem("system1", ConfigFactory
.parseString("""
akka {
actor.provider = "akka.remote.RemoteActorRefProvider"
remote.netty {
hostname = localhost
port=5551
}
remote.netty.port=5551
cluster.node-to-join = "akka://system0@localhost:5550"
}""")
.withFallback(system.settings.config))
.asInstanceOf[ActorSystemImpl]
val remote1 = system1.provider.asInstanceOf[RemoteActorRefProvider]
node1 = system1.node
node1 = Node(system1)
Thread.sleep(10.seconds.dilated.toMillis)
@@ -89,17 +80,13 @@ class NodeMembershipSpec extends AkkaSpec("""
system2 = ActorSystem("system2", ConfigFactory
.parseString("""
akka {
actor.provider = "akka.remote.RemoteActorRefProvider"
remote.netty {
hostname = localhost
port=5552
}
remote.netty.port=5552
cluster.node-to-join = "akka://system0@localhost:5550"
}""")
.withFallback(system.settings.config))
.asInstanceOf[ActorSystemImpl]
val remote2 = system2.provider.asInstanceOf[RemoteActorRefProvider]
node2 = system2.node
node2 = Node(system2)
Thread.sleep(10.seconds.dilated.toMillis)


@@ -16,6 +16,8 @@ import com.typesafe.config._
class NodeStartupSpec extends AkkaSpec("""
akka {
loglevel = "INFO"
actor.provider = akka.remote.RemoteActorRefProvider
remote.netty.hostname = localhost
}
""") with ImplicitSender {
@@ -26,19 +28,12 @@ class NodeStartupSpec extends AkkaSpec("""
try {
"A first cluster node with a 'node-to-join' config set to empty string (singleton cluster)" must {
system0 = ActorSystem("NodeStartupSpec", ConfigFactory
.parseString("""
akka {
actor.provider = "akka.remote.RemoteActorRefProvider"
remote.netty {
hostname = localhost
port=5550
}
}""")
system0 = ActorSystem("system0", ConfigFactory
.parseString("akka.remote.netty.port=5550")
.withFallback(system.settings.config))
.asInstanceOf[ActorSystemImpl]
val remote0 = system0.provider.asInstanceOf[RemoteActorRefProvider]
node0 = new Node(system0)
node0 = Node(system0)
"be a singleton cluster when started up" in {
Thread.sleep(1.seconds.dilated.toMillis)
@@ -55,20 +50,16 @@ class NodeStartupSpec extends AkkaSpec("""
"A second cluster node with a 'node-to-join' config defined" must {
"join the other node cluster as 'Joining' when sending a Join command" in {
system1 = ActorSystem("NodeStartupSpec", ConfigFactory
system1 = ActorSystem("system1", ConfigFactory
.parseString("""
akka {
actor.provider = "akka.remote.RemoteActorRefProvider"
remote.netty {
hostname = localhost
port=5551
}
cluster.node-to-join = "akka://NodeStartupSpec@localhost:5550"
}""")
akka {
remote.netty.port=5551
cluster.node-to-join = "akka://system0@localhost:5550"
}""")
.withFallback(system.settings.config))
.asInstanceOf[ActorSystemImpl]
val remote1 = system1.provider.asInstanceOf[RemoteActorRefProvider]
node1 = new Node(system1)
node1 = Node(system1)
Thread.sleep(1.seconds.dilated.toMillis) // give enough time for node1 to JOIN node0
val members = node0.latestGossip.members


@@ -133,19 +133,6 @@ case class RemoteServerClientClosed(
": Client[" + clientAddress.getOrElse("no address") + "]"
}
case class RemoteServerWriteFailed(
@BeanProperty request: AnyRef,
@BeanProperty cause: Throwable,
@BeanProperty remote: RemoteTransport,
@BeanProperty remoteAddress: Option[Address]) extends RemoteServerLifeCycleEvent {
override def logLevel = Logging.WarningLevel
override def toString =
"RemoteServerWriteFailed@" + remote +
": ClientAddress[" + remoteAddress +
"] MessageClass[" + (if (request ne null) request.getClass.getName else "no message") +
"] Error[" + cause + "]"
}
/**
* Thrown for example when trying to send a message using a RemoteClient that is either not started or shut down.
*/


@@ -327,9 +327,7 @@ object AkkaBuild extends Build {
// Settings
override lazy val settings = super.settings ++ buildSettings ++ Seq(
resolvers += "Sonatype Snapshot Repo" at "https://oss.sonatype.org/content/repositories/snapshots/"
)
override lazy val settings = super.settings ++ buildSettings
lazy val baseSettings = Defaults.defaultSettings ++ Publish.settings