Publish member events when state change first seen, see #3075
* Remove InstantMemberEvent
parent 5c7747e7fa
commit 5b844ec1e6

32 changed files with 145 additions and 517 deletions
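For subscribers, the upshot of #3075 is that a single MemberEvent stream now covers what previously required both MemberEvent and InstantMemberEvent: events are published as soon as the state change is first seen on a node (the leader only performs the transition after observing convergence). A minimal subscriber sketch against the post-commit API — the listener class, actor names, and println bodies are illustrative, not part of this commit:

import akka.actor.{ Actor, ActorSystem, Props }
import akka.cluster.Cluster
import akka.cluster.ClusterEvent._

// Illustrative listener: one MemberEvent subscription replaces the
// removed InstantMemberEvent stream.
class MemberListener extends Actor {
  val cluster = Cluster(context.system)

  override def preStart(): Unit = cluster.subscribe(self, classOf[MemberEvent])
  override def postStop(): Unit = cluster.unsubscribe(self)

  def receive = {
    case state: CurrentClusterState ⇒
      // snapshot sent on subscribe (InstantClusterState is gone)
      println("initial members: " + state.members)
    case MemberUp(m)      ⇒ println("up: " + m)
    case MemberExited(m)  ⇒ println("exited: " + m)
    case MemberRemoved(m) ⇒ println("removed: " + m)
  }
}

object MemberListenerApp extends App {
  val system = ActorSystem("ClusterSystem")
  system.actorOf(Props[MemberListener], "memberListener")
}

Note that after this commit MemberUp, MemberExited and MemberRemoved are the only remaining MemberEvent types, so no catch-all case is needed above.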
@@ -65,138 +65,36 @@ object ClusterEvent {
 
   /**
    * Marker interface for membership events.
-   * Only published after convergence, when all members have seen current
+   * Published when the state change is first seen on a node.
+   * The state change was performed by the leader when there was
+   * convergence on the leader node, i.e. all members had seen previous
    * state.
    */
   sealed trait MemberEvent extends ClusterDomainEvent {
     def member: Member
   }
 
-  /**
-   * A new member joined the cluster.
-   * Only published after convergence, when all members have seen current
-   * state.
-   */
-  case class MemberJoined(member: Member) extends MemberEvent {
-    if (member.status != Joining) throw new IllegalArgumentException("Expected Joining status, got: " + member)
-  }
-
   /**
    * Member status changed to Up.
-   * Only published after convergence, when all members have seen current
-   * state.
    */
   case class MemberUp(member: Member) extends MemberEvent {
     if (member.status != Up) throw new IllegalArgumentException("Expected Up status, got: " + member)
   }
 
-  /**
-   * Member status changed to Leaving.
-   * Only published after convergence, when all members have seen current
-   * state.
-   */
-  case class MemberLeft(member: Member) extends MemberEvent {
-    if (member.status != Leaving) throw new IllegalArgumentException("Expected Leaving status, got: " + member)
-  }
-
   /**
    * Member status changed to Exiting.
-   * Only published after convergence, when all members have seen current
-   * state.
    */
   case class MemberExited(member: Member) extends MemberEvent {
     if (member.status != Exiting) throw new IllegalArgumentException("Expected Exiting status, got: " + member)
   }
 
   /**
-   * Member status changed to Down.
-   * Only published after convergence, when all members have seen current
-   * state.
-   */
-  case class MemberDowned(member: Member) extends MemberEvent {
-    if (member.status != Down) throw new IllegalArgumentException("Expected Down status, got: " + member)
-  }
-
-  /**
-   * Member completely removed from the cluster. Only published after convergence,
-   * when all other members have seen the state.
+   * Member completely removed from the cluster.
    */
   case class MemberRemoved(member: Member) extends MemberEvent {
     if (member.status != Removed) throw new IllegalArgumentException("Expected Removed status, got: " + member)
   }
 
-  /**
-   * Current snapshot state of the cluster. Sent to new subscriber of
-   * [akka.cluster.ClusterEvent.InstantMemberEvent].
-   */
-  case class InstantClusterState(members: immutable.SortedSet[Member] = immutable.SortedSet.empty)
-    extends ClusterDomainEvent {
-
-    /**
-     * Java API: get current member list
-     */
-    def getMembers: java.lang.Iterable[Member] = {
-      import scala.collection.JavaConverters._
-      members.asJava
-    }
-  }
-
-  /**
-   * Marker interface for membership events published immediately when
-   * it happened. All other members might not have seen the state.
-   */
-  sealed trait InstantMemberEvent extends ClusterDomainEvent {
-    def member: Member
-  }
-
-  /**
-   * A new member joined the cluster. Published immediately when it happened.
-   * All other members might not have seen the state.
-   */
-  case class InstantMemberJoined(member: Member) extends InstantMemberEvent {
-    if (member.status != Joining) throw new IllegalArgumentException("Expected Joining status, got: " + member)
-  }
-
-  /**
-   * Member status changed to Up. Published immediately when it happened.
-   * All other members might not have seen the state.
-   */
-  case class InstantMemberUp(member: Member) extends InstantMemberEvent {
-    if (member.status != Up) throw new IllegalArgumentException("Expected Up status, got: " + member)
-  }
-
-  /**
-   * Member status changed to Leaving. Published immediately when it happened.
-   * All other members might not have seen the state.
-   */
-  case class InstantMemberLeft(member: Member) extends InstantMemberEvent {
-    if (member.status != Leaving) throw new IllegalArgumentException("Expected Leaving status, got: " + member)
-  }
-
-  /**
-   * Member status changed to Exiting. Published immediately when it happened.
-   * All other members might not have seen the state.
-   */
-  case class InstantMemberExited(member: Member) extends InstantMemberEvent {
-    if (member.status != Exiting) throw new IllegalArgumentException("Expected Exiting status, got: " + member)
-  }
-
-  /**
-   * Member status changed to Down. Published immediately when it happened.
-   * All other members might not have seen the state.
-   */
-  case class InstantMemberDowned(member: Member) extends InstantMemberEvent {
-    if (member.status != Down) throw new IllegalArgumentException("Expected Down status, got: " + member)
-  }
-
-  /**
-   * Member completely removed from the cluster. Published immediately when it happened.
-   * All other members might not have seen the state.
-   */
-  case class InstantMemberRemoved(member: Member) extends InstantMemberEvent {
-    if (member.status != Removed) throw new IllegalArgumentException("Expected Removed status, got: " + member)
-  }
-
   /**
    * Leader of the cluster members changed. Only published after convergence.
    */

@@ -260,19 +158,13 @@ object ClusterEvent {
     val changedMembers = membersGroupedByAddress collect {
       case (_, newMember :: oldMember :: Nil) if newMember.status != oldMember.status ⇒ newMember
     }
-    val memberEvents = (newMembers ++ changedMembers) map { m ⇒
-      m.status match {
-        case Joining ⇒ MemberJoined(m)
-        case Up      ⇒ MemberUp(m)
-        case Leaving ⇒ MemberLeft(m)
-        case Exiting ⇒ MemberExited(m)
-        case _       ⇒ throw new IllegalStateException("Unexpected member status: " + m)
-      }
+    val memberEvents = (newMembers ++ changedMembers) collect {
+      case m if m.status == Up      ⇒ MemberUp(m)
+      case m if m.status == Exiting ⇒ MemberExited(m)
+      // no events for other transitions
     }
 
     val allNewUnreachable = newGossip.overview.unreachable -- oldGossip.overview.unreachable
-    val newDowned = allNewUnreachable filter { _.status == Down }
-    val downedEvents = newDowned map MemberDowned
 
     val unreachableGroupedByAddress =
       List(newGossip.overview.unreachable, oldGossip.overview.unreachable).flatten.groupBy(_.address)

@@ -280,27 +172,12 @@ object ClusterEvent {
       case (_, newMember :: oldMember :: Nil) if newMember.status == Down && newMember.status != oldMember.status ⇒
         newMember
     }
-    val unreachableDownedEvents = unreachableDownMembers map MemberDowned
 
     val removedMembers = (oldGossip.members -- newGossip.members -- newGossip.overview.unreachable) ++
       (oldGossip.overview.unreachable -- newGossip.overview.unreachable)
     val removedEvents = removedMembers.map(m ⇒ MemberRemoved(m.copy(status = Removed)))
 
-    (new VectorBuilder[MemberEvent]() ++= memberEvents ++= downedEvents ++= unreachableDownedEvents
-      ++= removedEvents).result()
-  }
-
-  /**
-   * INTERNAL API
-   */
-  private[cluster] def convertToInstantMemberEvents(memberEvents: immutable.Seq[MemberEvent]): immutable.Seq[InstantMemberEvent] =
-    memberEvents map {
-      case MemberJoined(m) ⇒ InstantMemberJoined(m)
-      case MemberUp(m) ⇒ InstantMemberUp(m)
-      case MemberDowned(m) ⇒ InstantMemberDowned(m)
-      case MemberLeft(m) ⇒ InstantMemberLeft(m)
-      case MemberExited(m) ⇒ InstantMemberExited(m)
-      case MemberRemoved(m) ⇒ InstantMemberRemoved(m)
-    }
+    (new VectorBuilder[MemberEvent]() ++= memberEvents ++= removedEvents).result()
+  }
 
   /**

@@ -358,12 +235,12 @@ private[cluster] final class ClusterDomainEventPublisher extends Actor with Acto
   def eventStream: EventStream = context.system.eventStream
 
   /**
-   * The current snapshot state that is a mix of converged and latest gossip
+   * The current snapshot state corresponding to latest gossip
    * to mimic what you would have seen if you where listening to the events.
    */
   def publishCurrentClusterState(receiver: Option[ActorRef]): Unit = {
     val state = CurrentClusterState(
-      members = latestConvergedGossip.members,
+      members = latestGossip.members,
       unreachable = latestGossip.overview.unreachable,
       seenBy = latestGossip.seenBy,
       leader = latestConvergedGossip.leader)

@@ -373,20 +250,8 @@ private[cluster] final class ClusterDomainEventPublisher extends Actor with Acto
     }
   }
 
-  /**
-   * Publish the snapshot state that is based on latest gossip to mimic what you
-   * would have seen if you where listening to the InstantMemberEvent stream.
-   */
-  def publishInstantClusterState(receiver: ActorRef): Unit =
-    receiver ! InstantClusterState(members = latestGossip.members)
-
   def subscribe(subscriber: ActorRef, to: Class[_]): Unit = {
-    val isInstantMemberEvent = classOf[InstantMemberEvent].isAssignableFrom(to)
-    if (classOf[ClusterDomainEvent] == to || isInstantMemberEvent)
-      publishInstantClusterState(subscriber)
-    if (!isInstantMemberEvent)
-      publishCurrentClusterState(Some(subscriber))
-
+    publishCurrentClusterState(Some(subscriber))
     eventStream.subscribe(subscriber, to)
   }

@@ -401,25 +266,22 @@ private[cluster] final class ClusterDomainEventPublisher extends Actor with Acto
     latestGossip = newGossip
     // first publish the diffUnreachable between the last two gossips
     diffUnreachable(oldGossip, newGossip) foreach publish
-    val newMemberEvents = diffMemberEvents(oldGossip, newGossip)
-    convertToInstantMemberEvents(newMemberEvents) foreach publish
-    // buffer up the MemberEvents waiting for convergence
-    bufferedEvents ++= newMemberEvents
+    diffMemberEvents(oldGossip, newGossip) foreach { event ⇒
+      event match {
+        case MemberRemoved(m) ⇒
+          publish(event)
+          // notify DeathWatch about downed node
+          publish(AddressTerminated(m.address))
+        case _ ⇒ publish(event)
+      }
+    }
     // buffer up the LeaderChanged waiting for convergence
     bufferedEvents ++= diffLeader(oldGossip, newGossip)
     // if we have convergence then publish the MemberEvents and LeaderChanged
     if (newGossip.convergence) {
       val previousConvergedGossip = latestConvergedGossip
       latestConvergedGossip = newGossip
-      bufferedEvents foreach { event ⇒
-        event match {
-          case m: MemberEvent if m.isInstanceOf[MemberRemoved] ⇒
-            publish(event)
-            // notify DeathWatch about downed node
-            publish(AddressTerminated(m.member.address))
-          case _ ⇒ publish(event)
-        }
-      }
+      bufferedEvents foreach publish
       bufferedEvents = Vector.empty
     }
     // publish internal SeenState for testing purposes
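The switch from map plus an exhaustive match to collect in diffMemberEvents is what makes the "no events for other transitions" comment work: collect takes a partial function, so member statuses without a corresponding event are silently skipped instead of tripping the old IllegalStateException arm. A self-contained sketch of the idiom, with stand-in types rather than the real Member/MemberEvent:

// Stand-ins for Member/MemberEvent, just to show the map-vs-collect idiom.
sealed trait Status
case object Joining extends Status
case object Up extends Status
case object Exiting extends Status

sealed trait Event
final case class UpEvent(s: Status) extends Event
final case class ExitedEvent(s: Status) extends Event

object CollectIdiom extends App {
  val changed: List[Status] = List(Joining, Up, Exiting)

  // collect applies a partial function: statuses it does not match
  // (here Joining) produce no event at all, no catch-all needed
  val events: List[Event] = changed collect {
    case Up      ⇒ UpEvent(Up)
    case Exiting ⇒ ExitedEvent(Exiting)
  }

  println(events) // List(UpEvent(Up), ExitedEvent(Exiting))
}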
@@ -101,7 +101,7 @@ private[cluster] final class ClusterHeartbeatSender extends Actor with ActorLogg
     HeartbeatInterval, self, HeartbeatTick)
 
   override def preStart(): Unit = {
-    cluster.subscribe(self, classOf[InstantMemberEvent])
+    cluster.subscribe(self, classOf[MemberEvent])
     cluster.subscribe(self, classOf[UnreachableMember])
   }

@@ -123,19 +123,17 @@ private[cluster] final class ClusterHeartbeatSender extends Actor with ActorLogg
 
   def receive = {
     case HeartbeatTick ⇒ heartbeat()
-    case InstantMemberUp(m) ⇒ addMember(m)
+    case MemberUp(m) ⇒ addMember(m)
     case UnreachableMember(m) ⇒ removeMember(m)
-    case InstantMemberDowned(m) ⇒ removeMember(m)
-    case InstantMemberRemoved(m) ⇒ removeMember(m)
-    case s: InstantClusterState ⇒ reset(s)
-    case _: CurrentClusterState ⇒ // enough with InstantClusterState
-    case _: InstantMemberEvent ⇒ // not interested in other types of InstantMemberEvent
+    case MemberRemoved(m) ⇒ removeMember(m)
+    case s: CurrentClusterState ⇒ reset(s)
+    case _: MemberEvent ⇒ // not interested in other types of MemberEvent
     case HeartbeatRequest(from) ⇒ addHeartbeatRequest(from)
     case SendHeartbeatRequest(to) ⇒ sendHeartbeatRequest(to)
     case ExpectedFirstHeartbeat(from) ⇒ triggerFirstHeartbeat(from)
   }
 
-  def reset(snapshot: InstantClusterState): Unit = state = state.reset(snapshot.members.map(_.address))
+  def reset(snapshot: CurrentClusterState): Unit = state = state.reset(snapshot.members.map(_.address))
 
   def addMember(m: Member): Unit = if (m.address != selfAddress) state = state addMember m.address
@@ -76,7 +76,7 @@ private[cluster] class ClusterMetricsCollector(publisher: ActorRef) extends Acto
     MetricsInterval, self, MetricsTick)
 
   override def preStart(): Unit = {
-    cluster.subscribe(self, classOf[InstantMemberEvent])
+    cluster.subscribe(self, classOf[MemberEvent])
     cluster.subscribe(self, classOf[UnreachableMember])
     log.info("Metrics collection has started successfully on node [{}]", selfAddress)
   }

@@ -85,13 +85,11 @@ private[cluster] class ClusterMetricsCollector(publisher: ActorRef) extends Acto
     case GossipTick ⇒ gossip()
     case MetricsTick ⇒ collect()
     case msg: MetricsGossipEnvelope ⇒ receiveGossip(msg)
-    case state: InstantClusterState ⇒ receiveState(state)
-    case state: CurrentClusterState ⇒ // enough with InstantClusterState
-    case InstantMemberUp(m) ⇒ addMember(m)
-    case InstantMemberDowned(m) ⇒ removeMember(m)
-    case InstantMemberRemoved(m) ⇒ removeMember(m)
+    case state: CurrentClusterState ⇒ receiveState(state)
+    case MemberUp(m) ⇒ addMember(m)
+    case MemberRemoved(m) ⇒ removeMember(m)
     case UnreachableMember(m) ⇒ removeMember(m)
-    case _: InstantMemberEvent ⇒ // not interested in other types of InstantMemberEvent
+    case _: MemberEvent ⇒ // not interested in other types of MemberEvent
   }

@@ -119,7 +117,7 @@ private[cluster] class ClusterMetricsCollector(publisher: ActorRef) extends Acto
   /**
    * Updates the initial node ring for those nodes that are [[akka.cluster.MemberStatus.Up]].
    */
-  def receiveState(state: InstantClusterState): Unit =
+  def receiveState(state: CurrentClusterState): Unit =
     nodes = state.members collect { case m if m.status == Up ⇒ m.address }
 
   /**
@@ -53,18 +53,14 @@ private[akka] class ClusterReadView(cluster: Cluster) extends Closeable {
           case UnreachableMember(member) ⇒
             // replace current member with new member (might have different status, only address is used in equals)
            state = state.copy(members = state.members - member, unreachable = state.unreachable - member + member)
-          case MemberDowned(member) ⇒
-            // replace current member with new member (might have different status, only address is used in equals)
-            state = state.copy(members = state.members - member, unreachable = state.unreachable - member + member)
           case event: MemberEvent ⇒
             // replace current member with new member (might have different status, only address is used in equals)
             state = state.copy(members = state.members - event.member + event.member,
               unreachable = state.unreachable - event.member)
           case LeaderChanged(leader) ⇒ state = state.copy(leader = leader)
           case s: CurrentClusterState ⇒ state = s
           case CurrentInternalStats(stats) ⇒ _latestStats = stats
           case ClusterMetricsChanged(nodes) ⇒ _clusterMetrics = nodes
-          case _: InstantClusterState | _: InstantMemberEvent ⇒ // not used here
         }
       }
     }).withDispatcher(cluster.settings.UseDispatcher), name = "clusterEventBusListener")

@@ -130,6 +126,12 @@ private[akka] class ClusterReadView(cluster: Cluster) extends Closeable {
    */
   def clusterMetrics: Set[NodeMetrics] = _clusterMetrics
 
+  /**
+   * INTERNAL API
+   */
+  private[cluster] def refreshCurrentState(): Unit =
+    cluster.sendCurrentClusterState(eventBusListener)
+
   /**
    * INTERNAL API
    * The nodes that has seen current version of the Gossip.
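Because intermediate transitions such as Joining and Leaving no longer produce published events, several of the updated specs below switch from latch-based listeners to polling: each awaitCond iteration evaluates the condition and then calls the new refreshCurrentState() so a fresh CurrentClusterState arrives before the next check. The pattern, abstracted into a self-contained sketch — check and refresh are placeholders for the spec's condition and clusterView.refreshCurrentState():

import scala.annotation.tailrec

object PollWithRefresh {
  // Sketch of the awaitCond-plus-refresh pattern used in the updated specs.
  @tailrec
  def await(check: () ⇒ Boolean, refresh: () ⇒ Unit, retriesLeft: Int = 100): Unit =
    if (check()) ()
    else if (retriesLeft == 0) sys.error("condition never satisfied")
    else {
      refresh() // ask for a fresh CurrentClusterState before retrying
      Thread.sleep(100)
      await(check, refresh, retriesLeft - 1)
    }
}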
@@ -52,7 +52,7 @@ abstract class ClientDowningNodeThatIsUnreachableSpec(multiNodeConfig: ClientDow
       cluster.down(thirdAddress)
       enterBarrier("down-third-node")
 
-      awaitUpConvergence(numberOfMembers = 3, canNotBePartOfMemberRing = Set(thirdAddress))
+      awaitMembersUp(numberOfMembers = 3, canNotBePartOfMemberRing = Set(thirdAddress))
       clusterView.members.exists(_.address == thirdAddress) must be(false)
     }

@@ -63,7 +63,7 @@ abstract class ClientDowningNodeThatIsUnreachableSpec(multiNodeConfig: ClientDow
     runOn(second, fourth) {
       enterBarrier("down-third-node")
 
-      awaitUpConvergence(numberOfMembers = 3, canNotBePartOfMemberRing = Set(thirdAddress))
+      awaitMembersUp(numberOfMembers = 3, canNotBePartOfMemberRing = Set(thirdAddress))
     }
 
     enterBarrier("await-completion")
||||||
|
|
@ -50,7 +50,7 @@ abstract class ClientDowningNodeThatIsUpSpec(multiNodeConfig: ClientDowningNodeT
|
||||||
|
|
||||||
markNodeAsUnavailable(thirdAddress)
|
markNodeAsUnavailable(thirdAddress)
|
||||||
|
|
||||||
awaitUpConvergence(numberOfMembers = 3, canNotBePartOfMemberRing = Set(thirdAddress))
|
awaitMembersUp(numberOfMembers = 3, canNotBePartOfMemberRing = Set(thirdAddress))
|
||||||
clusterView.members.exists(_.address == thirdAddress) must be(false)
|
clusterView.members.exists(_.address == thirdAddress) must be(false)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -61,7 +61,7 @@ abstract class ClientDowningNodeThatIsUpSpec(multiNodeConfig: ClientDowningNodeT
|
||||||
runOn(second, fourth) {
|
runOn(second, fourth) {
|
||||||
enterBarrier("down-third-node")
|
enterBarrier("down-third-node")
|
||||||
|
|
||||||
awaitUpConvergence(numberOfMembers = 3, canNotBePartOfMemberRing = Set(thirdAddress))
|
awaitMembersUp(numberOfMembers = 3, canNotBePartOfMemberRing = Set(thirdAddress))
|
||||||
}
|
}
|
||||||
|
|
||||||
enterBarrier("await-completion")
|
enterBarrier("await-completion")
|
||||||
|
|
|
||||||
|
|
@ -92,33 +92,15 @@ abstract class ConvergenceSpec(multiNodeConfig: ConvergenceMultiNodeConfig)
|
||||||
def memberStatus(address: Address): Option[MemberStatus] =
|
def memberStatus(address: Address): Option[MemberStatus] =
|
||||||
clusterView.members.collectFirst { case m if m.address == address ⇒ m.status }
|
clusterView.members.collectFirst { case m if m.address == address ⇒ m.status }
|
||||||
|
|
||||||
def assertNotMovedUp(joining: Boolean): Unit = {
|
|
||||||
within(20 seconds) {
|
|
||||||
if (joining) awaitCond(clusterView.members.size == 0)
|
|
||||||
else awaitCond(clusterView.members.size == 2)
|
|
||||||
awaitSeenSameState(first, second, fourth)
|
|
||||||
if (joining) memberStatus(first) must be(None)
|
|
||||||
else memberStatus(first) must be(Some(MemberStatus.Up))
|
|
||||||
if (joining) memberStatus(second) must be(None)
|
|
||||||
else memberStatus(second) must be(Some(MemberStatus.Up))
|
|
||||||
// leader is not allowed to move the new node to Up
|
|
||||||
memberStatus(fourth) must be(None)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
enterBarrier("after-join")
|
enterBarrier("after-join")
|
||||||
|
|
||||||
runOn(first, second) {
|
runOn(first, second, fourth) {
|
||||||
for (n ← 1 to 5) {
|
for (n ← 1 to 5) {
|
||||||
assertNotMovedUp(joining = false)
|
awaitCond(clusterView.members.size == 2)
|
||||||
// wait and then check again
|
awaitSeenSameState(first, second, fourth)
|
||||||
Thread.sleep(1.second.dilated.toMillis)
|
memberStatus(first) must be(Some(MemberStatus.Up))
|
||||||
}
|
memberStatus(second) must be(Some(MemberStatus.Up))
|
||||||
}
|
memberStatus(fourth) must be(None)
|
||||||
|
|
||||||
runOn(fourth) {
|
|
||||||
for (n ← 1 to 5) {
|
|
||||||
assertNotMovedUp(joining = true)
|
|
||||||
// wait and then check again
|
// wait and then check again
|
||||||
Thread.sleep(1.second.dilated.toMillis)
|
Thread.sleep(1.second.dilated.toMillis)
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@@ -8,8 +8,7 @@ import scala.concurrent.duration._
 import com.typesafe.config.ConfigFactory
 import akka.actor.Actor
 import akka.actor.Props
-import akka.cluster.ClusterEvent.InstantClusterState
-import akka.cluster.ClusterEvent.InstantMemberJoined
+import akka.cluster.ClusterEvent.CurrentClusterState
 import akka.remote.testkit.MultiNodeConfig
 import akka.remote.testkit.MultiNodeSpec
 import akka.remote.transport.ThrottlerTransportAdapter.Direction

@@ -48,20 +47,11 @@ abstract class InitialHeartbeatSpec
       awaitClusterUp(first)
 
       runOn(first) {
-        val joinLatch = TestLatch()
-        cluster.subscribe(system.actorOf(Props(new Actor {
-          def receive = {
-            case state: InstantClusterState ⇒
-              if (state.members.exists(_.address == secondAddress))
-                joinLatch.countDown()
-            case InstantMemberJoined(m) ⇒
-              if (m.address == secondAddress)
-                joinLatch.countDown()
-          }
-        })), classOf[InstantMemberJoined])
-
         within(10 seconds) {
-          joinLatch.await
+          awaitCond {
+            cluster.sendCurrentClusterState(testActor)
+            expectMsgType[CurrentClusterState].members.exists(_.address == secondAddress)
+          }
         }
       }
       runOn(second) {
@@ -48,7 +48,7 @@ abstract class JoinSeedNodeSpec
 
       runOn(seed1, seed2, seed3) {
         cluster.joinSeedNodes(seedNodes)
-        awaitUpConvergence(3)
+        awaitMembersUp(3)
       }
       enterBarrier("after-1")
     }

@@ -57,7 +57,7 @@ abstract class JoinSeedNodeSpec
       runOn(ordinary1, ordinary2) {
         cluster.joinSeedNodes(seedNodes)
       }
-      awaitUpConvergence(roles.size)
+      awaitMembersUp(roles.size)
       enterBarrier("after-2")
     }
   }
@@ -52,7 +52,7 @@ abstract class JoinTwoClustersSpec
         cluster.join(c1)
       }
 
-      awaitUpConvergence(numberOfMembers = 2)
+      awaitMembersUp(numberOfMembers = 2)
 
       assertLeader(a1, a2)
       assertLeader(b1, b2)

@@ -65,7 +65,7 @@ abstract class JoinTwoClustersSpec
       }
 
       runOn(a1, a2, b1, b2) {
-        awaitUpConvergence(numberOfMembers = 4)
+        awaitMembersUp(numberOfMembers = 4)
       }
 
       assertLeader(a1, a2, b1, b2)

@@ -80,7 +80,7 @@ abstract class JoinTwoClustersSpec
         cluster.join(c1)
       }
 
-      awaitUpConvergence(numberOfMembers = 6)
+      awaitMembersUp(numberOfMembers = 6)
 
       assertLeader(a1, a2, b1, b2, c1, c2)
@@ -60,7 +60,7 @@ abstract class LeaderDowningNodeThatIsUnreachableSpec(multiNodeConfig: LeaderDow
 
       // --- HERE THE LEADER SHOULD DETECT FAILURE AND AUTO-DOWN THE UNREACHABLE NODE ---
 
-      awaitUpConvergence(numberOfMembers = 3, canNotBePartOfMemberRing = Set(fourthAddress), 30.seconds)
+      awaitMembersUp(numberOfMembers = 3, canNotBePartOfMemberRing = Set(fourthAddress), 30.seconds)
     }
 
     runOn(fourth) {

@@ -70,7 +70,7 @@ abstract class LeaderDowningNodeThatIsUnreachableSpec(multiNodeConfig: LeaderDow
     runOn(second, third) {
       enterBarrier("down-fourth-node")
 
-      awaitUpConvergence(numberOfMembers = 3, canNotBePartOfMemberRing = Set(fourthAddress), 30.seconds)
+      awaitMembersUp(numberOfMembers = 3, canNotBePartOfMemberRing = Set(fourthAddress), 30.seconds)
     }
 
     enterBarrier("await-completion-1")

@@ -90,7 +90,7 @@ abstract class LeaderDowningNodeThatIsUnreachableSpec(multiNodeConfig: LeaderDow
 
      // --- HERE THE LEADER SHOULD DETECT FAILURE AND AUTO-DOWN THE UNREACHABLE NODE ---
 
-      awaitUpConvergence(numberOfMembers = 2, canNotBePartOfMemberRing = Set(secondAddress), 30.seconds)
+      awaitMembersUp(numberOfMembers = 2, canNotBePartOfMemberRing = Set(secondAddress), 30.seconds)
     }
 
     runOn(second) {

@@ -100,7 +100,7 @@ abstract class LeaderDowningNodeThatIsUnreachableSpec(multiNodeConfig: LeaderDow
     runOn(third) {
       enterBarrier("down-second-node")
 
-      awaitUpConvergence(numberOfMembers = 2, canNotBePartOfMemberRing = Set(secondAddress), 30 seconds)
+      awaitMembersUp(numberOfMembers = 2, canNotBePartOfMemberRing = Set(secondAddress), 30 seconds)
     }
 
     enterBarrier("await-completion-2")
@@ -102,7 +102,7 @@ abstract class LeaderElectionSpec(multiNodeConfig: LeaderElectionMultiNodeConfig
       enterBarrier("after-unavailable" + n)
 
       enterBarrier("after-down" + n)
-      awaitUpConvergence(currentRoles.size - 1)
+      awaitMembersUp(currentRoles.size - 1)
       val nextExpectedLeader = remainingRoles.head
       clusterView.isLeader must be(myself == nextExpectedLeader)
       assertLeaderIn(remainingRoles)
@@ -61,17 +61,13 @@ abstract class LeaderLeavingSpec
 
     } else {
 
-      val leavingLatch = TestLatch()
       val exitingLatch = TestLatch()
 
       cluster.subscribe(system.actorOf(Props(new Actor {
         def receive = {
           case state: CurrentClusterState ⇒
-            if (state.members.exists(m ⇒ m.address == oldLeaderAddress && m.status == Leaving))
-              leavingLatch.countDown()
             if (state.members.exists(m ⇒ m.address == oldLeaderAddress && m.status == Exiting))
               exitingLatch.countDown()
-          case MemberLeft(m) if m.address == oldLeaderAddress ⇒ leavingLatch.countDown()
           case MemberExited(m) if m.address == oldLeaderAddress ⇒ exitingLatch.countDown()
           case _ ⇒ // ignore
         }

@@ -83,9 +79,6 @@ abstract class LeaderLeavingSpec
       val expectedAddresses = roles.toSet map address
       awaitCond(clusterView.members.map(_.address) == expectedAddresses)
 
-      // verify that the LEADER is LEAVING
-      leavingLatch.await
-
       // verify that the LEADER is EXITING
       exitingLatch.await
@@ -83,7 +83,7 @@ abstract class MBeanSpec
       }
       enterBarrier("joined")
 
-      awaitUpConvergence(4)
+      awaitMembersUp(4)
       assertMembers(clusterView.members, roles.map(address(_)): _*)
       awaitCond(mbeanServer.getAttribute(mbeanName, "MemberStatus") == "Up")
       val expectedMembers = roles.sorted.map(address(_)).mkString(",")

@@ -115,7 +115,7 @@ abstract class MBeanSpec
       enterBarrier("fourth-down")
 
       runOn(first, second, third) {
-        awaitUpConvergence(3, canNotBePartOfMemberRing = Set(fourthAddress))
+        awaitMembersUp(3, canNotBePartOfMemberRing = Set(fourthAddress))
         assertMembers(clusterView.members, first, second, third)
         awaitCond(mbeanServer.getAttribute(mbeanName, "Unreachable") == "")
       }

@@ -129,7 +129,7 @@ abstract class MBeanSpec
       }
       enterBarrier("third-left")
       runOn(first, second) {
-        awaitUpConvergence(2)
+        awaitMembersUp(2)
         assertMembers(clusterView.members, first, second)
         val expectedMembers = Seq(first, second).sorted.map(address(_)).mkString(",")
         awaitCond(mbeanServer.getAttribute(mbeanName, "Members") == expectedMembers)
@@ -1,66 +0,0 @@
-/**
- * Copyright (C) 2009-2013 Typesafe Inc. <http://www.typesafe.com>
- */
-package akka.cluster
-
-import scala.collection.immutable.SortedSet
-import com.typesafe.config.ConfigFactory
-import org.scalatest.BeforeAndAfter
-import akka.remote.testkit.MultiNodeConfig
-import akka.remote.testkit.MultiNodeSpec
-import akka.testkit._
-import scala.concurrent.duration._
-import akka.actor.Props
-import akka.actor.Actor
-
-object MembershipChangeListenerJoinMultiJvmSpec extends MultiNodeConfig {
-  val first = role("first")
-  val second = role("second")
-
-  commonConfig(debugConfig(on = false).withFallback(MultiNodeClusterSpec.clusterConfigWithFailureDetectorPuppet))
-}
-
-class MembershipChangeListenerJoinMultiJvmNode1 extends MembershipChangeListenerJoinSpec
-class MembershipChangeListenerJoinMultiJvmNode2 extends MembershipChangeListenerJoinSpec
-
-abstract class MembershipChangeListenerJoinSpec
-  extends MultiNodeSpec(MembershipChangeListenerJoinMultiJvmSpec)
-  with MultiNodeClusterSpec {
-
-  import MembershipChangeListenerJoinMultiJvmSpec._
-  import ClusterEvent._
-
-  "A registered MembershipChangeListener" must {
-    "be notified when new node is JOINING" taggedAs LongRunningTest in {
-
-      runOn(first) {
-        cluster.join(first)
-        val joinLatch = TestLatch()
-        val expectedAddresses = Set(first, second) map address
-        cluster.subscribe(system.actorOf(Props(new Actor {
-          var members = Set.empty[Member]
-          def receive = {
-            case state: CurrentClusterState ⇒ members = state.members
-            case MemberJoined(m) ⇒
-              members = members - m + m
-              if (members.map(_.address) == expectedAddresses)
-                joinLatch.countDown()
-            case _ ⇒ // ignore
-          }
-        })), classOf[MemberEvent])
-        enterBarrier("registered-listener")
-
-        joinLatch.await
-      }
-
-      runOn(second) {
-        enterBarrier("registered-listener")
-        cluster.join(first)
-      }
-
-      awaitUpConvergence(2)
-
-      enterBarrier("after")
-    }
-  }
-}
@@ -1,75 +0,0 @@
-/**
- * Copyright (C) 2009-2013 Typesafe Inc. <http://www.typesafe.com>
- */
-package akka.cluster
-
-import scala.collection.immutable.SortedSet
-import org.scalatest.BeforeAndAfter
-import com.typesafe.config.ConfigFactory
-import akka.remote.testkit.MultiNodeConfig
-import akka.remote.testkit.MultiNodeSpec
-import akka.testkit._
-import akka.actor.Address
-import akka.actor.Props
-import akka.actor.Actor
-import akka.cluster.MemberStatus._
-
-object MembershipChangeListenerLeavingMultiJvmSpec extends MultiNodeConfig {
-  val first = role("first")
-  val second = role("second")
-  val third = role("third")
-
-  commonConfig(
-    debugConfig(on = false)
-      .withFallback(ConfigFactory.parseString("""
-        akka.cluster.unreachable-nodes-reaper-interval = 300 s # turn "off"
-        """))
-      .withFallback(MultiNodeClusterSpec.clusterConfigWithFailureDetectorPuppet))
-}
-
-class MembershipChangeListenerLeavingMultiJvmNode1 extends MembershipChangeListenerLeavingSpec
-class MembershipChangeListenerLeavingMultiJvmNode2 extends MembershipChangeListenerLeavingSpec
-class MembershipChangeListenerLeavingMultiJvmNode3 extends MembershipChangeListenerLeavingSpec
-
-abstract class MembershipChangeListenerLeavingSpec
-  extends MultiNodeSpec(MembershipChangeListenerLeavingMultiJvmSpec)
-  with MultiNodeClusterSpec {
-
-  import MembershipChangeListenerLeavingMultiJvmSpec._
-  import ClusterEvent._
-
-  "A registered MembershipChangeListener" must {
-    "be notified when new node is LEAVING" taggedAs LongRunningTest in {
-
-      awaitClusterUp(first, second, third)
-
-      runOn(first) {
-        enterBarrier("registered-listener")
-        cluster.leave(second)
-      }
-
-      runOn(second) {
-        enterBarrier("registered-listener")
-      }
-
-      runOn(third) {
-        val latch = TestLatch()
-        val secondAddress = address(second)
-        cluster.subscribe(system.actorOf(Props(new Actor {
-          def receive = {
-            case state: CurrentClusterState ⇒
-              if (state.members.exists(m ⇒ m.address == secondAddress && m.status == Leaving))
-                latch.countDown()
-            case MemberLeft(m) if m.address == secondAddress ⇒
-              latch.countDown()
-            case _ ⇒ // ignore
-          }
-        })), classOf[MemberEvent])
-        enterBarrier("registered-listener")
-        latch.await
-      }
-
-      enterBarrier("finished")
-    }
-  }
-}
@@ -20,9 +20,8 @@ object MinMembersBeforeUpMultiJvmSpec extends MultiNodeConfig {
   val second = role("second")
   val third = role("third")
 
-  commonConfig(debugConfig(on = false).withFallback(ConfigFactory.parseString("""
-    # turn off unreachable reaper
-    akka.cluster.min-nr-of-members = 3""")).
+  commonConfig(debugConfig(on = false).withFallback(ConfigFactory.parseString(
+    "akka.cluster.min-nr-of-members = 3")).
     withFallback(MultiNodeClusterSpec.clusterConfigWithFailureDetectorPuppet))
 }

@@ -44,8 +43,12 @@ abstract class MinMembersBeforeUpSpec
     cluster.registerOnMemberUp(onUpLatch.countDown())
 
     runOn(first) {
-      startClusterNode()
-      awaitCond(clusterView.status == Joining)
+      cluster join myself
+      awaitCond {
+        val result = clusterView.status == Joining
+        clusterView.refreshCurrentState()
+        result
+      }
     }
     enterBarrier("first-started")

@@ -56,7 +59,11 @@ abstract class MinMembersBeforeUpSpec
     }
     runOn(first, second) {
       val expectedAddresses = Set(first, second) map address
-      awaitCond(clusterView.members.map(_.address) == expectedAddresses)
+      awaitCond {
+        val result = clusterView.members.map(_.address) == expectedAddresses
+        clusterView.refreshCurrentState()
+        result
+      }
       clusterView.members.map(_.status) must be(Set(Joining))
       // and it should not change
       1 to 5 foreach { _ ⇒
@@ -151,22 +151,13 @@ trait MultiNodeClusterSpec extends Suite with STMultiNodeSpec { self: MultiNodeS
     clusterView.self
   }
 
-  /**
-   * Initialize the cluster with the specified member
-   * nodes (roles). First node will be started first
-   * and others will join the first.
-   */
-  def startCluster(roles: RoleName*): Unit = awaitStartCluster(false, roles.to[immutable.Seq])
-
   /**
    * Initialize the cluster of the specified member
    * nodes (roles) and wait until all joined and `Up`.
    * First node will be started first and others will join
    * the first.
    */
-  def awaitClusterUp(roles: RoleName*): Unit = awaitStartCluster(true, roles.to[immutable.Seq])
-
-  private def awaitStartCluster(upConvergence: Boolean = true, roles: immutable.Seq[RoleName]): Unit = {
+  def awaitClusterUp(roles: RoleName*): Unit = {
     runOn(roles.head) {
       // make sure that the node-to-join is started before other join
       startClusterNode()

@@ -175,8 +166,8 @@ trait MultiNodeClusterSpec extends Suite with STMultiNodeSpec { self: MultiNodeS
     if (roles.tail.contains(myself)) {
       cluster.join(roles.head)
     }
-    if (upConvergence && roles.contains(myself)) {
-      awaitUpConvergence(numberOfMembers = roles.length)
+    if (roles.contains(myself)) {
+      awaitMembersUp(numberOfMembers = roles.length)
     }
     enterBarrier(roles.map(_.name).mkString("-") + "-joined")
   }

@@ -212,10 +203,10 @@ trait MultiNodeClusterSpec extends Suite with STMultiNodeSpec { self: MultiNodeS
   }
 
   /**
-   * Wait until the expected number of members has status Up and convergence has been reached.
+   * Wait until the expected number of members has status Up has been reached.
    * Also asserts that nodes in the 'canNotBePartOfMemberRing' are *not* part of the cluster ring.
    */
-  def awaitUpConvergence(
+  def awaitMembersUp(
     numberOfMembers: Int,
     canNotBePartOfMemberRing: Set[Address] = Set.empty,
     timeout: FiniteDuration = 20.seconds): Unit = {
@@ -45,16 +45,12 @@ abstract class NodeLeavingAndExitingSpec
 
       runOn(first, third) {
         val secondAddess = address(second)
-        val leavingLatch = TestLatch()
         val exitingLatch = TestLatch()
         cluster.subscribe(system.actorOf(Props(new Actor {
           def receive = {
             case state: CurrentClusterState ⇒
-              if (state.members.exists(m ⇒ m.address == secondAddess && m.status == Leaving))
-                leavingLatch.countDown()
               if (state.members.exists(m ⇒ m.address == secondAddess && m.status == Exiting))
                 exitingLatch.countDown()
-            case MemberLeft(m) if m.address == secondAddess ⇒ leavingLatch.countDown()
             case MemberExited(m) if m.address == secondAddess ⇒ exitingLatch.countDown()
             case MemberRemoved(m) ⇒ // not tested here

@@ -70,9 +66,6 @@ abstract class NodeLeavingAndExitingSpec
         val expectedAddresses = roles.toSet map address
         awaitCond(clusterView.members.map(_.address) == expectedAddresses)
 
-        // Verify that 'second' node is set to LEAVING
-        leavingLatch.await
-
         // Verify that 'second' node is set to EXITING
         exitingLatch.await
@@ -97,7 +97,7 @@ abstract class RestartFirstSeedNodeSpec
       }
       runOn(seed2, seed3) {
         cluster.joinSeedNodes(seedNodes)
-        awaitUpConvergence(3)
+        awaitMembersUp(3)
       }
       enterBarrier("started")

@@ -107,7 +107,7 @@ abstract class RestartFirstSeedNodeSpec
         seed1System.awaitTermination(remaining)
       }
       runOn(seed2, seed3) {
-        awaitUpConvergence(2, canNotBePartOfMemberRing = Set(seedNodes.head))
+        awaitMembersUp(2, canNotBePartOfMemberRing = Set(seedNodes.head))
         awaitCond(clusterView.unreachableMembers.forall(_.address != seedNodes.head))
       }
       enterBarrier("seed1-shutdown")

@@ -119,7 +119,7 @@ abstract class RestartFirstSeedNodeSpec
         awaitCond(Cluster(restartedSeed1System).readView.members.forall(_.status == Up))
       }
       runOn(seed2, seed3) {
-        awaitUpConvergence(3)
+        awaitMembersUp(3)
       }
       enterBarrier("seed1-restarted")
@@ -45,7 +45,7 @@ abstract class SingletonClusterSpec(multiNodeConfig: SingletonClusterMultiNodeCo
   "A cluster of 2 nodes" must {
 
     "become singleton cluster when started with 'auto-join=on' and 'seed-nodes=[]'" taggedAs LongRunningTest in {
-      awaitUpConvergence(1)
+      awaitMembersUp(1)
       clusterView.isSingletonCluster must be(true)
 
      enterBarrier("after-1")

@@ -66,7 +66,7 @@ abstract class SingletonClusterSpec(multiNodeConfig: SingletonClusterMultiNodeCo
 
        markNodeAsUnavailable(secondAddress)
 
-        awaitUpConvergence(numberOfMembers = 1, canNotBePartOfMemberRing = Set(secondAddress), 30.seconds)
+        awaitMembersUp(numberOfMembers = 1, canNotBePartOfMemberRing = Set(secondAddress), 30.seconds)
        clusterView.isSingletonCluster must be(true)
        awaitCond(clusterView.isLeader)
      }
@@ -88,13 +88,13 @@ abstract class SplitBrainSpec(multiNodeConfig: SplitBrainMultiNodeConfig)
 
      runOn(side1: _*) {
        // auto-down = on
-        awaitUpConvergence(side1.size, side2.toSet map address)
+        awaitMembersUp(side1.size, side2.toSet map address)
        assertLeader(side1: _*)
      }
 
      runOn(side2: _*) {
        // auto-down = on
-        awaitUpConvergence(side2.size, side1.toSet map address)
+        awaitMembersUp(side2.size, side1.toSet map address)
        assertLeader(side2: _*)
      }
@@ -721,7 +721,7 @@ abstract class StressSpec
      runOn(currentRoles.last) {
        cluster.join(roles.head)
      }
-      awaitUpConvergence(currentRoles.size, timeout = remaining)
+      awaitMembersUp(currentRoles.size, timeout = remaining)
    }
 
  }

@@ -741,7 +741,7 @@ abstract class StressSpec
        if (toSeedNodes) cluster.joinSeedNodes(seedNodes.toIndexedSeq map address)
        else cluster.join(roles.head)
      }
-      awaitUpConvergence(currentRoles.size, timeout = remaining)
+      awaitMembersUp(currentRoles.size, timeout = remaining)
    }
 
  }

@@ -781,7 +781,7 @@ abstract class StressSpec
          testConductor.shutdown(removeRole, 0).await
        }
      }
-      awaitUpConvergence(currentRoles.size, timeout = remaining)
+      awaitMembersUp(currentRoles.size, timeout = remaining)
    }
  }

@@ -814,7 +814,7 @@ abstract class StressSpec
          testConductor.shutdown(r, 0).await
        }
      }
-      awaitUpConvergence(currentRoles.size, timeout = remaining)
+      awaitMembersUp(currentRoles.size, timeout = remaining)
    }
  }
  awaitClusterResult

@@ -860,7 +860,7 @@ abstract class StressSpec
        Some(sys)
      } else previousAS
      runOn(usedRoles: _*) {
-        awaitUpConvergence(
+        awaitMembersUp(
          nbrUsedRoles + activeRoles.size,
          canNotBePartOfMemberRing = allPreviousAddresses,
          timeout = remaining)

@@ -884,7 +884,7 @@ abstract class StressSpec
      loop(1, None, Set.empty) foreach { _.shutdown }
      within(loopDuration) {
        runOn(usedRoles: _*) {
-          awaitUpConvergence(nbrUsedRoles, timeout = remaining)
+          awaitMembersUp(nbrUsedRoles, timeout = remaining)
          phiObserver ! Reset
          statsObserver ! Reset
        }

@@ -989,7 +989,7 @@ abstract class StressSpec
      runOn((seedNodes ++ otherNodesJoiningSeedNodes): _*) {
        reportResult {
          cluster.joinSeedNodes(seedNodes.toIndexedSeq map address)
-          awaitUpConvergence(size, timeout = remaining)
+          awaitMembersUp(size, timeout = remaining)
        }
      }
@@ -73,7 +73,7 @@ abstract class SunnyWeatherSpec
      for (n ← 1 to 30) {
        enterBarrier("period-" + n)
        unexpected.get must be(SortedSet.empty)
-        awaitUpConvergence(roles.size)
+        awaitMembersUp(roles.size)
        assertLeaderIn(roles)
        if (n % 5 == 0) log.debug("Passed period [{}]", n)
        Thread.sleep(1000)
@@ -47,8 +47,7 @@ abstract class TransitionSpec
 val statusOption = (clusterView.members ++ clusterView.unreachableMembers).collectFirst {
 case m if m.address == address ⇒ m.status
 }
-statusOption must not be (None)
-statusOption.get
+statusOption.getOrElse(Removed)
 }

 def memberAddresses: Set[Address] = clusterView.members.map(_.address)

@@ -62,11 +61,15 @@ abstract class TransitionSpec
 }

 def awaitMembers(addresses: Address*): Unit = awaitCond {
-memberAddresses == addresses.toSet
+val result = memberAddresses == addresses.toSet
+clusterView.refreshCurrentState()
+result
 }

 def awaitMemberStatus(address: Address, status: MemberStatus): Unit = awaitCond {
-memberStatus(address) == status
+val result = memberStatus(address) == status
+clusterView.refreshCurrentState()
+result
 }

 def leaderActions(): Unit =

@@ -111,11 +114,11 @@ abstract class TransitionSpec
 "start nodes as singleton clusters" taggedAs LongRunningTest in {

 runOn(first) {
-startClusterNode()
-awaitCond(clusterView.isSingletonCluster)
+cluster join myself
 awaitMemberStatus(myself, Joining)
 leaderActions()
 awaitMemberStatus(myself, Up)
+awaitCond(clusterView.isSingletonCluster)
 }

 enterBarrier("after-1")

@@ -188,14 +191,14 @@ abstract class TransitionSpec
 leaderActions()
 awaitMemberStatus(first, Up)
 awaitMemberStatus(second, Up)
-awaitMemberStatus(third, Joining)
+awaitMemberStatus(third, Up)
 }
 enterBarrier("leader-actions-3")

 // leader gossipTo first non-leader
 leader(first, second, third) gossipTo nonLeader(first, second, third).head
 runOn(nonLeader(first, second, third).head) {
-awaitMemberStatus(third, Joining)
+awaitMemberStatus(third, Up)
 awaitCond(seenLatestGossip == Set(leader(first, second, third), myself))
 }
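The awaitMembers and awaitMemberStatus rewrites above share one pattern: evaluate the predicate against the current snapshot first, then ask the view to refresh its published state so the next awaitCond poll observes fresh gossip instead of a stale snapshot. The generic shape, as an illustrative wrapper (awaitRefreshed is not part of the spec):

    // Illustrative helper: evaluate, then refresh for the next poll.
    def awaitRefreshed(predicate: ⇒ Boolean): Unit = awaitCond {
      val result = predicate
      clusterView.refreshCurrentState()
      result
    }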
@@ -130,7 +130,7 @@ abstract class UnreachableNodeRejoinsClusterSpec(multiNodeConfig: UnreachableNod
 }

 runOn(allBut(victim): _*) {
-awaitUpConvergence(roles.size - 1, Set(victim))
+awaitMembersUp(roles.size - 1, Set(victim))
 // eventually removed
 awaitCond(clusterView.unreachableMembers.isEmpty, 15 seconds)
 }

@@ -152,7 +152,7 @@ abstract class UnreachableNodeRejoinsClusterSpec(multiNodeConfig: UnreachableNod
 cluster join master
 }

-awaitUpConvergence(roles.size)
+awaitMembersUp(roles.size)

 endBarrier
 }
@@ -60,11 +60,7 @@ class ClusterDomainEventPublisherSpec extends AkkaSpec

 "ClusterDomainEventPublisher" must {

-"not publish MemberUp when there is no convergence" in {
-publisher ! PublishChanges(g2)
-}
-
-"publish MemberEvents when there is convergence" in {
+"publish MemberUp" in {
 publisher ! PublishChanges(g2)
 publisher ! PublishChanges(g3)
 memberSubscriber.expectMsg(MemberUp(bUp))

@@ -73,12 +69,12 @@ class ClusterDomainEventPublisherSpec extends AkkaSpec

 "publish leader changed when new leader after convergence" in {
 publisher ! PublishChanges(g4)
-memberSubscriber.expectNoMsg(1 second)
-
-publisher ! PublishChanges(g5)
 memberSubscriber.expectMsg(MemberUp(dUp))
 memberSubscriber.expectMsg(MemberUp(bUp))
 memberSubscriber.expectMsg(MemberUp(cUp))
+memberSubscriber.expectNoMsg(1 second)
+
+publisher ! PublishChanges(g5)
 memberSubscriber.expectMsg(LeaderChanged(Some(dUp.address)))
 }

@@ -99,20 +95,22 @@ class ClusterDomainEventPublisherSpec extends AkkaSpec
 publisher ! PublishChanges(g6)
 memberSubscriber.expectNoMsg(1 second)
 publisher ! PublishChanges(g7)
+memberSubscriber.expectMsg(MemberExited(aExiting))
 memberSubscriber.expectNoMsg(1 second)
 // at the removed member a an empty gossip is the last thing
 publisher ! PublishChanges(Gossip.empty)
-memberSubscriber.expectMsg(MemberLeft(aLeaving))
-memberSubscriber.expectMsg(MemberExited(aExiting))
-memberSubscriber.expectMsg(LeaderChanged(Some(bUp.address)))
 memberSubscriber.expectMsg(MemberRemoved(aRemoved))
 memberSubscriber.expectMsg(MemberRemoved(bRemoved))
 memberSubscriber.expectMsg(MemberRemoved(cRemoved))
+memberSubscriber.expectMsg(LeaderChanged(Some(bUp.address)))
 memberSubscriber.expectMsg(LeaderChanged(None))
 }

 "not publish leader changed when not convergence" in {
 publisher ! PublishChanges(g4)
+memberSubscriber.expectMsg(MemberUp(dUp))
+memberSubscriber.expectMsg(MemberUp(bUp))
+memberSubscriber.expectMsg(MemberUp(cUp))
 memberSubscriber.expectNoMsg(1 second)
 }

@@ -133,7 +131,6 @@ class ClusterDomainEventPublisherSpec extends AkkaSpec
 "send CurrentClusterState when subscribe" in {
 val subscriber = TestProbe()
 publisher ! Subscribe(subscriber.ref, classOf[ClusterDomainEvent])
-subscriber.expectMsgType[InstantClusterState]
 subscriber.expectMsgType[CurrentClusterState]
 // but only to the new subscriber
 memberSubscriber.expectNoMsg(1 second)

@@ -154,11 +151,8 @@ class ClusterDomainEventPublisherSpec extends AkkaSpec
 "publish clean state when PublishStart" in {
 val subscriber = TestProbe()
 publisher ! Subscribe(subscriber.ref, classOf[ClusterDomainEvent])
-subscriber.expectMsgType[InstantClusterState]
 subscriber.expectMsgType[CurrentClusterState]
 publisher ! PublishChanges(g3)
-subscriber.expectMsg(InstantMemberUp(bUp))
-subscriber.expectMsg(InstantMemberUp(cUp))
 subscriber.expectMsg(MemberUp(bUp))
 subscriber.expectMsg(MemberUp(cUp))
 subscriber.expectMsgType[SeenChanged]

@@ -167,18 +161,6 @@ class ClusterDomainEventPublisherSpec extends AkkaSpec
 subscriber.expectMsgType[CurrentClusterState] must be(CurrentClusterState())
 }

-"publish immediately when subscribing to InstantMemberEvent" in {
-val subscriber = TestProbe()
-publisher ! Subscribe(subscriber.ref, classOf[InstantMemberEvent])
-subscriber.expectMsgType[InstantClusterState]
-publisher ! PublishChanges(g2)
-subscriber.expectMsg(InstantMemberUp(bUp))
-subscriber.expectMsg(InstantMemberUp(cUp))
-subscriber.expectNoMsg(1 second)
-publisher ! PublishChanges(g3)
-subscriber.expectNoMsg(1 second)
-}
-
 "publish SeenChanged" in {
 val subscriber = TestProbe()
 publisher ! Subscribe(subscriber.ref, classOf[SeenChanged])
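With the Instant* events gone, an ordinary MemberEvent subscriber now receives each membership change as soon as the local node first sees it, which is exactly what the adjusted expectations above assert. A minimal subscriber written against the new semantics could look like this (illustrative, not part of this commit):

    import akka.actor.Actor
    import akka.cluster.Cluster
    import akka.cluster.ClusterEvent._

    // Illustrative subscriber: MemberUp(m) arrives when this node first
    // sees m as Up, no longer only after cluster-wide convergence.
    class MemberListener extends Actor {
      val cluster = Cluster(context.system)

      override def preStart(): Unit = cluster.subscribe(self, classOf[MemberEvent])
      override def postStop(): Unit = cluster.unsubscribe(self)

      def receive = {
        case state: CurrentClusterState ⇒ // snapshot sent on subscribe
        case MemberUp(member)           ⇒ // member first seen as Up
        case MemberExited(member)       ⇒ // member first seen as Exiting
        case MemberRemoved(member)      ⇒ // member removed from the ring
      }
    }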
@@ -44,7 +44,7 @@ class ClusterDomainEventSpec extends WordSpec with MustMatchers {
 val (g1, _) = converge(Gossip(members = SortedSet(a1)))
 val (g2, s2) = converge(Gossip(members = SortedSet(a1, b1, e1)))

-diffMemberEvents(g1, g2) must be(Seq(MemberUp(b1), MemberJoined(e1)))
+diffMemberEvents(g1, g2) must be(Seq(MemberUp(b1)))
 diffUnreachable(g1, g2) must be(Seq.empty)
 diffSeen(g1, g2) must be(Seq(SeenChanged(convergence = true, seenBy = s2)))
 }

@@ -53,7 +53,7 @@ class ClusterDomainEventSpec extends WordSpec with MustMatchers {
 val (g1, _) = converge(Gossip(members = SortedSet(a2, b1, c2)))
 val (g2, s2) = converge(Gossip(members = SortedSet(a1, b1, c1, e1)))

-diffMemberEvents(g1, g2) must be(Seq(MemberUp(a1), MemberLeft(c1), MemberJoined(e1)))
+diffMemberEvents(g1, g2) must be(Seq(MemberUp(a1)))
 diffUnreachable(g1, g2) must be(Seq.empty)
 diffSeen(g1, g2) must be(Seq(SeenChanged(convergence = true, seenBy = s2)))
 }

@@ -62,20 +62,10 @@ class ClusterDomainEventSpec extends WordSpec with MustMatchers {
 val g1 = Gossip(members = SortedSet(a1, b1), overview = GossipOverview(unreachable = Set(c2, e2)))
 val g2 = Gossip(members = SortedSet(a1), overview = GossipOverview(unreachable = Set(c2, b3, e3)))

-diffMemberEvents(g1, g2) must be(Seq(MemberDowned(b3), MemberDowned(e3)))
 diffUnreachable(g1, g2) must be(Seq(UnreachableMember(b3)))
 diffSeen(g1, g2) must be(Seq.empty)
 }

-"be produced for downed members" in {
-val (g1, _) = converge(Gossip(members = SortedSet(a1, b1)))
-val (g2, _) = converge(Gossip(members = SortedSet(a1, b1), overview = GossipOverview(unreachable = Set(e3))))
-
-diffMemberEvents(g1, g2) must be(Seq(MemberDowned(e3)))
-diffUnreachable(g1, g2) must be(Seq(UnreachableMember(e3)))
-diffSeen(g1, g2) must be(Seq.empty)
-}
-
 "be produced for removed members" in {
 val (g1, _) = converge(Gossip(members = SortedSet(a1, d1)))
 val (g2, s2) = converge(Gossip(members = SortedSet(a1)))
@@ -63,12 +63,10 @@ class ClusterSpec extends AkkaSpec(ClusterSpec.config) with ImplicitSender {
 "initially become singleton cluster when joining itself and reach convergence" in {
 clusterView.members.size must be(0) // auto-join = off
 cluster.join(selfAddress)
-Thread.sleep(5000)
+leaderActions() // Joining -> Up
 awaitCond(clusterView.isSingletonCluster)
 clusterView.self.address must be(selfAddress)
 clusterView.members.map(_.address) must be(Set(selfAddress))
-clusterView.status must be(MemberStatus.Joining)
-leaderActions()
 awaitCond(clusterView.status == MemberStatus.Up)
 }

@@ -76,7 +74,6 @@ class ClusterSpec extends AkkaSpec(ClusterSpec.config) with ImplicitSender {
 try {
 cluster.subscribe(testActor, classOf[ClusterEvent.ClusterDomainEvent])
 // first, is in response to the subscription
-expectMsgClass(classOf[ClusterEvent.InstantClusterState])
 expectMsgClass(classOf[ClusterEvent.CurrentClusterState])

 cluster.publishCurrentClusterState()
@@ -237,8 +237,8 @@ class ClusterSingletonManagerIsStuck(message: String) extends AkkaException(mess
 * `retryInterval` until the previous leader confirms that the hand
 * over has started, or this `maxHandOverRetries` limit has been
 * reached. If the retry limit is reached it takes the decision to be
-* the new leader if previous leader is unknown (typically removed or
-* downed), otherwise it initiates a new round by throwing
+* the new leader if previous leader is unknown (typically removed),
+* otherwise it initiates a new round by throwing
 * [[akka.contrib.pattern.ClusterSingletonManagerIsStuck]] and expecting
 * restart with fresh state. For a cluster with many members you might
 * need to increase this retry limit because it takes longer time to
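A concrete reading of the scaladoc above, with illustrative values: retryInterval = 1.second and maxHandOverRetries = 10 bound the wait for the previous leader at roughly ten seconds, after which the manager either becomes leader outright (previous leader known to be removed) or gives up by throwing the stuck exception:

    import scala.concurrent.duration._
    import akka.contrib.pattern.ClusterSingletonManagerIsStuck

    // Illustrative retry-bound arithmetic; not the FSM implementation.
    val retryInterval = 1.second
    val maxHandOverRetries = 10
    val maxHandOverWait = retryInterval * maxHandOverRetries // ~10 seconds

    def decideAfterRetries(previousLeaderRemoved: Boolean): Unit =
      if (previousLeaderRemoved) () // safe to take over without hand-over
      else throw new ClusterSingletonManagerIsStuck(
        "hand over not confirmed within " + maxHandOverWait)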
@@ -306,19 +306,13 @@ class ClusterSingletonManager(
 // Previous GetNext request delivered event and new GetNext is to be sent
 var leaderChangedReceived = true

-// keep track of previously downed members
-var downed = Map.empty[Address, Deadline]
 // keep track of previously removed members
 var removed = Map.empty[Address, Deadline]

-def addDowned(address: Address): Unit =
-downed += address -> (Deadline.now + 15.minutes)
-
 def addRemoved(address: Address): Unit =
 removed += address -> (Deadline.now + 15.minutes)

 def cleanupOverdueNotMemberAnyMore(): Unit = {
-downed = downed filter { case (address, deadline) ⇒ deadline.hasTimeLeft }
 removed = removed filter { case (address, deadline) ⇒ deadline.hasTimeLeft }
 }

@@ -336,7 +330,6 @@ class ClusterSingletonManager(
 require(!cluster.isTerminated, "Cluster node must not be terminated")

 // subscribe to cluster changes, re-subscribe when restart
-cluster.subscribe(self, classOf[MemberDowned])
 cluster.subscribe(self, classOf[MemberRemoved])

 setTimer(CleanupTimer, Cleanup, 1.minute, repeat = true)

@@ -385,8 +378,8 @@ class ClusterSingletonManager(
 if (leaderOption == selfAddressOption) {
 logInfo("NonLeader observed LeaderChanged: [{} -> myself]", previousLeaderOption)
 previousLeaderOption match {
 case None ⇒ gotoLeader(None)
-case Some(prev) if downed.contains(prev) ⇒ gotoLeader(None)
+case Some(prev) if removed.contains(prev) ⇒ gotoLeader(None)
 case Some(prev) ⇒
 peer(prev) ! HandOverToMe
 goto(BecomingLeader) using BecomingLeaderData(previousLeaderOption)

@@ -397,9 +390,9 @@ class ClusterSingletonManager(
 stay using NonLeaderData(leaderOption)
 }

-case Event(MemberDowned(m), NonLeaderData(Some(previousLeader))) if m.address == previousLeader ⇒
-logInfo("Previous leader downed [{}]", m.address)
-addDowned(m.address)
+case Event(MemberRemoved(m), NonLeaderData(Some(previousLeader))) if m.address == previousLeader ⇒
+logInfo("Previous leader removed [{}]", m.address)
+addRemoved(m.address)
 // transition when LeaderChanged
 stay using NonLeaderData(None)

@@ -426,9 +419,9 @@ class ClusterSingletonManager(
 stay
 }

-case Event(MemberDowned(m), BecomingLeaderData(Some(previousLeader))) if m.address == previousLeader ⇒
-logInfo("Previous leader [{}] downed", previousLeader)
-addDowned(m.address)
+case Event(MemberRemoved(m), BecomingLeaderData(Some(previousLeader))) if m.address == previousLeader ⇒
+logInfo("Previous leader [{}] removed", previousLeader)
+addRemoved(m.address)
 gotoLeader(None)

 case Event(TakeOverFromMe, BecomingLeaderData(None)) ⇒

@@ -471,7 +464,7 @@ class ClusterSingletonManager(
 case Some(a) if a == cluster.selfAddress ⇒
 // already leader
 stay
-case Some(a) if downed.contains(a) || removed.contains(a) ⇒
+case Some(a) if removed.contains(a) ⇒
 gotoHandingOver(singleton, singletonTerminated, handOverData, None)
 case Some(a) ⇒
 // send TakeOver request in case the new leader doesn't know previous leader

@@ -507,8 +500,8 @@ class ClusterSingletonManager(
 case Event(HandOverToMe, WasLeaderData(singleton, singletonTerminated, handOverData, _)) ⇒
 gotoHandingOver(singleton, singletonTerminated, handOverData, Some(sender))

-case Event(MemberDowned(m), WasLeaderData(singleton, singletonTerminated, handOverData, Some(newLeader))) if m.address == newLeader ⇒
-addDowned(m.address)
+case Event(MemberRemoved(m), WasLeaderData(singleton, singletonTerminated, handOverData, Some(newLeader))) if m.address == newLeader ⇒
+addRemoved(m.address)
 gotoHandingOver(singleton, singletonTerminated, handOverData, None)

 case Event(singletonHandOverMessage, d @ WasLeaderData(singleton, _, _, _)) if sender == singleton ⇒

@@ -554,13 +547,8 @@ class ClusterSingletonManager(
 case Event(_: CurrentClusterState, _) ⇒ stay
 case Event(MemberRemoved(m), _) ⇒
 logInfo("Member removed [{}]", m.address)
-// if self removed, it will be stopped onTranstion to NonLeader
 addRemoved(m.address)
 stay
-case Event(MemberDowned(m), _) ⇒
-logInfo("Member downed [{}]", m.address)
-addDowned(m.address)
-stay
 case Event(TakeOverFromMe, _) ⇒
 logInfo("Ignoring TakeOver request in [{}] from [{}].", stateName, sender.path.address)
 stay

@@ -587,7 +575,7 @@ class ClusterSingletonManager(
 }

 onTransition {
-case _ -> NonLeader if removed.contains(cluster.selfAddress) || downed.contains(cluster.selfAddress) ⇒
+case _ -> NonLeader if removed.contains(cluster.selfAddress) ⇒
 logInfo("Self removed, stopping ClusterSingletonManager")
 stop()
 }
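The bookkeeping that replaces the separate downed/removed maps above generalizes to a single expiring map keyed by address. Pulled out as a self-contained sketch (fifteen-minute retention as in the diff; the RemovedTracker name is hypothetical):

    import scala.concurrent.duration._
    import akka.actor.Address

    // Sketch of the expiring-map pattern used above: remember removed
    // addresses for a bounded time so a late LeaderChanged referring to
    // an already-removed node can be resolved without a hand-over.
    class RemovedTracker(retention: FiniteDuration = 15.minutes) {
      private var removed = Map.empty[Address, Deadline]

      def add(address: Address): Unit =
        removed += address -> (Deadline.now + retention)

      def contains(address: Address): Boolean =
        removed.get(address).exists(_.hasTimeLeft)

      // invoked periodically, e.g. from the Cleanup timer
      def cleanup(): Unit =
        removed = removed filter { case (_, deadline) ⇒ deadline.hasTimeLeft }
    }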
@@ -3,7 +3,6 @@ package sample.cluster.simple.japi;
 import akka.actor.UntypedActor;
 import akka.cluster.ClusterEvent.ClusterDomainEvent;
 import akka.cluster.ClusterEvent.CurrentClusterState;
-import akka.cluster.ClusterEvent.MemberJoined;
 import akka.cluster.ClusterEvent.MemberUp;
 import akka.cluster.ClusterEvent.UnreachableMember;
 import akka.event.Logging;

@@ -18,10 +17,6 @@ public class SimpleClusterListener extends UntypedActor {
 CurrentClusterState state = (CurrentClusterState) message;
 log.info("Current members: {}", state.members());

-} else if (message instanceof MemberJoined) {
-MemberJoined mJoined = (MemberJoined) message;
-log.info("Member joined: {}", mJoined);
-
 } else if (message instanceof MemberUp) {
 MemberUp mUp = (MemberUp) message;
 log.info("Member is Up: {}", mUp.member());
@@ -8,7 +8,7 @@ object SimpleClusterApp {

 def main(args: Array[String]): Unit = {

 // Override the configuration of the port
 // when specified as program argument
 if (args.nonEmpty) System.setProperty("akka.remote.netty.tcp.port", args(0))

@@ -18,8 +18,6 @@ object SimpleClusterApp {
 def receive = {
 case state: CurrentClusterState ⇒
 log.info("Current members: {}", state.members)
-case MemberJoined(member) ⇒
-log.info("Member joined: {}", member)
 case MemberUp(member) ⇒
 log.info("Member is Up: {}", member)
 case UnreachableMember(member) ⇒