Merged with master
This commit is contained in: commit f74c96b424
19 changed files with 604 additions and 299 deletions
@@ -307,7 +307,7 @@ class TypedActorSpec extends AkkaSpec(TypedActorSpec.config)
     "be able to call methods returning Scala Options" in {
       val t = newFooBar(Duration(500, "ms"))
       t.optionPigdog(200).get must be("Pigdog")
       t.optionPigdog(700) must be(None)
       t.optionPigdog(1000) must be(None)
       mustStop(t)
     }
@@ -27,6 +27,9 @@ akka {
     # how often should the node send out gossip information?
     gossip-interval = 1s
+
+    # how often should the node send out heartbeats?
+    heartbeat-interval = 1s
 
     # how often should the leader perform maintenance tasks?
     leader-actions-interval = 1s
@@ -46,5 +49,14 @@ akka {
 
       max-sample-size = 1000
     }
+
+    # If the tick-duration of the default scheduler is longer than the tick-duration
+    # configured here a dedicated scheduler will be used for periodic tasks of the cluster,
+    # otherwise the default scheduler is used.
+    # See akka.scheduler settings for more details about the HashedWheelTimer.
+    scheduler {
+      tick-duration = 33ms
+      ticks-per-wheel = 512
+    }
   }
 }
@@ -75,7 +75,7 @@ class AccrualFailureDetector(
    * Records a heartbeat for a connection.
    */
   @tailrec
-  final def heartbeat(connection: Address): Unit = {
+  final def heartbeat(connection: Address) {
     log.debug("Heartbeat from connection [{}] ", connection)
 
     val oldState = state.get
@@ -165,7 +165,8 @@ class AccrualFailureDetector(
       else PhiFactor * timestampDiff / mean
     }
 
-    log.debug("Phi value [{}] and threshold [{}] for connection [{}] ", phi, threshold, connection)
+    // FIXME change to debug log level, when failure detector is stable
+    log.info("Phi value [{}] and threshold [{}] for connection [{}] ", phi, threshold, connection)
     phi
   }
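Note: the hunk above sits inside the phi calculation of the accrual failure detector. As orientation (a minimal sketch, not the committed implementation): the simplified phi grows linearly with the silence since the last heartbeat, measured in units of the mean heartbeat interval, so a connection that stays quiet for several mean intervals quickly crosses the threshold.

// Sketch of the simplified phi shown above; the names mirror the diff but the
// function itself is illustrative, not the committed code.
def phi(timestampDiff: Double, mean: Double, PhiFactor: Double): Double =
  if (mean == 0.0) 0.0 // assumption: no samples recorded yet, nothing to suspect
  else PhiFactor * timestampDiff / mean

// e.g. PhiFactor = 0.5 and a silence of 4 mean intervals gives phi = 2.0,
// which is then compared against the configured threshold.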
@@ -14,19 +14,19 @@ import akka.pattern.ask
 import akka.util._
 import akka.util.duration._
 import akka.ConfigurationException
 
 import java.util.concurrent.atomic.{ AtomicReference, AtomicBoolean }
 import java.util.concurrent.TimeUnit._
 import java.util.concurrent.TimeoutException
 import akka.jsr166y.ThreadLocalRandom
 
 import java.lang.management.ManagementFactory
+import java.io.Closeable
 import javax.management._
 
 import scala.collection.immutable.{ Map, SortedSet }
 import scala.annotation.tailrec
 
 import com.google.protobuf.ByteString
+import akka.util.internal.HashedWheelTimer
+import akka.dispatch.MonitorableThreadFactory
+import MemberStatus._
 
 /**
  * Interface for membership change listener.
@@ -44,6 +44,8 @@ trait MetaDataChangeListener {
 
 /**
  * Base trait for all cluster messages. All ClusterMessage's are serializable.
+ *
+ * FIXME Protobuf all ClusterMessages
  */
 sealed trait ClusterMessage extends Serializable
@@ -88,6 +90,7 @@ object ClusterLeaderAction {
 
 /**
  * Represents the address and the current status of a cluster member node.
+ *
  */
 class Member(val address: Address, val status: MemberStatus) extends ClusterMessage {
   override def hashCode = address.##
@@ -100,7 +103,6 @@ class Member(val address: Address, val status: MemberStatus) extends ClusterMessage {
  * Factory and Utility module for Member instances.
  */
 object Member {
-  import MemberStatus._
 
   /**
    * Sort Address by host and port
@@ -144,14 +146,20 @@ object Member {
 /**
  * Envelope adding a sender address to the gossip.
  */
-case class GossipEnvelope(sender: Member, gossip: Gossip) extends ClusterMessage
+case class GossipEnvelope(from: Address, gossip: Gossip) extends ClusterMessage
 
 /**
  * Defines the current status of a cluster member node
  *
  * Can be one of: Joining, Up, Leaving, Exiting and Down.
  */
-sealed trait MemberStatus extends ClusterMessage
+sealed trait MemberStatus extends ClusterMessage {
+  /**
+   * Using the same notion for 'unavailable' as 'non-convergence': DOWN and REMOVED.
+   */
+  def isUnavailable: Boolean = this == Down || this == Removed
+}
 
 object MemberStatus {
   case object Joining extends MemberStatus
   case object Up extends MemberStatus
@@ -159,11 +167,6 @@ object MemberStatus {
   case object Exiting extends MemberStatus
   case object Down extends MemberStatus
   case object Removed extends MemberStatus
-
-  /**
-   * Using the same notion for 'unavailable' as 'non-convergence': DOWN and REMOVED.
-   */
-  def isUnavailable(status: MemberStatus): Boolean = status == MemberStatus.Down || status == MemberStatus.Removed
 }
 
 /**
@@ -173,16 +176,42 @@ case class GossipOverview(
   seen: Map[Address, VectorClock] = Map.empty[Address, VectorClock],
   unreachable: Set[Member] = Set.empty[Member]) {
 
+  // FIXME document when nodes are put in 'unreachable' set and removed from 'members'
 
   override def toString =
     "GossipOverview(seen = [" + seen.mkString(", ") +
       "], unreachable = [" + unreachable.mkString(", ") +
       "])"
 }
 
+object Gossip {
+  val emptyMembers: SortedSet[Member] = SortedSet.empty
+}
+
 /**
- * Represents the state of the cluster; cluster ring membership, ring convergence, meta data - all versioned by a vector clock.
+ * Represents the state of the cluster; cluster ring membership, ring convergence, meta data -
+ * all versioned by a vector clock.
  *
+ * When a node is joining, a Member with status Joining is added to `members`.
+ * If the joining node was downed it is moved from `overview.unreachable` (status Down)
+ * to `members` (status Joining). It cannot rejoin if not first downed.
+ *
+ * When convergence is reached the leader changes the status of `members` from Joining
+ * to Up.
+ *
+ * When the failure detector considers a node unavailable it will be moved from
+ * `members` to `overview.unreachable`.
+ *
+ * When a node is downed, either manually or automatically, its status is changed to Down.
+ * It is also removed from the `overview.seen` table.
+ * The node will reside as Down in the `overview.unreachable` set until joining
+ * again, and it will then go through the normal joining procedure.
+ *
+ * When a Gossip is received, the version (vector clock) is used to determine if the
+ * received Gossip is newer or older than the current local Gossip. If the versions are
+ * in conflict, i.e. the vector clocks do not share the same history, the received and
+ * local Gossip are merged. When merged, the seen table is cleared.
+ *
+ * TODO document leaving, exiting and removed when that is implemented
+ *
  */
 case class Gossip(
   overview: GossipOverview = GossipOverview(),
@@ -192,12 +221,34 @@ case class Gossip(
   extends ClusterMessage // is a serializable cluster message
   with Versioned[Gossip] {
 
+  // FIXME can be disabled as optimization
+  assertInvariants
+  private def assertInvariants: Unit = {
+    val unreachableAndLive = members.intersect(overview.unreachable)
+    if (unreachableAndLive.nonEmpty)
+      throw new IllegalArgumentException("Same nodes in both members and unreachable is not allowed, got [%s]"
+        format unreachableAndLive.mkString(", "))
+
+    val allowedLiveMemberStatuses: Set[MemberStatus] = Set(Joining, Up, Leaving, Exiting)
+    def hasNotAllowedLiveMemberStatus(m: Member) = !allowedLiveMemberStatuses.contains(m.status)
+    if (members exists hasNotAllowedLiveMemberStatus)
+      throw new IllegalArgumentException("Live members must have status [%s], got [%s]"
+        format (allowedLiveMemberStatuses.mkString(", "),
+          (members filter hasNotAllowedLiveMemberStatus).mkString(", ")))
+
+    val seenButNotMember = overview.seen.keySet -- members.map(_.address) -- overview.unreachable.map(_.address)
+    if (seenButNotMember.nonEmpty)
+      throw new IllegalArgumentException("Nodes not part of cluster have marked the Gossip as seen, got [%s]"
+        format seenButNotMember.mkString(", "))
+  }
 
   /**
    * Increments the version for this 'Node'.
    */
-  def +(node: VectorClock.Node): Gossip = copy(version = version + node)
+  def :+(node: VectorClock.Node): Gossip = copy(version = version :+ node)
 
-  def +(member: Member): Gossip = {
+  def :+(member: Member): Gossip = {
     if (members contains member) this
     else this copy (members = members + member)
   }
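Note: as a reading aid for the scaladoc in the previous hunk, the membership lifecycle it describes boils down to the following status transitions. This is my summary of that doc, not code from the commit:

// Illustrative summary of the transitions described in the Gossip scaladoc;
// not committed code. Unreachable members are tracked separately in
// overview.unreachable and re-enter the ring via Down -> Joining.
import MemberStatus._
val transitions: Map[MemberStatus, Set[MemberStatus]] = Map(
  Joining -> Set(Up, Down),      // leader moves Joining to Up on convergence
  Up      -> Set(Leaving, Down),
  Leaving -> Set(Exiting, Down), // requires successful partition handoff
  Exiting -> Set(Removed),
  Down    -> Set(Joining))       // a downed node must rejoin from scratch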
@@ -220,25 +271,30 @@ case class Gossip(
     // 1. merge vector clocks
     val mergedVClock = this.version merge that.version
 
-    // 2. group all members by Address => Seq[Member]
-    val membersGroupedByAddress = (this.members.toSeq ++ that.members.toSeq).groupBy(_.address)
-
-    // 3. merge members by selecting the single Member with highest MemberStatus out of the Member groups
-    val mergedMembers =
-      SortedSet.empty[Member] ++
-        membersGroupedByAddress.values.foldLeft(Vector.empty[Member]) { (acc, members) ⇒
-          acc :+ members.reduceLeft(Member.highestPriorityOf(_, _))
-        }
-
-    // 4. merge meta-data
+    // 2. merge meta-data
     val mergedMeta = this.meta ++ that.meta
 
-    // 5. merge gossip overview
-    val mergedOverview = GossipOverview(
-      this.overview.seen ++ that.overview.seen,
-      this.overview.unreachable ++ that.overview.unreachable)
+    def pickHighestPriority(a: Seq[Member], b: Seq[Member]): Set[Member] = {
+      // group all members by Address => Seq[Member]
+      val groupedByAddress = (a ++ b).groupBy(_.address)
+      // pick highest MemberStatus
+      (Set.empty[Member] /: groupedByAddress) {
+        case (acc, (_, members)) ⇒ acc + members.reduceLeft(Member.highestPriorityOf)
+      }
+    }
 
-    Gossip(mergedOverview, mergedMembers, mergedMeta, mergedVClock)
+    // 3. merge unreachable by selecting the single Member with highest MemberStatus out of the Member groups
+    val mergedUnreachable = pickHighestPriority(this.overview.unreachable.toSeq, that.overview.unreachable.toSeq)
+
+    // 4. merge members by selecting the single Member with highest MemberStatus out of the Member groups,
+    //    and exclude unreachable
+    val mergedMembers = Gossip.emptyMembers ++ pickHighestPriority(this.members.toSeq, that.members.toSeq).
+      filterNot(mergedUnreachable.contains)
+
+    // 5. fresh seen table
+    val mergedSeen = Map.empty[Address, VectorClock]
+
+    Gossip(GossipOverview(mergedSeen, mergedUnreachable), mergedMembers, mergedMeta, mergedVClock)
   }
 
   override def toString =
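Note: Member.highestPriorityOf is referenced but not shown in this diff. Judging from the GossipSpec changes near the end of the page (Removed wins over Leaving, Leaving over Up, Up over Joining), it picks, for two members with the same address, the one whose status has the higher precedence. A hypothetical sketch under that assumption:

// Hypothetical sketch, not the committed code. The full precedence order is
// an assumption inferred from the merge tests in GossipSpec below.
import MemberStatus._
def highestPriorityOf(m1: Member, m2: Member): Member = {
  val precedence: Map[MemberStatus, Int] = Map(
    Removed -> 6, Down -> 5, Exiting -> 4, Leaving -> 3, Up -> 2, Joining -> 1)
  if (precedence(m1.status) >= precedence(m2.status)) m1 else m2
}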
@@ -250,6 +306,11 @@ case class Gossip(
       ")"
 }
 
+/**
+ * Sent at regular intervals for failure detection.
+ */
+case class Heartbeat(from: Address) extends ClusterMessage
+
 /**
  * Manages routing of the different cluster commands.
  * Instantiated as a single instance for each Cluster - e.g. commands are serialized to Cluster message after message.
@@ -278,7 +339,8 @@ private[akka] final class ClusterGossipDaemon(cluster: Cluster) extends Actor {
   val log = Logging(context.system, this)
 
   def receive = {
-    case GossipEnvelope(sender, gossip) ⇒ cluster.receive(sender, gossip)
+    case Heartbeat(from)                ⇒ cluster.receiveHeartbeat(from)
+    case GossipEnvelope(from, gossip)   ⇒ cluster.receiveGossip(from, gossip)
   }
 
   override def unhandled(unknown: Any) = log.error("[/system/cluster/gossip] can not respond to messages - received [{}]", unknown)
@@ -387,6 +449,7 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector)
   import clusterSettings._
 
   val selfAddress = remote.transport.address
+  private val selfHeartbeat = Heartbeat(selfAddress)
 
   private val vclockNode = VectorClock.Node(selfAddress.toString)
@@ -404,7 +467,7 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector)
 
   log.info("Cluster Node [{}] - is starting up...", selfAddress)
 
-  // create superisor for daemons under path "/system/cluster"
+  // create supervisor for daemons under path "/system/cluster"
   private val clusterDaemons = {
     val createChild = CreateChild(Props(new ClusterDaemonSupervisor(this)), "cluster")
     Await.result(system.systemGuardian ? createChild, defaultTimeout.duration) match {
@@ -414,9 +477,10 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector)
   }
 
   private val state = {
-    val member = Member(selfAddress, MemberStatus.Joining)
-    val gossip = Gossip(members = SortedSet.empty[Member] + member) + vclockNode // add me as member and update my vector clock
-    new AtomicReference[State](State(gossip))
+    val member = Member(selfAddress, Joining)
+    val versionedGossip = Gossip(members = Gossip.emptyMembers + member) :+ vclockNode // add me as member and update my vector clock
+    val seenVersionedGossip = versionedGossip seen selfAddress
+    new AtomicReference[State](State(seenVersionedGossip))
   }
 
   // try to join the node defined in the 'akka.cluster.node-to-join' option
@@ -426,23 +490,65 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector)
   // ===================== WORK DAEMONS =====================
   // ========================================================
 
+  private val clusterScheduler: Scheduler with Closeable = {
+    if (system.settings.SchedulerTickDuration > SchedulerTickDuration) {
+      log.info("Using a dedicated scheduler for cluster. Default scheduler can be used if configured " +
+        "with 'akka.scheduler.tick-duration' [{} ms] <= 'akka.cluster.scheduler.tick-duration' [{} ms].",
+        system.settings.SchedulerTickDuration.toMillis, SchedulerTickDuration.toMillis)
+      val threadFactory = system.threadFactory match {
+        case tf: MonitorableThreadFactory ⇒ tf.copy(name = tf.name + "-cluster-scheduler")
+        case tf                           ⇒ tf
+      }
+      val hwt = new HashedWheelTimer(log,
+        threadFactory,
+        SchedulerTickDuration, SchedulerTicksPerWheel)
+      new DefaultScheduler(hwt, log, system.dispatcher)
+    } else {
+      // delegate to system.scheduler, but don't close
+      val systemScheduler = system.scheduler
+      new Scheduler with Closeable {
+        // we are using system.scheduler, which we are not responsible for closing
+        def close(): Unit = ()
+        def schedule(initialDelay: Duration, frequency: Duration, receiver: ActorRef, message: Any): Cancellable =
+          systemScheduler.schedule(initialDelay, frequency, receiver, message)
+        def schedule(initialDelay: Duration, frequency: Duration)(f: ⇒ Unit): Cancellable =
+          systemScheduler.schedule(initialDelay, frequency)(f)
+        def schedule(initialDelay: Duration, frequency: Duration, runnable: Runnable): Cancellable =
+          systemScheduler.schedule(initialDelay, frequency, runnable)
+        def scheduleOnce(delay: Duration, runnable: Runnable): Cancellable =
+          systemScheduler.scheduleOnce(delay, runnable)
+        def scheduleOnce(delay: Duration, receiver: ActorRef, message: Any): Cancellable =
+          systemScheduler.scheduleOnce(delay, receiver, message)
+        def scheduleOnce(delay: Duration)(f: ⇒ Unit): Cancellable =
+          systemScheduler.scheduleOnce(delay)(f)
+      }
+    }
+  }
+
   // start periodic gossip to random nodes in cluster
-  private val gossipCanceller = system.scheduler.schedule(PeriodicTasksInitialDelay, GossipInterval) {
+  private val gossipTask = FixedRateTask(clusterScheduler, PeriodicTasksInitialDelay, GossipInterval) {
     gossip()
   }
 
+  // start periodic heartbeat to all nodes in cluster
+  private val heartbeatTask = FixedRateTask(clusterScheduler, PeriodicTasksInitialDelay, HeartbeatInterval) {
+    heartbeat()
+  }
+
   // start periodic cluster failure detector reaping (moving nodes condemned by the failure detector to unreachable list)
-  private val failureDetectorReaperCanceller = system.scheduler.schedule(PeriodicTasksInitialDelay, UnreachableNodesReaperInterval) {
+  private val failureDetectorReaperTask = FixedRateTask(clusterScheduler, PeriodicTasksInitialDelay, UnreachableNodesReaperInterval) {
     reapUnreachableMembers()
   }
 
   // start periodic leader action management (only applies for the current leader)
-  private val leaderActionsCanceller = system.scheduler.schedule(PeriodicTasksInitialDelay, LeaderActionsInterval) {
+  private val leaderActionsTask = FixedRateTask(clusterScheduler, PeriodicTasksInitialDelay, LeaderActionsInterval) {
     leaderActions()
   }
 
   createMBean()
 
   system.registerOnTermination(shutdown())
 
   log.info("Cluster Node [{}] - has started up successfully", selfAddress)
 
   // ======================================================
@@ -509,11 +615,20 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector)
    */
   private[akka] def shutdown(): Unit = {
     if (isRunning.compareAndSet(true, false)) {
-      log.info("Cluster Node [{}] - Shutting down cluster node...", selfAddress)
-      gossipCanceller.cancel()
-      failureDetectorReaperCanceller.cancel()
-      leaderActionsCanceller.cancel()
+      log.info("Cluster Node [{}] - Shutting down cluster Node and cluster daemons...", selfAddress)
+
+      // cancel the periodic tasks, note that otherwise they will be run when scheduler is shutdown
+      gossipTask.cancel()
+      heartbeatTask.cancel()
+      failureDetectorReaperTask.cancel()
+      leaderActionsTask.cancel()
+      clusterScheduler.close()
+
+      // FIXME isTerminated check can be removed when ticket #2221 is fixed
+      // now it prevents logging if system is shutdown (or in progress of shutdown)
+      if (!clusterDaemons.isTerminated)
        system.stop(clusterDaemons)
+
       try {
         mBeanServer.unregisterMBean(clusterMBeanName)
       } catch {
@@ -592,23 +707,30 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector)
     val localState = state.get
     val localGossip = localState.latestGossip
     val localMembers = localGossip.members
+    val localUnreachable = localGossip.overview.unreachable
 
-    if (!localMembers.exists(_.address == node)) {
+    val alreadyMember = localMembers.exists(_.address == node)
+    val isUnreachable = localUnreachable.exists { m ⇒
+      m.address == node && m.status != Down && m.status != Removed
+    }
+
+    if (!alreadyMember && !isUnreachable) {
 
       // remove the node from the 'unreachable' set in case it is a DOWN node that is rejoining cluster
-      val newUnreachableMembers = localGossip.overview.unreachable filterNot { _.address == node }
+      val newUnreachableMembers = localUnreachable filterNot { _.address == node }
       val newOverview = localGossip.overview copy (unreachable = newUnreachableMembers)
 
-      val newMembers = localMembers + Member(node, MemberStatus.Joining) // add joining node as Joining
+      val newMembers = localMembers + Member(node, Joining) // add joining node as Joining
       val newGossip = localGossip copy (overview = newOverview, members = newMembers)
 
-      val versionedGossip = newGossip + vclockNode
+      val versionedGossip = newGossip :+ vclockNode
       val seenVersionedGossip = versionedGossip seen selfAddress
 
       val newState = localState copy (latestGossip = seenVersionedGossip)
 
       if (!state.compareAndSet(localState, newState)) joining(node) // recur if we failed update
       else {
         // treat join as initial heartbeat, so that it becomes unavailable if nothing more happens
         if (node != selfAddress) failureDetector heartbeat node
         notifyMembershipChangeListeners(localState, newState)
       }
@@ -626,17 +748,16 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector)
     val localGossip = localState.latestGossip
     val localMembers = localGossip.members
 
-    val newMembers = localMembers + Member(address, MemberStatus.Leaving) // mark node as LEAVING
+    val newMembers = localMembers + Member(address, Leaving) // mark node as LEAVING
     val newGossip = localGossip copy (members = newMembers)
 
-    val versionedGossip = newGossip + vclockNode
+    val versionedGossip = newGossip :+ vclockNode
     val seenVersionedGossip = versionedGossip seen selfAddress
 
     val newState = localState copy (latestGossip = seenVersionedGossip)
 
     if (!state.compareAndSet(localState, newState)) leaving(address) // recur if we failed update
     else {
       if (address != selfAddress) failureDetector heartbeat address // update heartbeat in failure detector
       notifyMembershipChangeListeners(localState, newState)
     }
   }
@@ -665,8 +786,8 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector)
   }
 
   /**
-   * The node to DOWN is removed from the 'members' set and put in the 'unreachable' set (if not alread there)
-   * and its status is set to DOWN. The node is alo removed from the 'seen' table.
+   * The node to DOWN is removed from the 'members' set and put in the 'unreachable' set (if not already there)
+   * and its status is set to DOWN. The node is also removed from the 'seen' table.
    *
    * The node will reside as DOWN in the 'unreachable' set until an explicit JOIN command is sent directly
    * to this node and it will then go through the normal JOINING procedure.
@@ -681,44 +802,38 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector)
     val localUnreachableMembers = localOverview.unreachable
 
     // 1. check if the node to DOWN is in the 'members' set
-    var downedMember: Option[Member] = None
-    val newMembers =
-      localMembers
-        .map { member ⇒
-          if (member.address == address) {
-            log.info("Cluster Node [{}] - Marking node [{}] as DOWN", selfAddress, member.address)
-            val newMember = member copy (status = MemberStatus.Down)
-            downedMember = Some(newMember)
-            newMember
-          } else member
+    val downedMember: Option[Member] = localMembers.collectFirst {
+      case m if m.address == address ⇒ m.copy(status = Down)
+    }
+    val newMembers = downedMember match {
+      case Some(m) ⇒
+        log.info("Cluster Node [{}] - Marking node [{}] as DOWN", selfAddress, m.address)
+        localMembers - m
+      case None ⇒ localMembers
+    }
-        .filter(_.status != MemberStatus.Down)
 
     // 2. check if the node to DOWN is in the 'unreachable' set
     val newUnreachableMembers =
-      localUnreachableMembers
-        .filter(_.status != MemberStatus.Down) // no need to DOWN members already DOWN
-        .map { member ⇒
-          if (member.address == address) {
+      localUnreachableMembers.map { member ⇒
+        // no need to DOWN members already DOWN
+        if (member.address == address && member.status != Down) {
          log.info("Cluster Node [{}] - Marking unreachable node [{}] as DOWN", selfAddress, member.address)
-          member copy (status = MemberStatus.Down)
+          member copy (status = Down)
        } else member
      }
 
     // 3. add the newly DOWNED members from the 'members' (in step 1.) to the 'newUnreachableMembers' set.
-    val newUnreachablePlusNewlyDownedMembers = downedMember match {
-      case Some(member) ⇒ newUnreachableMembers + member
-      case None         ⇒ newUnreachableMembers
-    }
+    val newUnreachablePlusNewlyDownedMembers = newUnreachableMembers ++ downedMember
 
     // 4. remove nodes marked as DOWN from the 'seen' table
-    val newSeen = newUnreachablePlusNewlyDownedMembers.foldLeft(localSeen) { (currentSeen, member) ⇒
-      currentSeen - member.address
+    val newSeen = localSeen -- newUnreachablePlusNewlyDownedMembers.collect {
+      case m if m.status == Down ⇒ m.address
     }
 
-    val newOverview = localOverview copy (seen = newSeen, unreachable = newUnreachablePlusNewlyDownedMembers) // update gossip overview
+    // update gossip overview
+    val newOverview = localOverview copy (seen = newSeen, unreachable = newUnreachablePlusNewlyDownedMembers)
     val newGossip = localGossip copy (overview = newOverview, members = newMembers) // update gossip
-    val versionedGossip = newGossip + vclockNode
+    val versionedGossip = newGossip :+ vclockNode
     val newState = localState copy (latestGossip = versionedGossip seen selfAddress)
 
     if (!state.compareAndSet(localState, newState)) downing(address) // recur if we fail the update
@@ -731,7 +846,7 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector)
    * Receive new gossip.
    */
   @tailrec
-  final private[cluster] def receive(sender: Member, remoteGossip: Gossip): Unit = {
+  final private[cluster] def receiveGossip(from: Address, remoteGossip: Gossip): Unit = {
     val localState = state.get
     val localGossip = localState.latestGossip
@@ -739,10 +854,11 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector)
     if (remoteGossip.version <> localGossip.version) {
       // concurrent
       val mergedGossip = remoteGossip merge localGossip
-      val versionedMergedGossip = mergedGossip + vclockNode
+      val versionedMergedGossip = mergedGossip :+ vclockNode
 
-      log.debug(
-        "Can't establish a causal relationship between \"remote\" gossip [{}] and \"local\" gossip [{}] - merging them into [{}]",
+      // FIXME change to debug log level, when failure detector is stable
+      log.info(
+        """Can't establish a causal relationship between "remote" gossip [{}] and "local" gossip [{}] - merging them into [{}]""",
         remoteGossip, localGossip, versionedMergedGossip)
 
       versionedMergedGossip
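Note: the `<>` test above asks whether the two vector clocks are concurrent, i.e. neither one's history contains the other's; only then are the gossips merged. A minimal sketch of that comparison, assuming a simplified Map[node, counter] representation (the committed VectorClock keeps per-node timestamps, so this is illustrative only):

// Illustrative only: two clocks are concurrent iff neither dominates the other.
def concurrent(a: Map[String, Long], b: Map[String, Long]): Boolean = {
  val nodes = a.keySet ++ b.keySet
  def dominates(x: Map[String, Long], y: Map[String, Long]): Boolean =
    nodes.forall(n ⇒ x.getOrElse(n, 0L) >= y.getOrElse(n, 0L))
  !dominates(a, b) && !dominates(b, a)
}
// Merging two clocks then takes the pointwise maximum of the counters, which
// is the standard vector clock merge that `this.version merge that.version`
// performs for the real clocks.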
@@ -759,55 +875,23 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector)
     val newState = localState copy (latestGossip = winningGossip seen selfAddress)
 
     // if we won the race then update else try again
-    if (!state.compareAndSet(localState, newState)) receive(sender, remoteGossip) // recur if we fail the update
+    if (!state.compareAndSet(localState, newState)) receiveGossip(from, remoteGossip) // recur if we fail the update
     else {
-      log.info("Cluster Node [{}] - Receiving gossip from [{}]", selfAddress, sender.address)
-
-      if (sender.address != selfAddress) failureDetector heartbeat sender.address
+      log.debug("Cluster Node [{}] - Receiving gossip from [{}]", selfAddress, from)
       notifyMembershipChangeListeners(localState, newState)
     }
   }
 
+  /**
+   * INTERNAL API
+   */
+  private[cluster] def receiveHeartbeat(from: Address): Unit = failureDetector heartbeat from
+
   /**
    * Joins the pre-configured contact point.
    */
   private def autoJoin(): Unit = nodeToJoin foreach join
 
-  /**
-   * Switches the member status.
-   *
-   * @param newStatus the new member status
-   * @param oldState the state to change the member status in
-   * @return the updated new state with the new member status
-   */
-  private def switchMemberStatusTo(newStatus: MemberStatus, state: State): State = { // TODO: Removed this method? Currently not used.
-    log.debug("Cluster Node [{}] - Switching membership status to [{}]", selfAddress, newStatus)
-
-    val localSelf = self
-
-    val localGossip = state.latestGossip
-    val localMembers = localGossip.members
-
-    // change my state into a "new" self
-    val newSelf = localSelf copy (status = newStatus)
-
-    // change my state in 'gossip.members'
-    val newMembersSet = localMembers map { member ⇒
-      if (member.address == selfAddress) newSelf
-      else member
-    }
-
-    // NOTE: ugly crap to work around bug in scala colletions ('val ss: SortedSet[Member] = SortedSet.empty[Member] ++ aSet' does not compile)
-    val newMembersSortedSet = SortedSet[Member](newMembersSet.toList: _*)
-    val newGossip = localGossip copy (members = newMembersSortedSet)
-
-    // version my changes
-    val versionedGossip = newGossip + vclockNode
-    val seenVersionedGossip = versionedGossip seen selfAddress
-
-    state copy (latestGossip = seenVersionedGossip)
-  }
 
   /**
    * INTERNAL API
   *
@@ -816,7 +900,7 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector)
   private[akka] def gossipTo(address: Address): Unit = {
     val connection = clusterGossipConnectionFor(address)
     log.debug("Cluster Node [{}] - Gossiping to [{}]", selfAddress, connection)
-    connection ! GossipEnvelope(self, latestGossip)
+    connection ! GossipEnvelope(selfAddress, latestGossip)
   }
 
 /**
@@ -863,12 +947,7 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector)
 
     log.debug("Cluster Node [{}] - Initiating new round of gossip", selfAddress)
 
-    if (isSingletonCluster(localState)) {
-      // gossip to myself
-      // TODO could perhaps be optimized, no need to gossip to myself when Up?
-      gossipTo(selfAddress)
-
-    } else if (isAvailable(localState)) {
+    if (!isSingletonCluster(localState) && isAvailable(localState)) {
       val localGossip = localState.latestGossip
       // important to not accidentally use `map` of the SortedSet, since the original order is not preserved
       val localMembers = localGossip.members.toIndexedSeq
@@ -899,6 +978,23 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector)
     }
   }
 
+  /**
+   * INTERNAL API
+   */
+  private[akka] def heartbeat(): Unit = {
+    val localState = state.get
+
+    if (!isSingletonCluster(localState)) {
+      val liveMembers = localState.latestGossip.members.toIndexedSeq
+
+      for (member ← liveMembers; if member.address != selfAddress) {
+        val connection = clusterGossipConnectionFor(member.address)
+        log.debug("Cluster Node [{}] - Heartbeat to [{}]", selfAddress, connection)
+        connection ! selfHeartbeat
+      }
+    }
+  }
+
   /**
    * INTERNAL API
    *
@@ -920,14 +1016,14 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector)
 
     if (newlyDetectedUnreachableMembers.nonEmpty) { // we have newly detected members marked as unavailable
 
-      val newMembers = localMembers diff newlyDetectedUnreachableMembers
-      val newUnreachableMembers: Set[Member] = localUnreachableMembers ++ newlyDetectedUnreachableMembers
+      val newMembers = localMembers -- newlyDetectedUnreachableMembers
+      val newUnreachableMembers = localUnreachableMembers ++ newlyDetectedUnreachableMembers
 
       val newOverview = localOverview copy (unreachable = newUnreachableMembers)
       val newGossip = localGossip copy (overview = newOverview, members = newMembers)
 
       // updating vclock and 'seen' table
-      val versionedGossip = newGossip + vclockNode
+      val versionedGossip = newGossip :+ vclockNode
       val seenVersionedGossip = versionedGossip seen selfAddress
 
       val newState = localState copy (latestGossip = seenVersionedGossip)
@@ -987,33 +1083,45 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector)
         // ----------------------
-        // 1. Move EXITING => REMOVED - e.g. remove the nodes from the 'members' set/node ring
         // ----------------------
-        localMembers filter { member ⇒
-          if (member.status == MemberStatus.Exiting) {
-            log.info("Cluster Node [{}] - Leader is moving node [{}] from EXITING to REMOVED - Removing node from node ring", selfAddress, member.address)
+        // localMembers filter { member ⇒
+        //   if (member.status == MemberStatus.Exiting) {
+        //     log.info("Cluster Node [{}] - Leader is moving node [{}] from EXITING to REMOVED - Removing node from node ring", selfAddress, member.address)
+        //     hasChangedState = true
+        //     clusterCommandConnectionFor(member.address) ! ClusterUserAction.Remove(member.address) // tell the removed node to shut himself down
+        //     false
+        //   } else true
+
+        localMembers map { member ⇒
+          // ----------------------
+          // 1. Move JOINING => UP (once all nodes have seen that this node is JOINING e.g. we have a convergence)
+          // ----------------------
+          if (member.status == Joining) {
+            log.info("Cluster Node [{}] - Leader is moving node [{}] from JOINING to UP", selfAddress, member.address)
             hasChangedState = true
-            clusterCommandConnectionFor(member.address) ! ClusterUserAction.Remove(member.address) // tell the removed node to shut himself down
-            false
-          } else true
+            member copy (status = Up)
+          } else member
 
         } map { member ⇒
           // ----------------------
-          // 2. Move JOINING => UP (once all nodes have seen that this node is JOINING e.g. we have a convergence)
+          // 2. Move EXITING => REMOVED (once all nodes have seen that this node is EXITING e.g. we have a convergence)
           // ----------------------
-          if (member.status == MemberStatus.Joining) {
-            log.info("Cluster Node [{}] - Leader is moving node [{}] from JOINING to UP", selfAddress, member.address)
+          if (member.status == Exiting) {
+            log.info("Cluster Node [{}] - Leader is moving node [{}] from EXITING to REMOVED", selfAddress, member.address)
             hasChangedState = true
-            member copy (status = MemberStatus.Up)
+            member copy (status = Removed)
          } else member
 
        } map { member ⇒
          // ----------------------
          // 3. Move LEAVING => EXITING (once we have a convergence on LEAVING *and* if we have a successful partition handoff)
          // ----------------------
-          if (member.status == MemberStatus.Leaving && hasPartionHandoffCompletedSuccessfully(localGossip)) {
+          if (member.status == Leaving && hasPartionHandoffCompletedSuccessfully(localGossip)) {
            log.info("Cluster Node [{}] - Leader is moving node [{}] from LEAVING to EXITING", selfAddress, member.address)
            hasChangedState = true
-            clusterCommandConnectionFor(member.address) ! ClusterLeaderAction.Exit(member.address) // FIXME should use ? to await completion of handoff?
-            member copy (status = MemberStatus.Exiting)
+            // clusterCommandConnectionFor(member.address) ! ClusterLeaderAction.Exit(member.address) // FIXME should use ? to await completion of handoff?
+            member copy (status = Exiting)
 
          } else member
 
        }
@@ -1028,16 +1136,20 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector)
         // 4. Move UNREACHABLE => DOWN (auto-downing by leader)
         // ----------------------
         val newUnreachableMembers =
-          localUnreachableMembers
-            .filter(_.status != MemberStatus.Down) // no need to DOWN members already DOWN
-            .map { member ⇒
+          localUnreachableMembers.map { member ⇒
+            // no need to DOWN members already DOWN
+            if (member.status == Down) member
+            else {
              log.info("Cluster Node [{}] - Leader is marking unreachable node [{}] as DOWN", selfAddress, member.address)
              hasChangedState = true
-              member copy (status = MemberStatus.Down)
+              member copy (status = Down)
            }
          }
 
         // removing nodes marked as DOWN from the 'seen' table
-        val newSeen = localUnreachableMembers.foldLeft(localSeen)((currentSeen, member) ⇒ currentSeen - member.address)
+        val newSeen = localSeen -- newUnreachableMembers.collect {
+          case m if m.status == Down ⇒ m.address
+        }
 
         val newOverview = localOverview copy (seen = newSeen, unreachable = newUnreachableMembers) // update gossip overview
         localGossip copy (overview = newOverview) // update gossip
@@ -1049,7 +1161,7 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector)
       // ----------------------
       // 5. Updating the vclock version for the changes
       // ----------------------
-      val versionedGossip = newGossip + vclockNode
+      val versionedGossip = newGossip :+ vclockNode
 
       // ----------------------
       // 6. Updating the 'seen' table
@@ -1076,24 +1188,39 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector)
   private def convergence(gossip: Gossip): Option[Gossip] = {
     val overview = gossip.overview
     val unreachable = overview.unreachable
+    val seen = overview.seen
 
     // First check that:
-    //   1. we don't have any members that are unreachable (unreachable.isEmpty == true), or
-    //   2. all unreachable members in the set have status DOWN or REMOVED
+    //   1. we don't have any members that are unreachable, or
+    //   2. all unreachable members in the set have status DOWN
     // Else we can't continue to check for convergence
     // When that is done we check that all the entries in the 'seen' table have the same vector clock version
-    if (unreachable.isEmpty || !unreachable.exists { m ⇒
-      m.status != MemberStatus.Down &&
-        m.status != MemberStatus.Removed
-    }) {
-      val seen = gossip.overview.seen
-      val views = Set.empty[VectorClock] ++ seen.values
+    // and that all members exists in seen table
+    val hasUnreachable = unreachable.nonEmpty && unreachable.exists { m ⇒
+      m.status != Down && m.status != Removed
+    }
+    val allMembersInSeen = gossip.members.forall(m ⇒ seen.contains(m.address))
 
-      if (views.size == 1) {
+    if (hasUnreachable) {
+      log.debug("Cluster Node [{}] - No cluster convergence, due to unreachable nodes [{}].", selfAddress, unreachable)
+      None
+    } else if (!allMembersInSeen) {
+      log.debug("Cluster Node [{}] - No cluster convergence, due to members not in seen table [{}].", selfAddress,
+        gossip.members.map(_.address) -- seen.keySet)
+      None
+    } else {
 
+      val views = seen.values.toSet.size
 
+      if (views == 1) {
        log.debug("Cluster Node [{}] - Cluster convergence reached: [{}]", selfAddress, gossip.members.mkString(", "))
        Some(gossip)
-      } else None
-    } else None
+      } else {
+        log.debug("Cluster Node [{}] - No cluster convergence, since not all nodes have seen the same state yet. [{} of {}]",
+          selfAddress, views, seen.values.size)
+        None
+      }
    }
  }
 
  private def isAvailable(state: State): Boolean = !isUnavailable(state)
@@ -1104,7 +1231,7 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector)
     val localMembers = localGossip.members
     val localUnreachableMembers = localOverview.unreachable
     val isUnreachable = localUnreachableMembers exists { _.address == selfAddress }
-    val hasUnavailableMemberStatus = localMembers exists { m ⇒ (m == self) && MemberStatus.isUnavailable(m.status) }
+    val hasUnavailableMemberStatus = localMembers exists { m ⇒ (m == self) && m.status.isUnavailable }
     isUnreachable || hasUnavailableMemberStatus
   }
@@ -13,21 +13,24 @@ import akka.actor.AddressFromURIString
 
 class ClusterSettings(val config: Config, val systemName: String) {
   import config._
-  val FailureDetectorThreshold = getInt("akka.cluster.failure-detector.threshold")
-  val FailureDetectorMaxSampleSize = getInt("akka.cluster.failure-detector.max-sample-size")
-  val FailureDetectorImplementationClass: Option[String] = getString("akka.cluster.failure-detector.implementation-class") match {
+  final val FailureDetectorThreshold = getInt("akka.cluster.failure-detector.threshold")
+  final val FailureDetectorMaxSampleSize = getInt("akka.cluster.failure-detector.max-sample-size")
+  final val FailureDetectorImplementationClass: Option[String] = getString("akka.cluster.failure-detector.implementation-class") match {
     case ""   ⇒ None
     case fqcn ⇒ Some(fqcn)
   }
-  val NodeToJoin: Option[Address] = getString("akka.cluster.node-to-join") match {
+  final val NodeToJoin: Option[Address] = getString("akka.cluster.node-to-join") match {
     case ""                         ⇒ None
     case AddressFromURIString(addr) ⇒ Some(addr)
   }
-  val PeriodicTasksInitialDelay = Duration(getMilliseconds("akka.cluster.periodic-tasks-initial-delay"), MILLISECONDS)
-  val GossipInterval = Duration(getMilliseconds("akka.cluster.gossip-interval"), MILLISECONDS)
-  val LeaderActionsInterval = Duration(getMilliseconds("akka.cluster.leader-actions-interval"), MILLISECONDS)
-  val UnreachableNodesReaperInterval = Duration(getMilliseconds("akka.cluster.unreachable-nodes-reaper-interval"), MILLISECONDS)
-  val NrOfGossipDaemons = getInt("akka.cluster.nr-of-gossip-daemons")
-  val NrOfDeputyNodes = getInt("akka.cluster.nr-of-deputy-nodes")
-  val AutoDown = getBoolean("akka.cluster.auto-down")
+  final val PeriodicTasksInitialDelay = Duration(getMilliseconds("akka.cluster.periodic-tasks-initial-delay"), MILLISECONDS)
+  final val GossipInterval = Duration(getMilliseconds("akka.cluster.gossip-interval"), MILLISECONDS)
+  final val HeartbeatInterval = Duration(getMilliseconds("akka.cluster.heartbeat-interval"), MILLISECONDS)
+  final val LeaderActionsInterval = Duration(getMilliseconds("akka.cluster.leader-actions-interval"), MILLISECONDS)
+  final val UnreachableNodesReaperInterval = Duration(getMilliseconds("akka.cluster.unreachable-nodes-reaper-interval"), MILLISECONDS)
+  final val NrOfGossipDaemons = getInt("akka.cluster.nr-of-gossip-daemons")
+  final val NrOfDeputyNodes = getInt("akka.cluster.nr-of-deputy-nodes")
+  final val AutoDown = getBoolean("akka.cluster.auto-down")
+  final val SchedulerTickDuration = Duration(getMilliseconds("akka.cluster.scheduler.tick-duration"), MILLISECONDS)
+  final val SchedulerTicksPerWheel = getInt("akka.cluster.scheduler.ticks-per-wheel")
 }
akka-cluster/src/main/scala/akka/cluster/FixedRateTask.scala (new file, 51 lines)
@@ -0,0 +1,51 @@
+/**
+ * Copyright (C) 2009-2012 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package akka.cluster
+
+import java.util.concurrent.TimeUnit
+import java.util.concurrent.atomic.AtomicBoolean
+import java.util.concurrent.atomic.AtomicLong
+
+import akka.actor.Scheduler
+import akka.util.Duration
+
+/**
+ * INTERNAL API
+ */
+private[akka] object FixedRateTask {
+  def apply(scheduler: Scheduler, initalDelay: Duration, delay: Duration)(f: ⇒ Unit): FixedRateTask = {
+    new FixedRateTask(scheduler, initalDelay, delay, new Runnable { def run(): Unit = f })
+  }
+}
+
+/**
+ * INTERNAL API
+ *
+ * Task to be scheduled periodically at a fixed rate, compensating, on average,
+ * for inaccuracy in the scheduler. It will start when constructed, using the
+ * initialDelay.
+ */
+private[akka] class FixedRateTask(scheduler: Scheduler, initalDelay: Duration, delay: Duration, task: Runnable) extends Runnable {
+
+  private val delayNanos = delay.toNanos
+  private val cancelled = new AtomicBoolean(false)
+  private val counter = new AtomicLong(0L)
+  private val startTime = System.nanoTime + initalDelay.toNanos
+  scheduler.scheduleOnce(initalDelay, this)
+
+  def cancel(): Unit = cancelled.set(true)
+
+  override final def run(): Unit = if (!cancelled.get) try {
+    task.run()
+  } finally if (!cancelled.get) {
+    val nextTime = startTime + delayNanos * counter.incrementAndGet
+    // it's ok to schedule with negative duration, will run asap
+    val nextDelay = Duration(nextTime - System.nanoTime, TimeUnit.NANOSECONDS)
+    try {
+      scheduler.scheduleOnce(nextDelay, this)
+    } catch { case e: IllegalStateException ⇒ /* will happen when scheduler is closed, nothing wrong */ }
+  }
+
+}
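Note: the drift compensation in run() above anchors every execution to the original start time, nextTime = startTime + delayNanos * n, so a late run shortens the next delay instead of pushing the whole schedule back. A tiny worked example of the arithmetic with illustrative numbers:

// Illustrative numbers (milliseconds), not committed code.
val start = 0L
val delay = 150L
val now = 320L                    // the 2nd run fired 20 ms late
val nextTime = start + delay * 3  // 450: the 3rd run stays on the original grid
val nextDelay = nextTime - now    // 130 ms instead of a full 150
// If a task overruns its slot, nextDelay goes negative and scheduleOnce
// simply runs it as soon as possible, as the comment in run() notes.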
@@ -19,7 +19,7 @@ class VectorClockException(message: String) extends AkkaException(message)
  */
 trait Versioned[T] {
   def version: VectorClock
-  def +(node: VectorClock.Node): T
+  def :+(node: VectorClock.Node): T
 }
 
 /**
@@ -142,7 +142,7 @@ case class VectorClock(
   /**
    * Increment the version for the node passed as argument. Returns a new VectorClock.
    */
-  def +(node: Node): VectorClock = copy(versions = versions + (node -> Timestamp()))
+  def :+(node: Node): VectorClock = copy(versions = versions + (node -> Timestamp()))
 
   /**
    * Returns true if <code>this</code> and <code>that</code> are concurrent else false.
@@ -38,16 +38,16 @@ abstract class MembershipChangeListenerJoinSpec
 
     runOn(first) {
       val joinLatch = TestLatch()
+      val expectedAddresses = Set(firstAddress, secondAddress)
       cluster.registerListener(new MembershipChangeListener {
         def notify(members: SortedSet[Member]) {
-          if (members.size == 2 && members.exists(_.status == MemberStatus.Joining)) // second node is not part of node ring anymore
+          if (members.map(_.address) == expectedAddresses && members.exists(_.status == MemberStatus.Joining))
            joinLatch.countDown()
        }
      })
      testConductor.enter("registered-listener")
 
      joinLatch.await
      cluster.convergence.isDefined must be(true)
    }
 
    runOn(second) {
@@ -55,6 +55,8 @@ abstract class MembershipChangeListenerJoinSpec
       cluster.join(firstAddress)
     }
 
+    awaitUpConvergence(2)
+
     testConductor.enter("after")
   }
 }
@@ -54,9 +54,11 @@ abstract class MembershipChangeListenerLeavingSpec
 
     runOn(third) {
       val latch = TestLatch()
+      val expectedAddresses = Set(firstAddress, secondAddress, thirdAddress)
       cluster.registerListener(new MembershipChangeListener {
         def notify(members: SortedSet[Member]) {
-          if (members.size == 3 && members.exists(m ⇒ m.address == secondAddress && m.status == MemberStatus.Leaving))
+          if (members.map(_.address) == expectedAddresses &&
+            members.exists(m ⇒ m.address == secondAddress && m.status == MemberStatus.Leaving))
            latch.countDown()
        }
      })
@@ -29,6 +29,7 @@ abstract class MembershipChangeListenerUpSpec
 
   lazy val firstAddress = node(first).address
   lazy val secondAddress = node(second).address
+  lazy val thirdAddress = node(third).address
 
   "A set of connected cluster systems" must {
@@ -38,9 +39,10 @@ abstract class MembershipChangeListenerUpSpec
 
     runOn(first, second) {
       val latch = TestLatch()
+      val expectedAddresses = Set(firstAddress, secondAddress)
       cluster.registerListener(new MembershipChangeListener {
         def notify(members: SortedSet[Member]) {
-          if (members.size == 2 && members.forall(_.status == MemberStatus.Up))
+          if (members.map(_.address) == expectedAddresses && members.forall(_.status == MemberStatus.Up))
            latch.countDown()
        }
      })
@@ -59,9 +61,10 @@ abstract class MembershipChangeListenerUpSpec
     "(when three nodes) after cluster convergence updates the membership table then all MembershipChangeListeners should be triggered" taggedAs LongRunningTest in {
 
       val latch = TestLatch()
+      val expectedAddresses = Set(firstAddress, secondAddress, thirdAddress)
       cluster.registerListener(new MembershipChangeListener {
         def notify(members: SortedSet[Member]) {
-          if (members.size == 3 && members.forall(_.status == MemberStatus.Up))
+          if (members.map(_.address) == expectedAddresses && members.forall(_.status == MemberStatus.Up))
            latch.countDown()
        }
      })
@@ -17,6 +17,7 @@ object MultiNodeClusterSpec {
     akka.cluster {
       auto-down = off
       gossip-interval = 200 ms
+      heartbeat-interval = 400 ms
       leader-actions-interval = 200 ms
       unreachable-nodes-reaper-interval = 200 ms
       periodic-tasks-initial-delay = 300 ms
@@ -24,7 +24,7 @@ object SunnyWeatherMultiJvmSpec extends MultiNodeConfig {
       gossip-interval = 400 ms
       nr-of-deputy-nodes = 0
     }
-    akka.loglevel = DEBUG
+    akka.loglevel = INFO
     """))
 }
@@ -22,11 +22,14 @@ class ClusterConfigSpec extends AkkaSpec {
       NodeToJoin must be(None)
       PeriodicTasksInitialDelay must be(1 seconds)
       GossipInterval must be(1 second)
+      HeartbeatInterval must be(1 second)
       LeaderActionsInterval must be(1 second)
       UnreachableNodesReaperInterval must be(1 second)
       NrOfGossipDaemons must be(4)
       NrOfDeputyNodes must be(3)
       AutoDown must be(true)
+      SchedulerTickDuration must be(33 millis)
+      SchedulerTicksPerWheel must be(512)
     }
   }
 }
@@ -97,15 +97,11 @@ class ClusterSpec extends AkkaSpec(ClusterSpec.config) with BeforeAndAfter {
 
   "A Cluster" must {
 
-    "initially be singleton cluster and reach convergence after first gossip" in {
+    "initially be singleton cluster and reach convergence immediately" in {
       cluster.isSingletonCluster must be(true)
       cluster.latestGossip.members.map(_.address) must be(Set(selfAddress))
       memberStatus(selfAddress) must be(Some(MemberStatus.Joining))
-      cluster.convergence.isDefined must be(false)
-      cluster.gossip()
-      expectMsg(GossipTo(selfAddress))
-      awaitCond(cluster.convergence.isDefined)
-      memberStatus(selfAddress) must be(Some(MemberStatus.Joining))
+      cluster.convergence.isDefined must be(true)
       cluster.leaderActions()
       memberStatus(selfAddress) must be(Some(MemberStatus.Up))
     }
@@ -114,8 +110,7 @@ class ClusterSpec extends AkkaSpec(ClusterSpec.config) with BeforeAndAfter {
       cluster.joining(addresses(1))
       cluster.latestGossip.members.map(_.address) must be(Set(selfAddress, addresses(1)))
       memberStatus(addresses(1)) must be(Some(MemberStatus.Joining))
-      // FIXME why is it still convergence immediately after joining?
-      //cluster.convergence.isDefined must be(false)
+      cluster.convergence.isDefined must be(false)
     }
 
     "accept a few more joining nodes" in {
@@ -0,0 +1,43 @@
+/**
+ * Copyright (C) 2009-2012 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package akka.cluster
+
+import akka.testkit.AkkaSpec
+import akka.util.duration._
+import akka.testkit.TimingTest
+import akka.testkit.TestLatch
+import akka.dispatch.Await
+
+@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner])
+class FixedRateTaskSpec extends AkkaSpec {
+
+  "Task scheduled at fixed rate" must {
+    "adjust for scheduler inaccuracy" taggedAs TimingTest in {
+      val startTime = System.nanoTime
+      val n = 33
+      val latch = new TestLatch(n)
+      FixedRateTask(system.scheduler, 150.millis, 150.millis) {
+        latch.countDown()
+      }
+      Await.ready(latch, 6.seconds)
+      val rate = n * 1000.0 / (System.nanoTime - startTime).nanos.toMillis
+      rate must be(6.66 plusOrMinus (0.4))
+    }
+
+    "compensate for long running task" taggedAs TimingTest in {
+      val startTime = System.nanoTime
+      val n = 22
+      val latch = new TestLatch(n)
+      FixedRateTask(system.scheduler, 225.millis, 225.millis) {
+        80.millis.sleep()
+        latch.countDown()
+      }
+      Await.ready(latch, 6.seconds)
+      val rate = n * 1000.0 / (System.nanoTime - startTime).nanos.toMillis
+      rate must be(4.4 plusOrMinus (0.3))
+    }
+  }
+}
@@ -12,10 +12,8 @@ import scala.collection.immutable.SortedSet
 @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner])
 class GossipSpec extends WordSpec with MustMatchers {
 
-  "A Gossip" must {
 
-    "merge members by status priority" in {
   import MemberStatus._
 
   val a1 = Member(Address("akka", "sys", "a", 2552), Up)
   val a2 = Member(Address("akka", "sys", "a", 2552), Joining)
   val b1 = Member(Address("akka", "sys", "b", 2552), Up)
@ -24,19 +22,83 @@ class GossipSpec extends WordSpec with MustMatchers {
|
|||
val c2 = Member(Address("akka", "sys", "c", 2552), Up)
|
||||
val d1 = Member(Address("akka", "sys", "d", 2552), Leaving)
|
||||
val d2 = Member(Address("akka", "sys", "d", 2552), Removed)
|
||||
val e1 = Member(Address("akka", "sys", "e", 2552), Joining)
|
||||
val e2 = Member(Address("akka", "sys", "e", 2552), Up)
|
||||
|
||||
val g1 = Gossip(members = SortedSet(a1, b1, c1, d1))
|
||||
val g2 = Gossip(members = SortedSet(a2, b2, c2, d2))
|
||||
"A Gossip" must {
|
||||
|
||||
"merge members by status priority" in {
|
||||
|
||||
val g1 = Gossip(members = SortedSet(a1, c1, e1))
|
||||
val g2 = Gossip(members = SortedSet(a2, c2, e2))
|
||||
|
||||
val merged1 = g1 merge g2
|
||||
merged1.members must be(SortedSet(a1, b2, c1, d2))
|
||||
merged1.members.toSeq.map(_.status) must be(Seq(Up, Removed, Leaving, Removed))
|
||||
merged1.members must be(SortedSet(a1, c1, e2))
|
||||
merged1.members.toSeq.map(_.status) must be(Seq(Up, Leaving, Up))
|
||||
|
||||
val merged2 = g2 merge g1
|
||||
merged2.members must be(SortedSet(a1, b2, c1, d2))
|
||||
merged2.members.toSeq.map(_.status) must be(Seq(Up, Removed, Leaving, Removed))
|
||||
merged2.members must be(SortedSet(a1, c1, e2))
|
||||
merged2.members.toSeq.map(_.status) must be(Seq(Up, Leaving, Up))
|
||||
|
||||
}
|
||||
|
||||
"merge unreachable by status priority" in {
|
||||
|
||||
val g1 = Gossip(members = Gossip.emptyMembers, overview = GossipOverview(unreachable = Set(a1, b1, c1, d1)))
|
||||
val g2 = Gossip(members = Gossip.emptyMembers, overview = GossipOverview(unreachable = Set(a2, b2, c2, d2)))
|
||||
|
||||
val merged1 = g1 merge g2
|
||||
merged1.overview.unreachable must be(Set(a1, b2, c1, d2))
|
||||
merged1.overview.unreachable.toSeq.sorted.map(_.status) must be(Seq(Up, Removed, Leaving, Removed))
|
||||
|
||||
val merged2 = g2 merge g1
|
||||
merged2.overview.unreachable must be(Set(a1, b2, c1, d2))
|
||||
merged2.overview.unreachable.toSeq.sorted.map(_.status) must be(Seq(Up, Removed, Leaving, Removed))
|
||||
|
||||
}
|
||||
|
||||
"merge by excluding unreachable from members" in {
|
||||
val g1 = Gossip(members = SortedSet(a1, b1), overview = GossipOverview(unreachable = Set(c1, d1)))
|
||||
val g2 = Gossip(members = SortedSet(a2, c2), overview = GossipOverview(unreachable = Set(b2, d2)))
|
||||
|
||||
val merged1 = g1 merge g2
|
||||
merged1.members must be(SortedSet(a1))
|
||||
merged1.members.toSeq.map(_.status) must be(Seq(Up))
|
||||
merged1.overview.unreachable must be(Set(b2, c1, d2))
|
||||
merged1.overview.unreachable.toSeq.sorted.map(_.status) must be(Seq(Removed, Leaving, Removed))
|
||||
|
||||
val merged2 = g2 merge g1
|
||||
merged2.members must be(SortedSet(a1))
|
||||
merged2.members.toSeq.map(_.status) must be(Seq(Up))
|
||||
merged2.overview.unreachable must be(Set(b2, c1, d2))
|
||||
merged2.overview.unreachable.toSeq.sorted.map(_.status) must be(Seq(Removed, Leaving, Removed))
|
||||
|
||||
}
|
||||
|
||||
"start with fresh seen table after merge" in {
|
||||
val g1 = Gossip(members = SortedSet(a1, e1)).seen(a1.address).seen(a1.address)
|
||||
val g2 = Gossip(members = SortedSet(a2, e2)).seen(e2.address).seen(e2.address)
|
||||
|
||||
val merged1 = g1 merge g2
|
||||
merged1.overview.seen.isEmpty must be(true)
|
||||
|
||||
val merged2 = g2 merge g1
|
||||
merged2.overview.seen.isEmpty must be(true)
|
||||
|
||||
}
|
||||
|
||||
"not have node in both members and unreachable" in intercept[IllegalArgumentException] {
|
||||
Gossip(members = SortedSet(a1, b1), overview = GossipOverview(unreachable = Set(b2)))
|
||||
}
|
||||
|
||||
"not have live members with wrong status" in intercept[IllegalArgumentException] {
|
||||
// b2 is Removed
|
||||
Gossip(members = SortedSet(a2, b2))
|
||||
}
|
||||
|
||||
"not have non cluster members in seen table" in intercept[IllegalArgumentException] {
|
||||
Gossip(members = SortedSet(a1, e1)).seen(a1.address).seen(e1.address).seen(b1.address)
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
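Read together, the merge assertions above pin down one status-priority order: Removed wins over Leaving, Leaving wins over Up, and Up wins over Joining. A self-contained sketch of that rule (the Status ADT and winner helper are illustrative; the real MemberStatus also includes states such as Exiting and Down that these tests do not exercise):

    sealed trait Status
    case object Joining extends Status
    case object Up extends Status
    case object Leaving extends Status
    case object Removed extends Status

    // Higher value = higher merge priority, as implied by the assertions:
    // a1 (Up) beats a2 (Joining), d2 (Removed) beats d1 (Leaving),
    // and c1 (Leaving) beats c2 (Up).
    def priority(s: Status): Int = s match {
      case Removed => 3
      case Leaving => 2
      case Up      => 1
      case Joining => 0
    }

    // When the same node appears on both sides of a merge, keep the record
    // whose status has the higher priority.
    def winner(s1: Status, s2: Status): Status =
      if (priority(s1) >= priority(s2)) s1 else s2
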
@@ -27,67 +27,67 @@ class VectorClockSpec extends AkkaSpec {

    "pass misc comparison test 1" in {
      val clock1_1 = VectorClock()
      val clock2_1 = clock1_1 + Node("1")
      val clock3_1 = clock2_1 + Node("2")
      val clock4_1 = clock3_1 + Node("1")
      val clock2_1 = clock1_1 :+ Node("1")
      val clock3_1 = clock2_1 :+ Node("2")
      val clock4_1 = clock3_1 :+ Node("1")

      val clock1_2 = VectorClock()
      val clock2_2 = clock1_2 + Node("1")
      val clock3_2 = clock2_2 + Node("2")
      val clock4_2 = clock3_2 + Node("1")
      val clock2_2 = clock1_2 :+ Node("1")
      val clock3_2 = clock2_2 :+ Node("2")
      val clock4_2 = clock3_2 :+ Node("1")

      clock4_1 <> clock4_2 must be(false)
    }

    "pass misc comparison test 2" in {
      val clock1_1 = VectorClock()
      val clock2_1 = clock1_1 + Node("1")
      val clock3_1 = clock2_1 + Node("2")
      val clock4_1 = clock3_1 + Node("1")
      val clock2_1 = clock1_1 :+ Node("1")
      val clock3_1 = clock2_1 :+ Node("2")
      val clock4_1 = clock3_1 :+ Node("1")

      val clock1_2 = VectorClock()
      val clock2_2 = clock1_2 + Node("1")
      val clock3_2 = clock2_2 + Node("2")
      val clock4_2 = clock3_2 + Node("1")
      val clock5_2 = clock4_2 + Node("3")
      val clock2_2 = clock1_2 :+ Node("1")
      val clock3_2 = clock2_2 :+ Node("2")
      val clock4_2 = clock3_2 :+ Node("1")
      val clock5_2 = clock4_2 :+ Node("3")

      clock4_1 < clock5_2 must be(true)
    }

    "pass misc comparison test 3" in {
      var clock1_1 = VectorClock()
      val clock2_1 = clock1_1 + Node("1")
      val clock2_1 = clock1_1 :+ Node("1")

      val clock1_2 = VectorClock()
      val clock2_2 = clock1_2 + Node("2")
      val clock2_2 = clock1_2 :+ Node("2")

      clock2_1 <> clock2_2 must be(true)
    }

    "pass misc comparison test 4" in {
      val clock1_3 = VectorClock()
      val clock2_3 = clock1_3 + Node("1")
      val clock3_3 = clock2_3 + Node("2")
      val clock4_3 = clock3_3 + Node("1")
      val clock2_3 = clock1_3 :+ Node("1")
      val clock3_3 = clock2_3 :+ Node("2")
      val clock4_3 = clock3_3 :+ Node("1")

      val clock1_4 = VectorClock()
      val clock2_4 = clock1_4 + Node("1")
      val clock3_4 = clock2_4 + Node("1")
      val clock4_4 = clock3_4 + Node("3")
      val clock2_4 = clock1_4 :+ Node("1")
      val clock3_4 = clock2_4 :+ Node("1")
      val clock4_4 = clock3_4 :+ Node("3")

      clock4_3 <> clock4_4 must be(true)
    }

    "pass misc comparison test 5" in {
      val clock1_1 = VectorClock()
      val clock2_1 = clock1_1 + Node("2")
      val clock3_1 = clock2_1 + Node("2")
      val clock2_1 = clock1_1 :+ Node("2")
      val clock3_1 = clock2_1 :+ Node("2")

      val clock1_2 = VectorClock()
      val clock2_2 = clock1_2 + Node("1")
      val clock3_2 = clock2_2 + Node("2")
      val clock4_2 = clock3_2 + Node("2")
      val clock5_2 = clock4_2 + Node("3")
      val clock2_2 = clock1_2 :+ Node("1")
      val clock3_2 = clock2_2 :+ Node("2")
      val clock4_2 = clock3_2 :+ Node("2")
      val clock5_2 = clock4_2 :+ Node("3")

      clock3_1 < clock5_2 must be(true)
      clock5_2 > clock3_1 must be(true)

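This hunk, and the ones that follow, only rename the clock-increment operator from + to :+; the comparison semantics are unchanged. A self-contained sketch of those semantics, simplified from the real akka.cluster.VectorClock (which also carries timestamps):

    final case class Clock(versions: Map[String, Long] = Map.empty) {
      // :+ bumps the counter of one node; this is the operator being renamed from +.
      def :+(node: String): Clock =
        Clock(versions + (node -> (versions.getOrElse(node, 0L) + 1L)))

      // Strictly "happened before": no counter is ahead and the clocks differ.
      def <(that: Clock): Boolean =
        versions.forall { case (n, v) => v <= that.versions.getOrElse(n, 0L) } && this != that

      def >(that: Clock): Boolean = that < this

      // <> means concurrent: neither clock happened before the other.
      def <>(that: Clock): Boolean = !(this < that) && !(that < this) && this != that
    }

Under this model the identical clocks of test 1 are not concurrent (clock4_1 <> clock4_2 is false), while the single, disjoint increments of test 3 are (clock2_1 <> clock2_2 is true).
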
@@ -95,12 +95,12 @@ class VectorClockSpec extends AkkaSpec {

    "pass misc comparison test 6" in {
      val clock1_1 = VectorClock()
      val clock2_1 = clock1_1 + Node("1")
      val clock3_1 = clock2_1 + Node("2")
      val clock2_1 = clock1_1 :+ Node("1")
      val clock3_1 = clock2_1 :+ Node("2")

      val clock1_2 = VectorClock()
      val clock2_2 = clock1_2 + Node("1")
      val clock3_2 = clock2_2 + Node("1")
      val clock2_2 = clock1_2 :+ Node("1")
      val clock3_2 = clock2_2 :+ Node("1")

      clock3_1 <> clock3_2 must be(true)
      clock3_2 <> clock3_1 must be(true)
@@ -108,14 +108,14 @@ class VectorClockSpec extends AkkaSpec {

    "pass misc comparison test 7" in {
      val clock1_1 = VectorClock()
      val clock2_1 = clock1_1 + Node("1")
      val clock3_1 = clock2_1 + Node("2")
      val clock4_1 = clock3_1 + Node("2")
      val clock5_1 = clock4_1 + Node("3")
      val clock2_1 = clock1_1 :+ Node("1")
      val clock3_1 = clock2_1 :+ Node("2")
      val clock4_1 = clock3_1 :+ Node("2")
      val clock5_1 = clock4_1 :+ Node("3")

      val clock1_2 = VectorClock()
      val clock2_2 = clock1_2 + Node("2")
      val clock3_2 = clock2_2 + Node("2")
      val clock2_2 = clock1_2 :+ Node("2")
      val clock3_2 = clock2_2 :+ Node("2")

      clock5_1 <> clock3_2 must be(true)
      clock3_2 <> clock5_1 must be(true)
@@ -127,14 +127,14 @@ class VectorClockSpec extends AkkaSpec {
      val node3 = Node("3")

      val clock1_1 = VectorClock()
      val clock2_1 = clock1_1 + node1
      val clock3_1 = clock2_1 + node2
      val clock4_1 = clock3_1 + node2
      val clock5_1 = clock4_1 + node3
      val clock2_1 = clock1_1 :+ node1
      val clock3_1 = clock2_1 :+ node2
      val clock4_1 = clock3_1 :+ node2
      val clock5_1 = clock4_1 :+ node3

      val clock1_2 = VectorClock()
      val clock2_2 = clock1_2 + node2
      val clock3_2 = clock2_2 + node2
      val clock2_2 = clock1_2 :+ node2
      val clock3_2 = clock2_2 :+ node2

      val merged1 = clock3_2 merge clock5_1
      merged1.versions.size must be(3)
@@ -164,14 +164,14 @@ class VectorClockSpec extends AkkaSpec {
      val node4 = Node("4")

      val clock1_1 = VectorClock()
      val clock2_1 = clock1_1 + node1
      val clock3_1 = clock2_1 + node2
      val clock4_1 = clock3_1 + node2
      val clock5_1 = clock4_1 + node3
      val clock2_1 = clock1_1 :+ node1
      val clock3_1 = clock2_1 :+ node2
      val clock4_1 = clock3_1 :+ node2
      val clock5_1 = clock4_1 :+ node3

      val clock1_2 = VectorClock()
      val clock2_2 = clock1_2 + node4
      val clock3_2 = clock2_2 + node4
      val clock2_2 = clock1_2 :+ node4
      val clock3_2 = clock2_2 :+ node4

      val merged1 = clock3_2 merge clock5_1
      merged1.versions.size must be(4)

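Both merge tests above assert only the size of the merged version map, 3 and 4 entries respectively, which is consistent with merge taking the element-wise maximum over the union of nodes. Extending the Clock sketch above (again illustrative, not the commit's implementation):

    def merge(c1: Clock, c2: Clock): Clock =
      Clock((c1.versions.keySet ++ c2.versions.keySet).map { n =>
        n -> math.max(c1.versions.getOrElse(n, 0L), c2.versions.getOrElse(n, 0L))
      }.toMap)
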
@@ -204,8 +204,8 @@ class VectorClockSpec extends AkkaSpec {
      val v1 = VectorClock()
      val v2 = VectorClock()

      val vv1 = v1 + node1
      val vv2 = v2 + node2
      val vv1 = v1 :+ node1
      val vv2 = v2 :+ node2

      (vv1 > v1) must equal(true)
      (vv2 > v2) must equal(true)
@@ -225,12 +225,12 @@ class VectorClockSpec extends AkkaSpec {
      val a = VectorClock()
      val b = VectorClock()

      val a1 = a + node1
      val b1 = b + node2
      val a1 = a :+ node1
      val b1 = b :+ node2

      var a2 = a1 + node1
      var a2 = a1 :+ node1
      var c = a2.merge(b1)
      var c1 = c + node3
      var c1 = c :+ node3

      (c1 > a2) must equal(true)
      (c1 > b1) must equal(true)
@@ -239,7 +239,7 @@ class VectorClockSpec extends AkkaSpec {

  "An instance of Versioned" must {
    class TestVersioned(val version: VectorClock = VectorClock()) extends Versioned[TestVersioned] {
      def +(node: Node): TestVersioned = new TestVersioned(version + node)
      def :+(node: Node): TestVersioned = new TestVersioned(version :+ node)
    }

    import Versioned.latestVersionOf

@@ -251,67 +251,67 @@ class VectorClockSpec extends AkkaSpec {

    "happen before an identical versioned with a single additional event" in {
      val versioned1_1 = new TestVersioned()
      val versioned2_1 = versioned1_1 + Node("1")
      val versioned3_1 = versioned2_1 + Node("2")
      val versioned4_1 = versioned3_1 + Node("1")
      val versioned2_1 = versioned1_1 :+ Node("1")
      val versioned3_1 = versioned2_1 :+ Node("2")
      val versioned4_1 = versioned3_1 :+ Node("1")

      val versioned1_2 = new TestVersioned()
      val versioned2_2 = versioned1_2 + Node("1")
      val versioned3_2 = versioned2_2 + Node("2")
      val versioned4_2 = versioned3_2 + Node("1")
      val versioned5_2 = versioned4_2 + Node("3")
      val versioned2_2 = versioned1_2 :+ Node("1")
      val versioned3_2 = versioned2_2 :+ Node("2")
      val versioned4_2 = versioned3_2 :+ Node("1")
      val versioned5_2 = versioned4_2 :+ Node("3")

      latestVersionOf[TestVersioned](versioned4_1, versioned5_2) must be(versioned5_2)
    }

    "pass misc comparison test 1" in {
      var versioned1_1 = new TestVersioned()
      val versioned2_1 = versioned1_1 + Node("1")
      val versioned2_1 = versioned1_1 :+ Node("1")

      val versioned1_2 = new TestVersioned()
      val versioned2_2 = versioned1_2 + Node("2")
      val versioned2_2 = versioned1_2 :+ Node("2")

      latestVersionOf[TestVersioned](versioned2_1, versioned2_2) must be(versioned2_2)
    }

    "pass misc comparison test 2" in {
      val versioned1_3 = new TestVersioned()
      val versioned2_3 = versioned1_3 + Node("1")
      val versioned3_3 = versioned2_3 + Node("2")
      val versioned4_3 = versioned3_3 + Node("1")
      val versioned2_3 = versioned1_3 :+ Node("1")
      val versioned3_3 = versioned2_3 :+ Node("2")
      val versioned4_3 = versioned3_3 :+ Node("1")

      val versioned1_4 = new TestVersioned()
      val versioned2_4 = versioned1_4 + Node("1")
      val versioned3_4 = versioned2_4 + Node("1")
      val versioned4_4 = versioned3_4 + Node("3")
      val versioned2_4 = versioned1_4 :+ Node("1")
      val versioned3_4 = versioned2_4 :+ Node("1")
      val versioned4_4 = versioned3_4 :+ Node("3")

      latestVersionOf[TestVersioned](versioned4_3, versioned4_4) must be(versioned4_4)
    }

    "pass misc comparison test 3" in {
      val versioned1_1 = new TestVersioned()
      val versioned2_1 = versioned1_1 + Node("2")
      val versioned3_1 = versioned2_1 + Node("2")
      val versioned2_1 = versioned1_1 :+ Node("2")
      val versioned3_1 = versioned2_1 :+ Node("2")

      val versioned1_2 = new TestVersioned()
      val versioned2_2 = versioned1_2 + Node("1")
      val versioned3_2 = versioned2_2 + Node("2")
      val versioned4_2 = versioned3_2 + Node("2")
      val versioned5_2 = versioned4_2 + Node("3")
      val versioned2_2 = versioned1_2 :+ Node("1")
      val versioned3_2 = versioned2_2 :+ Node("2")
      val versioned4_2 = versioned3_2 :+ Node("2")
      val versioned5_2 = versioned4_2 :+ Node("3")

      latestVersionOf[TestVersioned](versioned3_1, versioned5_2) must be(versioned5_2)
    }

    "pass misc comparison test 4" in {
      val versioned1_1 = new TestVersioned()
      val versioned2_1 = versioned1_1 + Node("1")
      val versioned3_1 = versioned2_1 + Node("2")
      val versioned4_1 = versioned3_1 + Node("2")
      val versioned5_1 = versioned4_1 + Node("3")
      val versioned2_1 = versioned1_1 :+ Node("1")
      val versioned3_1 = versioned2_1 :+ Node("2")
      val versioned4_1 = versioned3_1 :+ Node("2")
      val versioned5_1 = versioned4_1 :+ Node("3")

      val versioned1_2 = new TestVersioned()
      val versioned2_2 = versioned1_2 + Node("2")
      val versioned3_2 = versioned2_2 + Node("2")
      val versioned2_2 = versioned1_2 :+ Node("2")
      val versioned3_2 = versioned2_2 :+ Node("2")

      latestVersionOf[TestVersioned](versioned5_1, versioned3_2) must be(versioned3_2)
    }

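Taken together, the latestVersionOf assertions above imply a simple selection rule: return the first argument only when the second is strictly older, so on concurrent (<>) clocks the second argument wins. A hypothetical sketch in terms of the Clock model above; the real helper is defined on Versioned[T] and may be implemented differently:

    def latestVersionOf[T](v1: T, v2: T)(clockOf: T => Clock): T =
      if (clockOf(v2) < clockOf(v1)) v1 else v2
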
@@ -130,7 +130,7 @@ if needed. It will return ``None`` if a timeout occurs.
.. includecode:: code/docs/actor/TypedActorDocTestBase.java
   :include: typed-actor-call-strict

This will block for as long as the timeout that was set in the ``Props` of the Typed Actor,
This will block for as long as the timeout that was set in the ``Props`` of the Typed Actor,
if needed. It will throw a ``java.util.concurrent.TimeoutException`` if a timeout occurs.

Request-reply-with-future message send

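For context, the per-proxy timeout that the corrected paragraph refers to is configured when the Typed Actor proxy is created. A minimal Scala sketch, assuming the Akka 2.0-era TypedActor API (Squarer and SquarerImpl are illustrative names, not from this commit):

    import akka.actor.{ ActorSystem, TypedActor, TypedProps }
    import akka.util.Timeout
    import akka.util.duration._

    trait Squarer { def square(i: Int): Int } // a strict method: blocks for the result
    class SquarerImpl extends Squarer { def square(i: Int) = i * i }

    val system = ActorSystem("docs")
    val squarer: Squarer = TypedActor(system).typedActorOf(
      TypedProps[SquarerImpl]().withTimeout(Timeout(500 millis)))

    squarer.square(3) // returns 9, or throws java.util.concurrent.TimeoutException on timeout
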
@@ -93,7 +93,7 @@ fi
declare -r version=$1
declare -r publish_path="${release_server}:${release_path}"

[[ `java -version 2>&1 | grep "java version" | awk '{print $3}' | tr -d \" | awk '{split($0, array, ".")} END{print array[2]}'` -eq 6 ]] || fail "Java version is not 1.6"
[[ `java -version 2>&1 | head -1 | cut -d ' ' -f3 | cut -d '.' -f2` -eq 6 ]] || fail "Java version is not 1.6"

# check for a git command
type -P git &> /dev/null || fail "git command not found"

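Assuming the usual `java -version` banner, the simplified check works as follows: head -1 keeps the line java version "1.6.0_33", cut -d ' ' -f3 extracts "1.6.0_33", and cut -d '.' -f2 yields the minor version 6 (the leading quote stays in field 1), which removes the need for the earlier grep/awk/tr pipeline.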