scheduleWithFixedDelay vs scheduleAtFixedRate, #26910
* The previous `schedule` method tries to maintain a fixed average frequency over time, but that can result in undesired bursts of scheduled tasks after a long GC pause or if the JVM process has been suspended. The same applies to all other periodic scheduled message sending via the various Timer APIs.
* Most of the time "fixed delay" is the more desirable behavior.
* We can't simply change the existing behavior, because that would be too big a behavioral change and some users may depend on it.
* Deprecate the old `schedule` and introduce new `scheduleWithFixedDelay` and `scheduleAtFixedRate`. When fixing the deprecation warning, users should make a conscious decision about which behavior to use (`scheduleWithFixedDelay` in most cases); see the sketch below.
* Streams
* SchedulerSpec
* test both fixed delay and fixed rate
* TimerSpec
* FSM and PersistentFSM
* mima
* runnable as second parameter list, also in typed.Scheduler
* IllegalStateException vs SchedulerException
* deprecated annotations
* api and reference docs, all places
* migration guide
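A minimal sketch of the new API from the user's point of view. The `SchedulerExample` setup and the tick printing are illustrative only; the method names and the runnable-in-second-parameter-list shape are the ones introduced by this change:

```scala
import scala.concurrent.duration._
import akka.actor.ActorSystem

object SchedulerExample extends App {
  val system = ActorSystem("example")
  import system.dispatcher // implicit ExecutionContext for the scheduler

  // Deprecated: `schedule` has fixed-rate semantics, so a long GC pause
  // or a suspended JVM can be followed by a burst of catch-up runs.
  // system.scheduler.schedule(1.second, 1.second)(println("tick"))

  // Preferred in most cases: the next run is scheduled relative to the
  // completion of the previous one, so there are no catch-up bursts.
  val fixedDelay = system.scheduler.scheduleWithFixedDelay(1.second, 1.second) { () =>
    println("tick (fixed delay)")
  }

  // Opt in explicitly when a stable average frequency matters more than
  // avoiding bursts.
  val fixedRate = system.scheduler.scheduleAtFixedRate(1.second, 1.second) { () =>
    println("tick (fixed rate)")
  }

  Thread.sleep(5000)
  fixedDelay.cancel() // both variants return a Cancellable
  fixedRate.cancel()
  system.terminate()
}
```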
commit 10d32fceb9 (parent 72cfc2485e)
99 changed files with 2285 additions and 909 deletions
@@ -569,7 +569,7 @@ class ClusterSingletonManager(singletonProps: Props, terminationMessage: Any, se
     // subscribe to cluster changes, re-subscribe when restart
     cluster.subscribe(self, ClusterEvent.InitialStateAsEvents, classOf[MemberRemoved], classOf[MemberDowned])

-    setTimer(CleanupTimer, Cleanup, 1.minute, repeat = true)
+    startTimerWithFixedDelay(CleanupTimer, Cleanup, 1.minute)

     // defer subscription to avoid some jitter when
     // starting/joining several nodes at the same time
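The same mechanical substitution repeats through the remaining hunks: `setTimer(..., repeat = true)` becomes `startTimerWithFixedDelay`, and `setTimer(..., repeat = false)` becomes `startSingleTimer`. A minimal, self-contained FSM sketch showing both new calls (the `TimerFsm` states and messages below are hypothetical, not part of this commit):

```scala
import scala.concurrent.duration._
import akka.actor.FSM

object TimerFsm {
  // hypothetical states and messages, for illustration only
  sealed trait State
  case object Idle extends State
  case object Cleanup
  case object Retry
}

class TimerFsm extends FSM[TimerFsm.State, Unit] {
  import TimerFsm._

  startWith(Idle, ())

  override def preStart(): Unit = {
    super.preStart()
    // was: setTimer("cleanup", Cleanup, 1.minute, repeat = true)
    startTimerWithFixedDelay("cleanup", Cleanup, 1.minute)
  }

  when(Idle) {
    case Event(Cleanup, _) =>
      // was: setTimer("retry", Retry, 5.seconds, repeat = false)
      startSingleTimer("retry", Retry, 5.seconds)
      stay()
    case Event(Retry, _) =>
      stay()
  }

  initialize()
}
```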
@@ -723,7 +723,7 @@ class ClusterSingletonManager(singletonProps: Props, terminationMessage: Any, se
       if (count <= maxHandOverRetries) {
         logInfo("Retry [{}], sending HandOverToMe to [{}]", count, previousOldestOption.map(_.address))
         previousOldestOption.foreach(node => peer(node.address) ! HandOverToMe)
-        setTimer(HandOverRetryTimer, HandOverRetry(count + 1), handOverRetryInterval, repeat = false)
+        startSingleTimer(HandOverRetryTimer, HandOverRetry(count + 1), handOverRetryInterval)
         stay()
       } else if (previousOldestOption.forall(removed.contains)) {
         // can't send HandOverToMe, previousOldest unknown for new node (or restart)
@@ -771,7 +771,7 @@ class ClusterSingletonManager(singletonProps: Props, terminationMessage: Any, se
         if (result) {
           gotoOldest()
         } else {
-          setTimer(LeaseRetryTimer, LeaseRetry, leaseRetryInterval)
+          startSingleTimer(LeaseRetryTimer, LeaseRetry, leaseRetryInterval)
           stay.using(AcquiringLeaseData(leaseRequestInProgress = false, None))
         }
     case Event(Terminated(ref), AcquiringLeaseData(_, Some(singleton))) if ref == singleton =>
@@ -780,7 +780,7 @@ class ClusterSingletonManager(singletonProps: Props, terminationMessage: Any, se
       tryAcquireLease()
     case Event(AcquireLeaseFailure(t), _) =>
       log.error(t, "failed to get lease (will be retried)")
-      setTimer(LeaseRetryTimer, LeaseRetry, leaseRetryInterval)
+      startSingleTimer(LeaseRetryTimer, LeaseRetry, leaseRetryInterval)
       stay.using(AcquiringLeaseData(leaseRequestInProgress = false, None))
     case Event(LeaseRetry, _) =>
       // If lease was lost (so previous state was oldest) then we don't try and get the lease
@@ -827,11 +827,11 @@ class ClusterSingletonManager(singletonProps: Props, terminationMessage: Any, se
         case Some(a) =>
           // send TakeOver request in case the new oldest doesn't know previous oldest
           peer(a.address) ! TakeOverFromMe
-          setTimer(TakeOverRetryTimer, TakeOverRetry(1), handOverRetryInterval, repeat = false)
+          startSingleTimer(TakeOverRetryTimer, TakeOverRetry(1), handOverRetryInterval)
           goto(WasOldest).using(WasOldestData(singleton, newOldestOption = Some(a)))
         case None =>
           // new oldest will initiate the hand-over
-          setTimer(TakeOverRetryTimer, TakeOverRetry(1), handOverRetryInterval, repeat = false)
+          startSingleTimer(TakeOverRetryTimer, TakeOverRetry(1), handOverRetryInterval)
           goto(WasOldest).using(WasOldestData(singleton, newOldestOption = None))
       }
     }
@@ -890,7 +890,7 @@ class ClusterSingletonManager(singletonProps: Props, terminationMessage: Any, se
         else
           log.debug("Retry [{}], sending TakeOverFromMe to [{}]", count, newOldestOption.map(_.address))
         newOldestOption.foreach(node => peer(node.address) ! TakeOverFromMe)
-        setTimer(TakeOverRetryTimer, TakeOverRetry(count + 1), handOverRetryInterval, repeat = false)
+        startSingleTimer(TakeOverRetryTimer, TakeOverRetry(count + 1), handOverRetryInterval)
         stay
       } else
         throw new ClusterSingletonManagerIsStuck(s"Expected hand-over to [$newOldestOption] never occurred")
@@ -1043,7 +1043,7 @@ class ClusterSingletonManager(singletonProps: Props, terminationMessage: Any, se
   }

   onTransition {
-    case _ -> BecomingOldest => setTimer(HandOverRetryTimer, HandOverRetry(1), handOverRetryInterval, repeat = false)
+    case _ -> BecomingOldest => startSingleTimer(HandOverRetryTimer, HandOverRetry(1), handOverRetryInterval)
   }

   onTransition {
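The commit message also lists `typed.Scheduler`, and the typed timer API gets the same explicit fixed-delay variant. A minimal sketch, assuming a hypothetical `Tick` protocol:

```scala
import scala.concurrent.duration._
import akka.actor.typed.Behavior
import akka.actor.typed.scaladsl.Behaviors

object TypedTicker {
  sealed trait Command
  case object Tick extends Command

  def apply(): Behavior[Command] =
    Behaviors.withTimers { timers =>
      // fixed-delay semantics, matching startTimerWithFixedDelay above
      timers.startTimerWithFixedDelay(Tick, 1.second)
      Behaviors.receiveMessage { case Tick =>
        // periodic work goes here
        Behaviors.same
      }
    }
}
```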