=cls #18722 fix DDataShardCoordinator init
* the become logic was wrong when watchStateActors triggers an immediate state update
parent ef901becee
commit 27995af79f
4 changed files with 14 additions and 18 deletions
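In short: the coordinator used to keep the after-update callback in a mutable field and called context.become(waitingForStateInitialized) after watchStateActors(), so an update triggered from inside watchStateActors() could have its waitingForUpdate behavior immediately overwritten. The fix passes the callback into waitingForUpdate itself and stacks behaviors with discardOld = false / unbecome(). A minimal standalone sketch of that pattern, not the real coordinator API (Sketch, Update, Acked and afterUpdate are invented names):

    import akka.actor.{ Actor, ActorSystem, Props, Stash }

    // hypothetical message types, only for illustration
    final case class Update(event: String)
    final case class Acked(event: String)

    class Sketch extends Actor with Stash {
      def receive: Receive = active

      def active: Receive = {
        case Update(evt) => update(evt) { e => println(s"state updated with $e") }
        case msg         => println(s"active got $msg")
      }

      // the callback travels with the behavior instead of living in a mutable field
      def update(evt: String)(afterUpdate: String => Unit): Unit = {
        context.become(waitingForUpdate(evt, afterUpdate), discardOld = false)
        self ! Acked(evt) // stand-in for the replicator round-trip
      }

      def waitingForUpdate(evt: String, afterUpdate: String => Unit): Receive = {
        case Acked(`evt`) =>
          context.unbecome()   // pop back to whatever the actor was doing before
          afterUpdate(evt)
          unstashAll()
        case _ =>
          stash()              // everything else waits for the acknowledgement
      }
    }

    object BecomeSketch extends App {
      val system = ActorSystem("become-sketch")
      val sketch = system.actorOf(Props[Sketch], "sketch")
      sketch ! Update("evt-1")
      sketch ! "queued while the update is in flight"
      Thread.sleep(500)
      system.terminate()
    }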
@@ -801,8 +801,6 @@ class DDataShardCoordinator(typeName: String, settings: ClusterShardingSettings,

   node.subscribe(self, ClusterShuttingDown.getClass)

-  var afterUpdateCallback: DomainEvent ⇒ Unit = _
-
   // get state from ddata replicator, repeat until GetSuccess
   getState()

@@ -812,8 +810,9 @@ class DDataShardCoordinator(typeName: String, settings: ClusterShardingSettings,

   def waitingForState: Receive = ({
     case g @ GetSuccess(CoordinatorStateKey, _) ⇒
       state = g.get(CoordinatorStateKey).value
-      watchStateActors()
       context.become(waitingForStateInitialized)
+      // note that watchStateActors may call update
+      watchStateActors()

     case GetFailure(CoordinatorStateKey, _) ⇒
       log.error(

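Why the reorder matters: context.become with the default discardOld = true simply replaces the current behavior, so whichever become runs last wins. A tiny self-contained demo of that effect (OrderingDemo and its string messages are made up for illustration):

    import akka.actor.{ Actor, ActorSystem, Props }

    class OrderingDemo extends Actor {
      def waitingForAck: Receive = { case "ack" => println("ack handled by the waiting behavior") }
      def initialized: Receive   = { case m     => println(s"initialized got unexpected: $m") }

      def receive: Receive = {
        case "init" =>
          // old order: a side effect becomes "waitingForAck" first ...
          context.become(waitingForAck)
          // ... and the later become replaces it (discardOld defaults to true)
          context.become(initialized)
          self ! "ack" // ends up in `initialized`, not in `waitingForAck`
      }
    }

    object OrderingDemo extends App {
      val system = ActorSystem("ordering-demo")
      system.actorOf(Props[OrderingDemo]) ! "init"
      Thread.sleep(300)
      system.terminate()
    }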
@@ -840,10 +839,12 @@ class DDataShardCoordinator(typeName: String, settings: ClusterShardingSettings,
   }

   // this state will stash all messages until it receives UpdateSuccess
-  def waitingForUpdate[E <: DomainEvent](evt: E): Receive = {
+  def waitingForUpdate[E <: DomainEvent](evt: E, afterUpdateCallback: DomainEvent ⇒ Unit): Receive = {
     case UpdateSuccess(CoordinatorStateKey, Some(`evt`)) ⇒
       log.debug("The coordinator state was successfully updated with {}", evt)
-      updateSuccess(evt)
+      context.unbecome()
+      afterUpdateCallback(evt)
+      unstashAll()

     case UpdateTimeout(CoordinatorStateKey, Some(`evt`)) ⇒
       log.error(

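A side note on the Some(`evt`) patterns above: the backticks make the pattern match the existing value of evt rather than binding a fresh variable, so only the acknowledgement for the in-flight event pops the waiting behavior. Plain-Scala illustration (no Akka types, names made up):

    object BacktickPattern extends App {
      val evt = "shard-home-allocated"

      def classify(ack: Option[String]): String = ack match {
        case Some(`evt`) => "ack for the event we are waiting on"   // matches the value of evt
        case Some(other) => s"ack for a different event: $other"    // binds a fresh variable
        case None        => "no ack"
      }

      println(classify(Some("shard-home-allocated"))) // ack for the event we are waiting on
      println(classify(Some("region-terminated")))    // ack for a different event: region-terminated
    }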
@@ -870,18 +871,10 @@ class DDataShardCoordinator(typeName: String, settings: ClusterShardingSettings,
   }

   def update[E <: DomainEvent](evt: E)(f: E ⇒ Unit): Unit = {
-    afterUpdateCallback = f.asInstanceOf[DomainEvent ⇒ Unit]
-    context.become(waitingForUpdate(evt))
+    context.become(waitingForUpdate(evt, f.asInstanceOf[DomainEvent ⇒ Unit]), discardOld = false)
     sendUpdate(evt)
   }

-  def updateSuccess(evt: DomainEvent): Unit = {
-    afterUpdateCallback(evt)
-    afterUpdateCallback = null
-    context.become(active)
-    unstashAll()
-  }
-
   def getState(): Unit =
     replicator ! Get(CoordinatorStateKey, ReadMajority(waitingForStateTimeout))

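The discardOld = false here pairs with the context.unbecome() in waitingForUpdate: the waiting behavior is pushed onto the behavior stack and popped once the write is acknowledged, instead of jumping straight back to active. A small self-contained demo of that stack semantics (StackDemo and its messages are invented for illustration):

    import akka.actor.{ Actor, ActorSystem, Props }

    class StackDemo extends Actor {
      def receive: Receive = base

      def base: Receive = {
        case "push" => context.become(top, discardOld = false) // keep base underneath
        case "what" => println("base")
      }

      def top: Receive = {
        case "pop"  => context.unbecome()                      // back to base
        case "what" => println("top")
      }
    }

    object StackDemo extends App {
      val system = ActorSystem("stack-demo")
      val ref = system.actorOf(Props[StackDemo])
      ref ! "what" // base
      ref ! "push"
      ref ! "what" // top
      ref ! "pop"
      ref ! "what" // base again
      Thread.sleep(300)
      system.terminate()
    }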
@@ -574,7 +574,10 @@ object MiMa extends AutoPlugin {

       //#18353 Changes to methods and fields private to remoting actors
       ProblemFilters.exclude[MissingMethodProblem]("akka.remote.EndpointManager.retryGateEnabled"),
-      ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.remote.EndpointManager.pruneTimerCancellable")
+      ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.remote.EndpointManager.pruneTimerCancellable"),
+
+      // #18722 internal changes to actor
+      FilterAnyProblem("akka.cluster.sharding.DDataShardCoordinator")
     )
   )
 }
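For context, FilterAnyProblem(...) is presumably a helper defined in Akka's own build rather than a plugin built-in; the underlying mechanism is sbt-mima-plugin's issue filters. A hedged build.sbt sketch of that mechanism (coordinates and class names are placeholders; key spellings follow current plugin versions and problem-type names vary between MiMa releases):

    import com.typesafe.tools.mima.core._

    // tell MiMa which released artifact to diff against ...
    mimaPreviousArtifacts := Set("com.example" %% "my-lib" % "1.0.0")
    // ... and which reported incompatibilities to ignore (e.g. internal classes)
    mimaBinaryIssueFilters ++= Seq(
      ProblemFilters.exclude[MissingMethodProblem]("com.example.internal.MyActor.someInternalMethod"),
      ProblemFilters.exclude[IncompatibleResultTypeProblem]("com.example.internal.MyActor.someTimer")
    )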