Solve wrong barrier problem, see #2583
* The problem was that we didn't wait for the testconductor.shutdown Future to complete, and therefore barriers could be triggered in unexpected order. The reason we didn't await was that during shutdown the Future was completed with a client-disconnected failure. I have fixed that and added await to all shutdowns.
This commit is contained in:
parent
b68c7a8469
commit
5e83df74e9
8 changed files with 16 additions and 11 deletions
|
|
@ -18,7 +18,7 @@ case class LeaderElectionMultiNodeConfig(failureDetectorPuppet: Boolean) extends
|
|||
val third = role("third")
|
||||
val fourth = role("fourth")
|
||||
|
||||
commonConfig(debugConfig(on = true).withFallback(MultiNodeClusterSpec.clusterConfig(failureDetectorPuppet)))
|
||||
commonConfig(debugConfig(on = false).withFallback(MultiNodeClusterSpec.clusterConfig(failureDetectorPuppet)))
|
||||
}
|
||||
|
||||
class LeaderElectionWithFailureDetectorPuppetMultiJvmNode1 extends LeaderElectionSpec(failureDetectorPuppet = true)
|
||||
|
|
@ -70,7 +70,7 @@ abstract class LeaderElectionSpec(multiNodeConfig: LeaderElectionMultiNodeConfig
|
|||
case `controller` ⇒
|
||||
val leaderAddress = address(leader)
|
||||
enterBarrier("before-shutdown" + n)
|
||||
testConductor.shutdown(leader, 0)
|
||||
testConductor.shutdown(leader, 0).await
|
||||
enterBarrier("after-shutdown" + n, "after-unavailable" + n, "after-down" + n, "completed" + n)
|
||||
|
||||
case `leader` ⇒
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue