From 1bb8f1737f0983a7e20104ca13e133ebb41fc917 Mon Sep 17 00:00:00 2001
From: Patrik Nordwall
Date: Tue, 8 Nov 2016 13:37:34 +0100
Subject: [PATCH 1/2] increase barrier-timeout in ClusterShardingSpec, #21718

* In the logs of the failing test we can see that the first node is
  removed as expected and then comes back into the membership, which is
  possible in the case of a conflicting membership state merge. It is
  supposed to be removed again by the auto-down, but that doesn't
  happen within the barrier-timeout.
---
 .../scala/akka/cluster/sharding/ClusterShardingSpec.scala | 1 +
 1 file changed, 1 insertion(+)

diff --git a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSpec.scala
index 36a251628d..3e1c2bdba8 100644
--- a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSpec.scala
+++ b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSpec.scala
@@ -158,6 +158,7 @@ abstract class ClusterShardingSpecConfig(
         max-simultaneous-rebalance = 1
       }
     }
+    akka.testconductor.barrier-timeout = 70s
     """))
   nodeConfig(sixth) {
     ConfigFactory.parseString("""akka.cluster.roles = ["frontend"]""")

From 48e85953d988290c1ca8d09229d4b47fcce9ce5e Mon Sep 17 00:00:00 2001
From: Patrik Nordwall
Date: Tue, 8 Nov 2016 14:01:23 +0100
Subject: [PATCH 2/2] harden ClusterShardingSpec, #21535

---
 .../cluster/sharding/ClusterShardingSpec.scala     | 15 +++++++++------
 1 file changed, 9 insertions(+), 6 deletions(-)

diff --git a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSpec.scala
index 3e1c2bdba8..b6bbf9a57d 100644
--- a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSpec.scala
+++ b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSpec.scala
@@ -189,12 +189,10 @@ object PersistentClusterShardingSpecConfig extends ClusterShardingSpecConfig("pe
 object DDataClusterShardingSpecConfig extends ClusterShardingSpecConfig("ddata")
 object PersistentClusterShardingWithEntityRecoverySpecConfig extends ClusterShardingSpecConfig(
   "persistence",
-  "all"
-)
+  "all")
 object DDataClusterShardingWithEntityRecoverySpecConfig extends ClusterShardingSpecConfig(
   "ddata",
-  "constant"
-)
+  "constant")
 
 class PersistentClusterShardingSpec extends ClusterShardingSpec(PersistentClusterShardingSpecConfig)
 class DDataClusterShardingSpec extends ClusterShardingSpec(DDataClusterShardingSpecConfig)
@@ -710,8 +708,13 @@ abstract class ClusterShardingSpec(config: ClusterShardingSpecConfig) extends Mu
       //Check that counter 1 is now alive again, even though we have
       // not sent a message to it via the ShardRegion
       val counter1 = system.actorSelection(lastSender.path.parent / "1")
-      counter1 ! Identify(2)
-      expectMsgType[ActorIdentity](3 seconds).ref should not be (None)
+      within(5.seconds) {
+        awaitAssert {
+          val p = TestProbe()
+          counter1.tell(Identify(2), p.ref)
+          p.expectMsgType[ActorIdentity](2.seconds).ref should not be (None)
+        }
+      }
 
       counter1 ! Get(1)
       expectMsg(1)
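
Note: the hardening in PATCH 2/2 follows a common akka-testkit pattern:
instead of a single Identify/expectMsgType, which can race with the entity
still being restarted, the Identify is retried with a fresh TestProbe
inside within + awaitAssert until it succeeds or the deadline expires.
A minimal standalone sketch of that pattern follows; the names
IdentifyRetrySketch, awaitEntityAlive, and entityPath are hypothetical
illustrations, not part of the patch.

    // Sketch only: retry Identify until the entity responds, assuming an
    // ActorSystem with akka-testkit on the classpath.
    import akka.actor.{ ActorIdentity, ActorSystem, Identify }
    import akka.testkit.{ TestKit, TestProbe }
    import scala.concurrent.duration._

    object IdentifyRetrySketch {
      def awaitEntityAlive(system: ActorSystem, entityPath: String): Unit = {
        val kit = new TestKit(system)
        val selection = system.actorSelection(entityPath)
        // within bounds the overall wait; awaitAssert retries the block
        // until it stops throwing or the remaining time runs out
        kit.within(5.seconds) {
          kit.awaitAssert {
            // fresh probe per attempt, so a late reply from a previous
            // attempt cannot satisfy the expectation
            val probe = TestProbe()(system)
            selection.tell(Identify(1), probe.ref)
            assert(probe.expectMsgType[ActorIdentity](2.seconds).ref.nonEmpty)
          }
        }
      }
    }

Using a fresh probe per attempt (rather than the test's own testActor) is
the key design choice: replies to earlier, timed-out attempts land on
discarded probes instead of polluting the next expectation.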