diff --git a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardCoordinator.scala b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardCoordinator.scala
index 778aa401ea..9e7c18df42 100644
--- a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardCoordinator.scala
+++ b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardCoordinator.scala
@@ -1432,6 +1432,7 @@ private[akka] class DDataShardCoordinator(

   private var terminating = false
   private var getShardHomeRequests: Set[(ActorRef, GetShardHome)] = Set.empty
+  private var initialStateRetries = 0

   private val rememberEntitiesStore =
     rememberEntitiesStoreProvider.map { provider =>
@@ -1468,10 +1469,16 @@ private[akka] class DDataShardCoordinator(
         onInitialState(existingState, rememberedShards)

       case GetFailure(CoordinatorStateKey, _) =>
-        log.error(
-          "{}: The ShardCoordinator was unable to get an initial state within 'waiting-for-state-timeout': {} millis (retrying). Has ClusterSharding been started on all nodes?",
-          typeName,
-          stateReadConsistency.timeout.toMillis)
+        initialStateRetries += 1
+        val template =
+          "{}: The ShardCoordinator was unable to get an initial state within 'waiting-for-state-timeout': {} millis (retrying). Has ClusterSharding been started on all nodes?"
+        if (initialStateRetries == 1)
+          log.info(template, typeName, stateReadConsistency.timeout.toMillis)
+        else if (initialStateRetries < 5)
+          log.warning(template, typeName, stateReadConsistency.timeout.toMillis)
+        else
+          log.error(template, typeName, stateReadConsistency.timeout.toMillis)
+        // repeat until GetSuccess
         getCoordinatorState()
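
The change above replaces an unconditional log.error on every retry with a severity that escalates as retries accumulate: info on the first failed read, warning while retries stay below 5, error from the fifth retry on. As a standalone illustration only (not part of the PR; the object and method names below are made up, and println stands in for the Akka LoggingAdapter), the same pattern can be sketched like this:

object EscalatingRetryLogSketch {
  private var retries = 0

  // Mirrors the thresholds in the diff: 1 -> info, 2..4 -> warning, >= 5 -> error.
  private def logRetry(message: String): Unit = {
    retries += 1
    val level =
      if (retries == 1) "INFO"        // first failure: often a benign startup race
      else if (retries < 5) "WARNING" // a few failures: worth noticing
      else "ERROR"                    // persistent failure: likely a real problem
    println(s"[$level] $message (retry #$retries)")
  }

  def main(args: Array[String]): Unit =
    (1 to 6).foreach(_ => logRetry("unable to get initial coordinator state, retrying"))
}

The intent, as far as the diff shows, is to keep a single message template so the wording stays searchable in logs while only the severity changes with the retry count.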