From 2f11ec6f25df6f07191936fb7e5fe4d71a81f37f Mon Sep 17 00:00:00 2001 From: Arnout Engelen Date: Mon, 26 Jun 2017 14:32:57 +0200 Subject: [PATCH 01/34] Introduce cluster 'team' setting and add to Member Introduced cluster-team.md so we can grow the documentation with each PR, but did not add it to the ToC yet. (cherry picked from commit a06badaa03fa9f3c9a942b1468090f758c74a869) --- .../src/main/resources/reference.conf | 5 ++++ .../scala/akka/cluster/ClusterSettings.scala | 3 ++- .../src/main/scala/akka/cluster/Member.scala | 3 +++ .../scala/akka/cluster/MBeanSpec.scala | 12 ++++++---- .../scala/akka/cluster/QuickRestartSpec.scala | 2 +- .../akka/cluster/ClusterConfigSpec.scala | 24 +++++++++++++++++-- .../src/main/paradox/java/cluster-team.md | 1 + .../src/main/paradox/scala/cluster-team.md | 15 ++++++++++++ 8 files changed, 57 insertions(+), 8 deletions(-) create mode 120000 akka-docs/src/main/paradox/java/cluster-team.md create mode 100644 akka-docs/src/main/paradox/scala/cluster-team.md diff --git a/akka-cluster/src/main/resources/reference.conf b/akka-cluster/src/main/resources/reference.conf index d53aece519..8e69ea6d20 100644 --- a/akka-cluster/src/main/resources/reference.conf +++ b/akka-cluster/src/main/resources/reference.conf @@ -65,6 +65,11 @@ akka { # move 'WeaklyUp' members to 'Up' status once convergence has been reached. allow-weakly-up-members = on + # Teams are used to make islands of the cluster that are colocated. It can be used + # to make the cluster "dc-aware", run the cluster in multiple availability zones or regions. + # The team is added to the list of roles of the node with the prefix "team-". + team = "default" + # The roles of this member. List of strings, e.g. roles = ["A", "B"]. # The roles are part of the membership information and can be used by # routers or other services to distribute work to certain member types, diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala index 80453ce1d8..44b92a8009 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala @@ -93,7 +93,8 @@ final class ClusterSettings(val config: Config, val systemName: String) { val AllowWeaklyUpMembers = cc.getBoolean("allow-weakly-up-members") - val Roles: Set[String] = immutableSeq(cc.getStringList("roles")).toSet + val Team: String = cc.getString("team") + val Roles: Set[String] = immutableSeq(cc.getStringList("roles")).toSet + s"team-$Team" val MinNrOfMembers: Int = { cc.getInt("min-nr-of-members") } requiring (_ > 0, "min-nr-of-members must be > 0") diff --git a/akka-cluster/src/main/scala/akka/cluster/Member.scala b/akka-cluster/src/main/scala/akka/cluster/Member.scala index 2fcb9bdf35..145d14f9ef 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Member.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Member.scala @@ -22,6 +22,9 @@ class Member private[cluster] ( val status: MemberStatus, val roles: Set[String]) extends Serializable { + lazy val team = roles.find(_.startsWith("team-")) + .getOrElse(throw new IllegalStateException("Team undefined, should not be possible")) + def address: Address = uniqueAddress.address override def hashCode = uniqueAddress.## diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MBeanSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MBeanSpec.scala index 8ac5e092b7..37ed3d24ae 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MBeanSpec.scala +++ 
b/akka-cluster/src/multi-jvm/scala/akka/cluster/MBeanSpec.scala @@ -120,28 +120,32 @@ abstract class MBeanSpec | { | "address": "${sortedNodes(0)}", | "roles": [ - | "testNode" + | "testNode", + | "team-default" | ], | "status": "Up" | }, | { | "address": "${sortedNodes(1)}", | "roles": [ - | "testNode" + | "testNode", + | "team-default" | ], | "status": "Up" | }, | { | "address": "${sortedNodes(2)}", | "roles": [ - | "testNode" + | "testNode", + | "team-default" | ], | "status": "Up" | }, | { | "address": "${sortedNodes(3)}", | "roles": [ - | "testNode" + | "testNode", + | "team-default" | ], | "status": "Up" | } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/QuickRestartSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/QuickRestartSpec.scala index 3e1da843f2..df14986ac1 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/QuickRestartSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/QuickRestartSpec.scala @@ -91,7 +91,7 @@ abstract class QuickRestartSpec Cluster(system).state.members.size should ===(totalNumberOfNodes) Cluster(system).state.members.map(_.status == MemberStatus.Up) // use the role to test that it is the new incarnation that joined, sneaky - Cluster(system).state.members.flatMap(_.roles) should ===(Set(s"round-$n")) + Cluster(system).state.members.flatMap(_.roles) should ===(Set(s"round-$n", "team-default")) } } enterBarrier("members-up-" + n) diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala index 481556bd77..171e67d42c 100644 --- a/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala @@ -5,9 +5,13 @@ package akka.cluster import language.postfixOps +import scala.concurrent.duration._ + +import com.typesafe.config.ConfigFactory + import akka.testkit.AkkaSpec import akka.dispatch.Dispatchers -import scala.concurrent.duration._ + import akka.remote.PhiAccrualFailureDetector import akka.util.Helpers.ConfigOps import akka.actor.Address @@ -41,7 +45,8 @@ class ClusterConfigSpec extends AkkaSpec { DownRemovalMargin should ===(Duration.Zero) MinNrOfMembers should ===(1) MinNrOfMembersOfRole should ===(Map.empty[String, Int]) - Roles should ===(Set.empty[String]) + Team should ===("default") + Roles should ===(Set("team-default")) JmxEnabled should ===(true) UseDispatcher should ===(Dispatchers.DefaultDispatcherId) GossipDifferentViewProbability should ===(0.8 +- 0.0001) @@ -49,5 +54,20 @@ class ClusterConfigSpec extends AkkaSpec { SchedulerTickDuration should ===(33 millis) SchedulerTicksPerWheel should ===(512) } + + "be able to parse non-default cluster config elements" in { + val settings = new ClusterSettings(ConfigFactory.parseString( + """ + |akka { + | cluster { + | roles = [ "hamlet" ] + | team = "blue" + | } + |} + """.stripMargin).withFallback(ConfigFactory.load()), system.name) + import settings._ + Roles should ===(Set("hamlet", "team-blue")) + Team should ===("blue") + } } } diff --git a/akka-docs/src/main/paradox/java/cluster-team.md b/akka-docs/src/main/paradox/java/cluster-team.md new file mode 120000 index 0000000000..b25b4db61f --- /dev/null +++ b/akka-docs/src/main/paradox/java/cluster-team.md @@ -0,0 +1 @@ +../scala/cluster-team.md \ No newline at end of file diff --git a/akka-docs/src/main/paradox/scala/cluster-team.md b/akka-docs/src/main/paradox/scala/cluster-team.md new file mode 100644 index 0000000000..436d5a9ad8 --- /dev/null +++ 
b/akka-docs/src/main/paradox/scala/cluster-team.md @@ -0,0 +1,15 @@ +# Cluster Team + +@@@ note + +Cluster teams are a work-in-progress feature, and behavior is still expected to change. + +@@@ + +Teams are used to define islands of the cluster that are colocated. +They can be used to make the cluster "dc-aware", run the cluster in multiple availability zones or regions. + +Cluster nodes can be assigned to a team by setting the `akka.cluster.team` setting. +When no team is specified, a node will belong to the 'default' team. + +The team is added to the list of roles of the node with the prefix "team-". \ No newline at end of file From 0115d5fddadf0ede97a1d6fe66f0c3e0a2b4cbf2 Mon Sep 17 00:00:00 2001 From: Arnout Engelen Date: Mon, 26 Jun 2017 15:40:35 +0200 Subject: [PATCH 02/34] Less abbreviations, more reliable test (cherry picked from commit 61e289b276f410654c1b063c33648e0d7ea88e50) --- akka-cluster/src/main/resources/reference.conf | 4 ++-- .../src/main/scala/akka/cluster/ClusterJmx.scala | 2 +- .../src/main/scala/akka/cluster/Member.scala | 2 +- .../multi-jvm/scala/akka/cluster/MBeanSpec.scala | 16 ++++++++-------- akka-docs/src/main/paradox/scala/cluster-team.md | 4 ++-- 5 files changed, 14 insertions(+), 14 deletions(-) diff --git a/akka-cluster/src/main/resources/reference.conf b/akka-cluster/src/main/resources/reference.conf index 8e69ea6d20..f5b49f6c81 100644 --- a/akka-cluster/src/main/resources/reference.conf +++ b/akka-cluster/src/main/resources/reference.conf @@ -65,8 +65,8 @@ akka { # move 'WeaklyUp' members to 'Up' status once convergence has been reached. allow-weakly-up-members = on - # Teams are used to make islands of the cluster that are colocated. It can be used - # to make the cluster "dc-aware", run the cluster in multiple availability zones or regions. + # Teams are used to make islands of the cluster that are colocated. This can be used + # to make the cluster aware that it is running across multiple availability zones or regions. # The team is added to the list of roles of the node with the prefix "team-". 
team = "default" diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterJmx.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterJmx.scala index bd828e4e80..26056f8963 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterJmx.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterJmx.scala @@ -154,7 +154,7 @@ private[akka] class ClusterJmx(cluster: Cluster, log: LoggingAdapter) { val members = clusterView.members.toSeq.sorted(Member.ordering).map { m ⇒ s"""{ | "address": "${m.address}", - | "roles": [${if (m.roles.isEmpty) "" else m.roles.map("\"" + _ + "\"").mkString("\n ", ",\n ", "\n ")}], + | "roles": [${if (m.roles.isEmpty) "" else m.roles.toList.sorted.map("\"" + _ + "\"").mkString("\n ", ",\n ", "\n ")}], | "status": "${m.status}" | }""".stripMargin } mkString (",\n ") diff --git a/akka-cluster/src/main/scala/akka/cluster/Member.scala b/akka-cluster/src/main/scala/akka/cluster/Member.scala index 145d14f9ef..c0195d5e39 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Member.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Member.scala @@ -22,7 +22,7 @@ class Member private[cluster] ( val status: MemberStatus, val roles: Set[String]) extends Serializable { - lazy val team = roles.find(_.startsWith("team-")) + lazy val team: String = roles.find(_.startsWith("team-")) .getOrElse(throw new IllegalStateException("Team undefined, should not be possible")) def address: Address = uniqueAddress.address diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MBeanSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MBeanSpec.scala index 37ed3d24ae..cb43fe4d52 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MBeanSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MBeanSpec.scala @@ -120,32 +120,32 @@ abstract class MBeanSpec | { | "address": "${sortedNodes(0)}", | "roles": [ - | "testNode", - | "team-default" + | "team-default", + | "testNode" | ], | "status": "Up" | }, | { | "address": "${sortedNodes(1)}", | "roles": [ - | "testNode", - | "team-default" + | "team-default", + | "testNode" | ], | "status": "Up" | }, | { | "address": "${sortedNodes(2)}", | "roles": [ - | "testNode", - | "team-default" + | "team-default", + | "testNode" | ], | "status": "Up" | }, | { | "address": "${sortedNodes(3)}", | "roles": [ - | "testNode", - | "team-default" + | "team-default", + | "testNode" | ], | "status": "Up" | } diff --git a/akka-docs/src/main/paradox/scala/cluster-team.md b/akka-docs/src/main/paradox/scala/cluster-team.md index 436d5a9ad8..a4019d360f 100644 --- a/akka-docs/src/main/paradox/scala/cluster-team.md +++ b/akka-docs/src/main/paradox/scala/cluster-team.md @@ -6,8 +6,8 @@ Cluster teams are a work-in-progress feature, and behavior is still expected to @@@ -Teams are used to define islands of the cluster that are colocated. -They can be used to make the cluster "dc-aware", run the cluster in multiple availability zones or regions. +Teams are used to make islands of the cluster that are colocated. This can be used +to make the cluster aware that it is running across multiple availability zones or regions. Cluster nodes can be assigned to a team by setting the `akka.cluster.team` setting. When no team is specified, a node will belong to the 'default' team. 
From 164387a89e6775ee4875728bbc6e15b75bed94dc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johan=20Andr=C3=A9n?= Date: Tue, 4 Jul 2017 09:09:40 +0100 Subject: [PATCH 03/34] [WIP] one leader per cluster team (#23239) * Guarantee no sneaky type puts more teams in the role list * Leader per team and initial tests * MiMa filters * Second iteration (not working though) * Verbose gossip logging etc. * Gossip to team-nodes even if there is inter-team unreachability * More work ... * Marking removed nodes with tombstones in Gossip * More test coverage for Gossip.remove * Bug failing other multi-node tests squashed * Multi-node test for team-split * Review fixes - only prune tombstones on leader ticks * Clean code is happy code. * All I want is for MiMa to be my friend * These constants are internal * Making the formatting gods happy * I used the wrong reachability for ignoring gossip :/ * Still hadn't quite gotten how reachability was supposed to work * Review feedback applied * Cross-team downing should still work * Actually prune tombstones in the prune tombstones method ... * Another round against reachability. Reachability leading with 15 - 2 so far. --- .../cluster/protobuf/msg/ClusterMessages.java | 961 +++++++++++++++++- .../src/main/protobuf/ClusterMessages.proto | 6 + .../src/main/resources/reference.conf | 10 + .../src/main/scala/akka/cluster/Cluster.scala | 25 +- .../scala/akka/cluster/ClusterDaemon.scala | 258 ++--- .../scala/akka/cluster/ClusterEvent.scala | 78 +- .../scala/akka/cluster/ClusterReadView.scala | 4 +- .../scala/akka/cluster/ClusterSettings.scala | 38 +- .../src/main/scala/akka/cluster/Gossip.scala | 192 +++- .../src/main/scala/akka/cluster/Member.scala | 32 +- .../scala/akka/cluster/Reachability.scala | 9 + .../protobuf/ClusterMessageSerializer.scala | 22 +- .../akka/cluster/MultiTeamClusterSpec.scala | 153 +++ .../cluster/MultiTeamSplitBrainSpec.scala | 141 +++ .../ClusterDomainEventPublisherSpec.scala | 5 +- .../akka/cluster/ClusterDomainEventSpec.scala | 33 +- .../test/scala/akka/cluster/GossipSpec.scala | 276 ++++- .../test/scala/akka/cluster/TestMember.scala | 4 +- .../ClusterMessageSerializerSpec.scala | 4 +- project/MiMa.scala | 23 +- 20 files changed, 1990 insertions(+), 284 deletions(-) create mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/MultiTeamClusterSpec.scala create mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/MultiTeamSplitBrainSpec.scala diff --git a/akka-cluster/src/main/java/akka/cluster/protobuf/msg/ClusterMessages.java b/akka-cluster/src/main/java/akka/cluster/protobuf/msg/ClusterMessages.java index 9e8b1ce958..533e3dd8a0 100644 --- a/akka-cluster/src/main/java/akka/cluster/protobuf/msg/ClusterMessages.java +++ b/akka-cluster/src/main/java/akka/cluster/protobuf/msg/ClusterMessages.java @@ -3574,6 +3574,31 @@ public final class ClusterMessages { * required .VectorClock version = 6; */ akka.cluster.protobuf.msg.ClusterMessages.VectorClockOrBuilder getVersionOrBuilder(); + + // repeated .Tombstone tombstones = 7; + /** + * repeated .Tombstone tombstones = 7; + */ + java.util.List + getTombstonesList(); + /** + * repeated .Tombstone tombstones = 7; + */ + akka.cluster.protobuf.msg.ClusterMessages.Tombstone getTombstones(int index); + /** + * repeated .Tombstone tombstones = 7; + */ + int getTombstonesCount(); + /** + * repeated .Tombstone tombstones = 7; + */ + java.util.List + getTombstonesOrBuilderList(); + /** + * repeated .Tombstone tombstones = 7; + */ + akka.cluster.protobuf.msg.ClusterMessages.TombstoneOrBuilder 
getTombstonesOrBuilder( + int index); } /** * Protobuf type {@code Gossip} @@ -3689,6 +3714,14 @@ public final class ClusterMessages { bitField0_ |= 0x00000002; break; } + case 58: { + if (!((mutable_bitField0_ & 0x00000040) == 0x00000040)) { + tombstones_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000040; + } + tombstones_.add(input.readMessage(akka.cluster.protobuf.msg.ClusterMessages.Tombstone.PARSER, extensionRegistry)); + break; + } } } } catch (akka.protobuf.InvalidProtocolBufferException e) { @@ -3709,6 +3742,9 @@ public final class ClusterMessages { if (((mutable_bitField0_ & 0x00000008) == 0x00000008)) { members_ = java.util.Collections.unmodifiableList(members_); } + if (((mutable_bitField0_ & 0x00000040) == 0x00000040)) { + tombstones_ = java.util.Collections.unmodifiableList(tombstones_); + } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } @@ -3917,6 +3953,42 @@ public final class ClusterMessages { return version_; } + // repeated .Tombstone tombstones = 7; + public static final int TOMBSTONES_FIELD_NUMBER = 7; + private java.util.List tombstones_; + /** + * repeated .Tombstone tombstones = 7; + */ + public java.util.List getTombstonesList() { + return tombstones_; + } + /** + * repeated .Tombstone tombstones = 7; + */ + public java.util.List + getTombstonesOrBuilderList() { + return tombstones_; + } + /** + * repeated .Tombstone tombstones = 7; + */ + public int getTombstonesCount() { + return tombstones_.size(); + } + /** + * repeated .Tombstone tombstones = 7; + */ + public akka.cluster.protobuf.msg.ClusterMessages.Tombstone getTombstones(int index) { + return tombstones_.get(index); + } + /** + * repeated .Tombstone tombstones = 7; + */ + public akka.cluster.protobuf.msg.ClusterMessages.TombstoneOrBuilder getTombstonesOrBuilder( + int index) { + return tombstones_.get(index); + } + private void initFields() { allAddresses_ = java.util.Collections.emptyList(); allRoles_ = akka.protobuf.LazyStringArrayList.EMPTY; @@ -3924,6 +3996,7 @@ public final class ClusterMessages { members_ = java.util.Collections.emptyList(); overview_ = akka.cluster.protobuf.msg.ClusterMessages.GossipOverview.getDefaultInstance(); version_ = akka.cluster.protobuf.msg.ClusterMessages.VectorClock.getDefaultInstance(); + tombstones_ = java.util.Collections.emptyList(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -3958,6 +4031,12 @@ public final class ClusterMessages { memoizedIsInitialized = 0; return false; } + for (int i = 0; i < getTombstonesCount(); i++) { + if (!getTombstones(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } memoizedIsInitialized = 1; return true; } @@ -3983,6 +4062,9 @@ public final class ClusterMessages { if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeMessage(6, version_); } + for (int i = 0; i < tombstones_.size(); i++) { + output.writeMessage(7, tombstones_.get(i)); + } getUnknownFields().writeTo(output); } @@ -4026,6 +4108,10 @@ public final class ClusterMessages { size += akka.protobuf.CodedOutputStream .computeMessageSize(6, version_); } + for (int i = 0; i < tombstones_.size(); i++) { + size += akka.protobuf.CodedOutputStream + .computeMessageSize(7, tombstones_.get(i)); + } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -4143,6 +4229,7 @@ public final class ClusterMessages { getMembersFieldBuilder(); getOverviewFieldBuilder(); getVersionFieldBuilder(); + getTombstonesFieldBuilder(); } } private static Builder 
create() { @@ -4179,6 +4266,12 @@ public final class ClusterMessages { versionBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000020); + if (tombstonesBuilder_ == null) { + tombstones_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000040); + } else { + tombstonesBuilder_.clear(); + } return this; } @@ -4253,6 +4346,15 @@ public final class ClusterMessages { } else { result.version_ = versionBuilder_.build(); } + if (tombstonesBuilder_ == null) { + if (((bitField0_ & 0x00000040) == 0x00000040)) { + tombstones_ = java.util.Collections.unmodifiableList(tombstones_); + bitField0_ = (bitField0_ & ~0x00000040); + } + result.tombstones_ = tombstones_; + } else { + result.tombstones_ = tombstonesBuilder_.build(); + } result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -4347,6 +4449,32 @@ public final class ClusterMessages { if (other.hasVersion()) { mergeVersion(other.getVersion()); } + if (tombstonesBuilder_ == null) { + if (!other.tombstones_.isEmpty()) { + if (tombstones_.isEmpty()) { + tombstones_ = other.tombstones_; + bitField0_ = (bitField0_ & ~0x00000040); + } else { + ensureTombstonesIsMutable(); + tombstones_.addAll(other.tombstones_); + } + onChanged(); + } + } else { + if (!other.tombstones_.isEmpty()) { + if (tombstonesBuilder_.isEmpty()) { + tombstonesBuilder_.dispose(); + tombstonesBuilder_ = null; + tombstones_ = other.tombstones_; + bitField0_ = (bitField0_ & ~0x00000040); + tombstonesBuilder_ = + akka.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getTombstonesFieldBuilder() : null; + } else { + tombstonesBuilder_.addAllMessages(other.tombstones_); + } + } + } this.mergeUnknownFields(other.getUnknownFields()); return this; } @@ -4380,6 +4508,12 @@ public final class ClusterMessages { return false; } + for (int i = 0; i < getTombstonesCount(); i++) { + if (!getTombstones(i).isInitialized()) { + + return false; + } + } return true; } @@ -5302,6 +5436,246 @@ public final class ClusterMessages { return versionBuilder_; } + // repeated .Tombstone tombstones = 7; + private java.util.List tombstones_ = + java.util.Collections.emptyList(); + private void ensureTombstonesIsMutable() { + if (!((bitField0_ & 0x00000040) == 0x00000040)) { + tombstones_ = new java.util.ArrayList(tombstones_); + bitField0_ |= 0x00000040; + } + } + + private akka.protobuf.RepeatedFieldBuilder< + akka.cluster.protobuf.msg.ClusterMessages.Tombstone, akka.cluster.protobuf.msg.ClusterMessages.Tombstone.Builder, akka.cluster.protobuf.msg.ClusterMessages.TombstoneOrBuilder> tombstonesBuilder_; + + /** + * repeated .Tombstone tombstones = 7; + */ + public java.util.List getTombstonesList() { + if (tombstonesBuilder_ == null) { + return java.util.Collections.unmodifiableList(tombstones_); + } else { + return tombstonesBuilder_.getMessageList(); + } + } + /** + * repeated .Tombstone tombstones = 7; + */ + public int getTombstonesCount() { + if (tombstonesBuilder_ == null) { + return tombstones_.size(); + } else { + return tombstonesBuilder_.getCount(); + } + } + /** + * repeated .Tombstone tombstones = 7; + */ + public akka.cluster.protobuf.msg.ClusterMessages.Tombstone getTombstones(int index) { + if (tombstonesBuilder_ == null) { + return tombstones_.get(index); + } else { + return tombstonesBuilder_.getMessage(index); + } + } + /** + * repeated .Tombstone tombstones = 7; + */ + public Builder setTombstones( + int index, akka.cluster.protobuf.msg.ClusterMessages.Tombstone value) { + if (tombstonesBuilder_ == null) { + if (value == null) { + throw new 
NullPointerException(); + } + ensureTombstonesIsMutable(); + tombstones_.set(index, value); + onChanged(); + } else { + tombstonesBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .Tombstone tombstones = 7; + */ + public Builder setTombstones( + int index, akka.cluster.protobuf.msg.ClusterMessages.Tombstone.Builder builderForValue) { + if (tombstonesBuilder_ == null) { + ensureTombstonesIsMutable(); + tombstones_.set(index, builderForValue.build()); + onChanged(); + } else { + tombstonesBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .Tombstone tombstones = 7; + */ + public Builder addTombstones(akka.cluster.protobuf.msg.ClusterMessages.Tombstone value) { + if (tombstonesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTombstonesIsMutable(); + tombstones_.add(value); + onChanged(); + } else { + tombstonesBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .Tombstone tombstones = 7; + */ + public Builder addTombstones( + int index, akka.cluster.protobuf.msg.ClusterMessages.Tombstone value) { + if (tombstonesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTombstonesIsMutable(); + tombstones_.add(index, value); + onChanged(); + } else { + tombstonesBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .Tombstone tombstones = 7; + */ + public Builder addTombstones( + akka.cluster.protobuf.msg.ClusterMessages.Tombstone.Builder builderForValue) { + if (tombstonesBuilder_ == null) { + ensureTombstonesIsMutable(); + tombstones_.add(builderForValue.build()); + onChanged(); + } else { + tombstonesBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .Tombstone tombstones = 7; + */ + public Builder addTombstones( + int index, akka.cluster.protobuf.msg.ClusterMessages.Tombstone.Builder builderForValue) { + if (tombstonesBuilder_ == null) { + ensureTombstonesIsMutable(); + tombstones_.add(index, builderForValue.build()); + onChanged(); + } else { + tombstonesBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .Tombstone tombstones = 7; + */ + public Builder addAllTombstones( + java.lang.Iterable values) { + if (tombstonesBuilder_ == null) { + ensureTombstonesIsMutable(); + super.addAll(values, tombstones_); + onChanged(); + } else { + tombstonesBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .Tombstone tombstones = 7; + */ + public Builder clearTombstones() { + if (tombstonesBuilder_ == null) { + tombstones_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000040); + onChanged(); + } else { + tombstonesBuilder_.clear(); + } + return this; + } + /** + * repeated .Tombstone tombstones = 7; + */ + public Builder removeTombstones(int index) { + if (tombstonesBuilder_ == null) { + ensureTombstonesIsMutable(); + tombstones_.remove(index); + onChanged(); + } else { + tombstonesBuilder_.remove(index); + } + return this; + } + /** + * repeated .Tombstone tombstones = 7; + */ + public akka.cluster.protobuf.msg.ClusterMessages.Tombstone.Builder getTombstonesBuilder( + int index) { + return getTombstonesFieldBuilder().getBuilder(index); + } + /** + * repeated .Tombstone tombstones = 7; + */ + public akka.cluster.protobuf.msg.ClusterMessages.TombstoneOrBuilder getTombstonesOrBuilder( + int index) { + if (tombstonesBuilder_ == null) { + return tombstones_.get(index); } else { + return 
tombstonesBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .Tombstone tombstones = 7; + */ + public java.util.List + getTombstonesOrBuilderList() { + if (tombstonesBuilder_ != null) { + return tombstonesBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(tombstones_); + } + } + /** + * repeated .Tombstone tombstones = 7; + */ + public akka.cluster.protobuf.msg.ClusterMessages.Tombstone.Builder addTombstonesBuilder() { + return getTombstonesFieldBuilder().addBuilder( + akka.cluster.protobuf.msg.ClusterMessages.Tombstone.getDefaultInstance()); + } + /** + * repeated .Tombstone tombstones = 7; + */ + public akka.cluster.protobuf.msg.ClusterMessages.Tombstone.Builder addTombstonesBuilder( + int index) { + return getTombstonesFieldBuilder().addBuilder( + index, akka.cluster.protobuf.msg.ClusterMessages.Tombstone.getDefaultInstance()); + } + /** + * repeated .Tombstone tombstones = 7; + */ + public java.util.List + getTombstonesBuilderList() { + return getTombstonesFieldBuilder().getBuilderList(); + } + private akka.protobuf.RepeatedFieldBuilder< + akka.cluster.protobuf.msg.ClusterMessages.Tombstone, akka.cluster.protobuf.msg.ClusterMessages.Tombstone.Builder, akka.cluster.protobuf.msg.ClusterMessages.TombstoneOrBuilder> + getTombstonesFieldBuilder() { + if (tombstonesBuilder_ == null) { + tombstonesBuilder_ = new akka.protobuf.RepeatedFieldBuilder< + akka.cluster.protobuf.msg.ClusterMessages.Tombstone, akka.cluster.protobuf.msg.ClusterMessages.Tombstone.Builder, akka.cluster.protobuf.msg.ClusterMessages.TombstoneOrBuilder>( + tombstones_, + ((bitField0_ & 0x00000040) == 0x00000040), + getParentForChildren(), + isClean()); + tombstones_ = null; + } + return tombstonesBuilder_; + } + // @@protoc_insertion_point(builder_scope:Gossip) } @@ -7686,6 +8060,499 @@ public final class ClusterMessages { // @@protoc_insertion_point(class_scope:SubjectReachability) } + public interface TombstoneOrBuilder + extends akka.protobuf.MessageOrBuilder { + + // required int32 addressIndex = 1; + /** + * required int32 addressIndex = 1; + */ + boolean hasAddressIndex(); + /** + * required int32 addressIndex = 1; + */ + int getAddressIndex(); + + // required int64 timestamp = 2; + /** + * required int64 timestamp = 2; + */ + boolean hasTimestamp(); + /** + * required int64 timestamp = 2; + */ + long getTimestamp(); + } + /** + * Protobuf type {@code Tombstone} + */ + public static final class Tombstone extends + akka.protobuf.GeneratedMessage + implements TombstoneOrBuilder { + // Use Tombstone.newBuilder() to construct. 
+ private Tombstone(akka.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private Tombstone(boolean noInit) { this.unknownFields = akka.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final Tombstone defaultInstance; + public static Tombstone getDefaultInstance() { + return defaultInstance; + } + + public Tombstone getDefaultInstanceForType() { + return defaultInstance; + } + + private final akka.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final akka.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private Tombstone( + akka.protobuf.CodedInputStream input, + akka.protobuf.ExtensionRegistryLite extensionRegistry) + throws akka.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + akka.protobuf.UnknownFieldSet.Builder unknownFields = + akka.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + addressIndex_ = input.readInt32(); + break; + } + case 16: { + bitField0_ |= 0x00000002; + timestamp_ = input.readInt64(); + break; + } + } + } + } catch (akka.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new akka.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final akka.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.cluster.protobuf.msg.ClusterMessages.internal_static_Tombstone_descriptor; + } + + protected akka.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.cluster.protobuf.msg.ClusterMessages.internal_static_Tombstone_fieldAccessorTable + .ensureFieldAccessorsInitialized( + akka.cluster.protobuf.msg.ClusterMessages.Tombstone.class, akka.cluster.protobuf.msg.ClusterMessages.Tombstone.Builder.class); + } + + public static akka.protobuf.Parser PARSER = + new akka.protobuf.AbstractParser() { + public Tombstone parsePartialFrom( + akka.protobuf.CodedInputStream input, + akka.protobuf.ExtensionRegistryLite extensionRegistry) + throws akka.protobuf.InvalidProtocolBufferException { + return new Tombstone(input, extensionRegistry); + } + }; + + @java.lang.Override + public akka.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required int32 addressIndex = 1; + public static final int ADDRESSINDEX_FIELD_NUMBER = 1; + private int addressIndex_; + /** + * required int32 addressIndex = 1; + */ + public boolean hasAddressIndex() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required int32 addressIndex = 1; + */ + public int getAddressIndex() { + return addressIndex_; + } + + // required int64 timestamp = 2; + public static final int TIMESTAMP_FIELD_NUMBER = 2; + private long timestamp_; + /** + * required int64 timestamp = 2; + */ + public boolean hasTimestamp() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required int64 timestamp = 2; + */ + public long getTimestamp() { + return timestamp_; + } + + private void initFields() { + addressIndex_ = 0; + timestamp_ = 0L; + } + 
private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasAddressIndex()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasTimestamp()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(akka.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeInt32(1, addressIndex_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeInt64(2, timestamp_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += akka.protobuf.CodedOutputStream + .computeInt32Size(1, addressIndex_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += akka.protobuf.CodedOutputStream + .computeInt64Size(2, timestamp_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static akka.cluster.protobuf.msg.ClusterMessages.Tombstone parseFrom( + akka.protobuf.ByteString data) + throws akka.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static akka.cluster.protobuf.msg.ClusterMessages.Tombstone parseFrom( + akka.protobuf.ByteString data, + akka.protobuf.ExtensionRegistryLite extensionRegistry) + throws akka.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static akka.cluster.protobuf.msg.ClusterMessages.Tombstone parseFrom(byte[] data) + throws akka.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static akka.cluster.protobuf.msg.ClusterMessages.Tombstone parseFrom( + byte[] data, + akka.protobuf.ExtensionRegistryLite extensionRegistry) + throws akka.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static akka.cluster.protobuf.msg.ClusterMessages.Tombstone parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static akka.cluster.protobuf.msg.ClusterMessages.Tombstone parseFrom( + java.io.InputStream input, + akka.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static akka.cluster.protobuf.msg.ClusterMessages.Tombstone parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static akka.cluster.protobuf.msg.ClusterMessages.Tombstone parseDelimitedFrom( + java.io.InputStream input, + akka.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static akka.cluster.protobuf.msg.ClusterMessages.Tombstone parseFrom( + akka.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static akka.cluster.protobuf.msg.ClusterMessages.Tombstone parseFrom( + 
akka.protobuf.CodedInputStream input, + akka.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(akka.cluster.protobuf.msg.ClusterMessages.Tombstone prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + akka.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code Tombstone} + */ + public static final class Builder extends + akka.protobuf.GeneratedMessage.Builder + implements akka.cluster.protobuf.msg.ClusterMessages.TombstoneOrBuilder { + public static final akka.protobuf.Descriptors.Descriptor + getDescriptor() { + return akka.cluster.protobuf.msg.ClusterMessages.internal_static_Tombstone_descriptor; + } + + protected akka.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return akka.cluster.protobuf.msg.ClusterMessages.internal_static_Tombstone_fieldAccessorTable + .ensureFieldAccessorsInitialized( + akka.cluster.protobuf.msg.ClusterMessages.Tombstone.class, akka.cluster.protobuf.msg.ClusterMessages.Tombstone.Builder.class); + } + + // Construct using akka.cluster.protobuf.msg.ClusterMessages.Tombstone.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + akka.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (akka.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + addressIndex_ = 0; + bitField0_ = (bitField0_ & ~0x00000001); + timestamp_ = 0L; + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public akka.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return akka.cluster.protobuf.msg.ClusterMessages.internal_static_Tombstone_descriptor; + } + + public akka.cluster.protobuf.msg.ClusterMessages.Tombstone getDefaultInstanceForType() { + return akka.cluster.protobuf.msg.ClusterMessages.Tombstone.getDefaultInstance(); + } + + public akka.cluster.protobuf.msg.ClusterMessages.Tombstone build() { + akka.cluster.protobuf.msg.ClusterMessages.Tombstone result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public akka.cluster.protobuf.msg.ClusterMessages.Tombstone buildPartial() { + akka.cluster.protobuf.msg.ClusterMessages.Tombstone result = new akka.cluster.protobuf.msg.ClusterMessages.Tombstone(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.addressIndex_ = addressIndex_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.timestamp_ = timestamp_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(akka.protobuf.Message other) { + if (other instanceof akka.cluster.protobuf.msg.ClusterMessages.Tombstone) { + return 
mergeFrom((akka.cluster.protobuf.msg.ClusterMessages.Tombstone)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(akka.cluster.protobuf.msg.ClusterMessages.Tombstone other) { + if (other == akka.cluster.protobuf.msg.ClusterMessages.Tombstone.getDefaultInstance()) return this; + if (other.hasAddressIndex()) { + setAddressIndex(other.getAddressIndex()); + } + if (other.hasTimestamp()) { + setTimestamp(other.getTimestamp()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasAddressIndex()) { + + return false; + } + if (!hasTimestamp()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + akka.protobuf.CodedInputStream input, + akka.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + akka.cluster.protobuf.msg.ClusterMessages.Tombstone parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (akka.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (akka.cluster.protobuf.msg.ClusterMessages.Tombstone) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required int32 addressIndex = 1; + private int addressIndex_ ; + /** + * required int32 addressIndex = 1; + */ + public boolean hasAddressIndex() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required int32 addressIndex = 1; + */ + public int getAddressIndex() { + return addressIndex_; + } + /** + * required int32 addressIndex = 1; + */ + public Builder setAddressIndex(int value) { + bitField0_ |= 0x00000001; + addressIndex_ = value; + onChanged(); + return this; + } + /** + * required int32 addressIndex = 1; + */ + public Builder clearAddressIndex() { + bitField0_ = (bitField0_ & ~0x00000001); + addressIndex_ = 0; + onChanged(); + return this; + } + + // required int64 timestamp = 2; + private long timestamp_ ; + /** + * required int64 timestamp = 2; + */ + public boolean hasTimestamp() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required int64 timestamp = 2; + */ + public long getTimestamp() { + return timestamp_; + } + /** + * required int64 timestamp = 2; + */ + public Builder setTimestamp(long value) { + bitField0_ |= 0x00000002; + timestamp_ = value; + onChanged(); + return this; + } + /** + * required int64 timestamp = 2; + */ + public Builder clearTimestamp() { + bitField0_ = (bitField0_ & ~0x00000002); + timestamp_ = 0L; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:Tombstone) + } + + static { + defaultInstance = new Tombstone(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:Tombstone) + } + public interface MemberOrBuilder extends akka.protobuf.MessageOrBuilder { @@ -13880,6 +14747,11 @@ public final class ClusterMessages { private static akka.protobuf.GeneratedMessage.FieldAccessorTable internal_static_SubjectReachability_fieldAccessorTable; + private static akka.protobuf.Descriptors.Descriptor + internal_static_Tombstone_descriptor; + private static + akka.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_Tombstone_fieldAccessorTable; private static akka.protobuf.Descriptors.Descriptor internal_static_Member_descriptor; private static @@ -13942,40 +14814,43 @@ public final class ClusterMessages { 
"(\0132\016.UniqueAddress\022\030\n\020serializedGossip\030\003" + " \002(\014\"^\n\014GossipStatus\022\034\n\004from\030\001 \002(\0132\016.Uni" + "queAddress\022\021\n\tallHashes\030\002 \003(\t\022\035\n\007version" + - "\030\003 \002(\0132\014.VectorClock\"\257\001\n\006Gossip\022$\n\014allAd" + + "\030\003 \002(\0132\014.VectorClock\"\317\001\n\006Gossip\022$\n\014allAd" + "dresses\030\001 \003(\0132\016.UniqueAddress\022\020\n\010allRole", "s\030\002 \003(\t\022\021\n\tallHashes\030\003 \003(\t\022\030\n\007members\030\004 " + "\003(\0132\007.Member\022!\n\010overview\030\005 \002(\0132\017.GossipO" + - "verview\022\035\n\007version\030\006 \002(\0132\014.VectorClock\"S" + - "\n\016GossipOverview\022\014\n\004seen\030\001 \003(\005\0223\n\024observ" + - "erReachability\030\002 \003(\0132\025.ObserverReachabil" + - "ity\"p\n\024ObserverReachability\022\024\n\014addressIn" + - "dex\030\001 \002(\005\022\017\n\007version\030\004 \002(\003\0221\n\023subjectRea" + - "chability\030\002 \003(\0132\024.SubjectReachability\"a\n" + - "\023SubjectReachability\022\024\n\014addressIndex\030\001 \002" + - "(\005\022#\n\006status\030\003 \002(\0162\023.ReachabilityStatus\022", - "\017\n\007version\030\004 \002(\003\"i\n\006Member\022\024\n\014addressInd" + - "ex\030\001 \002(\005\022\020\n\010upNumber\030\002 \002(\005\022\035\n\006status\030\003 \002" + - "(\0162\r.MemberStatus\022\030\n\014rolesIndexes\030\004 \003(\005B" + - "\002\020\001\"y\n\013VectorClock\022\021\n\ttimestamp\030\001 \001(\003\022&\n" + - "\010versions\030\002 \003(\0132\024.VectorClock.Version\032/\n" + - "\007Version\022\021\n\thashIndex\030\001 \002(\005\022\021\n\ttimestamp" + - "\030\002 \002(\003\"\007\n\005Empty\"K\n\007Address\022\016\n\006system\030\001 \002" + - "(\t\022\020\n\010hostname\030\002 \002(\t\022\014\n\004port\030\003 \002(\r\022\020\n\010pr" + - "otocol\030\004 \001(\t\"E\n\rUniqueAddress\022\031\n\007address" + - "\030\001 \002(\0132\010.Address\022\013\n\003uid\030\002 \002(\r\022\014\n\004uid2\030\003 ", - "\001(\r\"V\n\021ClusterRouterPool\022\023\n\004pool\030\001 \002(\0132\005" + - ".Pool\022,\n\010settings\030\002 \002(\0132\032.ClusterRouterP" + - "oolSettings\"<\n\004Pool\022\024\n\014serializerId\030\001 \002(" + - "\r\022\020\n\010manifest\030\002 \002(\t\022\014\n\004data\030\003 \002(\014\"|\n\031Clu" + - "sterRouterPoolSettings\022\026\n\016totalInstances" + - "\030\001 \002(\r\022\033\n\023maxInstancesPerNode\030\002 \002(\r\022\031\n\021a" + - "llowLocalRoutees\030\003 \002(\010\022\017\n\007useRole\030\004 \001(\t*" + - "D\n\022ReachabilityStatus\022\r\n\tReachable\020\000\022\017\n\013" + - "Unreachable\020\001\022\016\n\nTerminated\020\002*b\n\014MemberS" + - "tatus\022\013\n\007Joining\020\000\022\006\n\002Up\020\001\022\013\n\007Leaving\020\002\022", - "\013\n\007Exiting\020\003\022\010\n\004Down\020\004\022\013\n\007Removed\020\005\022\014\n\010W" + - "eaklyUp\020\006B\035\n\031akka.cluster.protobuf.msgH\001" + "verview\022\035\n\007version\030\006 \002(\0132\014.VectorClock\022\036" + + "\n\ntombstones\030\007 \003(\0132\n.Tombstone\"S\n\016Gossip" + + "Overview\022\014\n\004seen\030\001 \003(\005\0223\n\024observerReacha" + + "bility\030\002 \003(\0132\025.ObserverReachability\"p\n\024O" + + "bserverReachability\022\024\n\014addressIndex\030\001 \002(" + + "\005\022\017\n\007version\030\004 \002(\003\0221\n\023subjectReachabilit" + + "y\030\002 \003(\0132\024.SubjectReachability\"a\n\023Subject" + + "Reachability\022\024\n\014addressIndex\030\001 \002(\005\022#\n\006st", + 
"atus\030\003 \002(\0162\023.ReachabilityStatus\022\017\n\007versi" + + "on\030\004 \002(\003\"4\n\tTombstone\022\024\n\014addressIndex\030\001 " + + "\002(\005\022\021\n\ttimestamp\030\002 \002(\003\"i\n\006Member\022\024\n\014addr" + + "essIndex\030\001 \002(\005\022\020\n\010upNumber\030\002 \002(\005\022\035\n\006stat" + + "us\030\003 \002(\0162\r.MemberStatus\022\030\n\014rolesIndexes\030" + + "\004 \003(\005B\002\020\001\"y\n\013VectorClock\022\021\n\ttimestamp\030\001 " + + "\001(\003\022&\n\010versions\030\002 \003(\0132\024.VectorClock.Vers" + + "ion\032/\n\007Version\022\021\n\thashIndex\030\001 \002(\005\022\021\n\ttim" + + "estamp\030\002 \002(\003\"\007\n\005Empty\"K\n\007Address\022\016\n\006syst" + + "em\030\001 \002(\t\022\020\n\010hostname\030\002 \002(\t\022\014\n\004port\030\003 \002(\r", + "\022\020\n\010protocol\030\004 \001(\t\"E\n\rUniqueAddress\022\031\n\007a" + + "ddress\030\001 \002(\0132\010.Address\022\013\n\003uid\030\002 \002(\r\022\014\n\004u" + + "id2\030\003 \001(\r\"V\n\021ClusterRouterPool\022\023\n\004pool\030\001" + + " \002(\0132\005.Pool\022,\n\010settings\030\002 \002(\0132\032.ClusterR" + + "outerPoolSettings\"<\n\004Pool\022\024\n\014serializerI" + + "d\030\001 \002(\r\022\020\n\010manifest\030\002 \002(\t\022\014\n\004data\030\003 \002(\014\"" + + "|\n\031ClusterRouterPoolSettings\022\026\n\016totalIns" + + "tances\030\001 \002(\r\022\033\n\023maxInstancesPerNode\030\002 \002(" + + "\r\022\031\n\021allowLocalRoutees\030\003 \002(\010\022\017\n\007useRole\030" + + "\004 \001(\t*D\n\022ReachabilityStatus\022\r\n\tReachable", + "\020\000\022\017\n\013Unreachable\020\001\022\016\n\nTerminated\020\002*b\n\014M" + + "emberStatus\022\013\n\007Joining\020\000\022\006\n\002Up\020\001\022\013\n\007Leav" + + "ing\020\002\022\013\n\007Exiting\020\003\022\010\n\004Down\020\004\022\013\n\007Removed\020" + + "\005\022\014\n\010WeaklyUp\020\006B\035\n\031akka.cluster.protobuf" + + ".msgH\001" }; akka.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new akka.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -14011,7 +14886,7 @@ public final class ClusterMessages { internal_static_Gossip_fieldAccessorTable = new akka.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_Gossip_descriptor, - new java.lang.String[] { "AllAddresses", "AllRoles", "AllHashes", "Members", "Overview", "Version", }); + new java.lang.String[] { "AllAddresses", "AllRoles", "AllHashes", "Members", "Overview", "Version", "Tombstones", }); internal_static_GossipOverview_descriptor = getDescriptor().getMessageTypes().get(5); internal_static_GossipOverview_fieldAccessorTable = new @@ -14030,14 +14905,20 @@ public final class ClusterMessages { akka.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_SubjectReachability_descriptor, new java.lang.String[] { "AddressIndex", "Status", "Version", }); - internal_static_Member_descriptor = + internal_static_Tombstone_descriptor = getDescriptor().getMessageTypes().get(8); + internal_static_Tombstone_fieldAccessorTable = new + akka.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_Tombstone_descriptor, + new java.lang.String[] { "AddressIndex", "Timestamp", }); + internal_static_Member_descriptor = + getDescriptor().getMessageTypes().get(9); internal_static_Member_fieldAccessorTable = new akka.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_Member_descriptor, new java.lang.String[] { "AddressIndex", "UpNumber", "Status", "RolesIndexes", }); 
internal_static_VectorClock_descriptor = - getDescriptor().getMessageTypes().get(9); + getDescriptor().getMessageTypes().get(10); internal_static_VectorClock_fieldAccessorTable = new akka.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_VectorClock_descriptor, @@ -14049,37 +14930,37 @@ public final class ClusterMessages { internal_static_VectorClock_Version_descriptor, new java.lang.String[] { "HashIndex", "Timestamp", }); internal_static_Empty_descriptor = - getDescriptor().getMessageTypes().get(10); + getDescriptor().getMessageTypes().get(11); internal_static_Empty_fieldAccessorTable = new akka.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_Empty_descriptor, new java.lang.String[] { }); internal_static_Address_descriptor = - getDescriptor().getMessageTypes().get(11); + getDescriptor().getMessageTypes().get(12); internal_static_Address_fieldAccessorTable = new akka.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_Address_descriptor, new java.lang.String[] { "System", "Hostname", "Port", "Protocol", }); internal_static_UniqueAddress_descriptor = - getDescriptor().getMessageTypes().get(12); + getDescriptor().getMessageTypes().get(13); internal_static_UniqueAddress_fieldAccessorTable = new akka.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_UniqueAddress_descriptor, new java.lang.String[] { "Address", "Uid", "Uid2", }); internal_static_ClusterRouterPool_descriptor = - getDescriptor().getMessageTypes().get(13); + getDescriptor().getMessageTypes().get(14); internal_static_ClusterRouterPool_fieldAccessorTable = new akka.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_ClusterRouterPool_descriptor, new java.lang.String[] { "Pool", "Settings", }); internal_static_Pool_descriptor = - getDescriptor().getMessageTypes().get(14); + getDescriptor().getMessageTypes().get(15); internal_static_Pool_fieldAccessorTable = new akka.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_Pool_descriptor, new java.lang.String[] { "SerializerId", "Manifest", "Data", }); internal_static_ClusterRouterPoolSettings_descriptor = - getDescriptor().getMessageTypes().get(15); + getDescriptor().getMessageTypes().get(16); internal_static_ClusterRouterPoolSettings_fieldAccessorTable = new akka.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_ClusterRouterPoolSettings_descriptor, diff --git a/akka-cluster/src/main/protobuf/ClusterMessages.proto b/akka-cluster/src/main/protobuf/ClusterMessages.proto index fa4358dfac..9ff9926337 100644 --- a/akka-cluster/src/main/protobuf/ClusterMessages.proto +++ b/akka-cluster/src/main/protobuf/ClusterMessages.proto @@ -101,6 +101,7 @@ message Gossip { repeated Member members = 4; required GossipOverview overview = 5; required VectorClock version = 6; + repeated Tombstone tombstones = 7; } /** @@ -127,6 +128,11 @@ message SubjectReachability { required int64 version = 4; } +message Tombstone { + required int32 addressIndex = 1; + required int64 timestamp = 2; +} + /** * Reachability status */ diff --git a/akka-cluster/src/main/resources/reference.conf b/akka-cluster/src/main/resources/reference.conf index f5b49f6c81..ad9e3414bd 100644 --- a/akka-cluster/src/main/resources/reference.conf +++ b/akka-cluster/src/main/resources/reference.conf @@ -74,6 +74,8 @@ akka { # The roles are part of the membership information and can be used by # routers or other services to distribute work to certain member types, # e.g. front-end and back-end nodes. 
+ # Roles are not allowed to start with "team-" as that is reserved for the + # special role assigned from the team a node belongs to (see above) roles = [] # Run the coordinated shutdown from phase 'cluster-shutdown' when the cluster @@ -145,6 +147,11 @@ akka { # greater than this value. reduce-gossip-different-view-probability = 400 + # When a node is removed the removal is marked with a tombstone + # which is kept at least this long, after which it is pruned, if there is a partition + # longer than this it could lead to removed nodes being re-added to the cluster + prune-gossip-tombstones-after = 24h + # Settings for the Phi accrual failure detector (http://www.jaist.ac.jp/~defago/files/pdf/IS_RR_2004_010.pdf # [Hayashibara et al]) used by the cluster subsystem to detect unreachable # members. @@ -209,6 +216,9 @@ akka { debug { # log heartbeat events (very verbose, useful mostly when debugging heartbeating issues) verbose-heartbeat-logging = off + + # log verbose details about gossip + verbose-gossip-logging = off } } diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index 94605654bf..558f1420f8 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -420,13 +420,32 @@ class Cluster(val system: ExtendedActorSystem) extends Extension { private[cluster] object InfoLogger { def logInfo(message: String): Unit = - if (LogInfo) log.info("Cluster Node [{}] - {}", selfAddress, message) + if (LogInfo) + if (settings.Team == ClusterSettings.DefaultTeam) + log.info("Cluster Node [{}] - {}", selfAddress, message) + else + log.info("Cluster Node [{}] team [{}] - {}", selfAddress, settings.Team, message) def logInfo(template: String, arg1: Any): Unit = - if (LogInfo) log.info("Cluster Node [{}] - " + template, selfAddress, arg1) + if (LogInfo) + if (settings.Team == ClusterSettings.DefaultTeam) + log.info("Cluster Node [{}] - " + template, selfAddress, arg1) + else + log.info("Cluster Node [{}] team [{}] - " + template, selfAddress, settings.Team, arg1) def logInfo(template: String, arg1: Any, arg2: Any): Unit = - if (LogInfo) log.info("Cluster Node [{}] - " + template, selfAddress, arg1, arg2) + if (LogInfo) + if (settings.Team == ClusterSettings.DefaultTeam) + log.info("Cluster Node [{}] - " + template, selfAddress, arg1, arg2) + else + log.info("Cluster Node [{}] team [{}] - " + template, selfAddress, settings.Team, arg1, arg2) + + def logInfo(template: String, arg1: Any, arg2: Any, arg3: Any): Unit = + if (LogInfo) + if (settings.Team == ClusterSettings.DefaultTeam) + log.info("Cluster Node [{}] - " + template, selfAddress, arg1, arg2, arg3) + else + log.info("Cluster Node [{}] team [" + settings.Team + "] - " + template, selfAddress, arg1, arg2, arg3) } } diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterDaemon.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterDaemon.scala index b24b1249c2..b0e1e38130 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterDaemon.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterDaemon.scala @@ -7,19 +7,24 @@ import language.existentials import scala.collection.immutable import scala.concurrent.duration._ import java.util.concurrent.ThreadLocalRandom + import scala.util.control.NonFatal import akka.actor._ import akka.actor.SupervisorStrategy.Stop import akka.cluster.MemberStatus._ import akka.cluster.ClusterEvent._ -import akka.dispatch.{ UnboundedMessageQueueSemantics, 
RequiresMessageQueue } +import akka.dispatch.{ RequiresMessageQueue, UnboundedMessageQueueSemantics } + import scala.collection.breakOut import akka.remote.QuarantinedEvent import java.util.ArrayList import java.util.Collections + import akka.pattern.ask import akka.util.Timeout import akka.Done +import akka.annotation.InternalApi + import scala.concurrent.Future import scala.concurrent.Promise @@ -266,9 +271,22 @@ private[cluster] final class ClusterCoreSupervisor extends Actor with ActorLoggi /** * INTERNAL API. */ +@InternalApi +private[cluster] object ClusterCoreDaemon { + def vclockName(node: UniqueAddress): String = s"${node.address}-${node.longUid}" + + val NumberOfGossipsBeforeShutdownWhenLeaderExits = 5 + val MaxGossipsBeforeShuttingDownMyself = 5 +} + +/** + * INTERNAL API. + */ +@InternalApi private[cluster] class ClusterCoreDaemon(publisher: ActorRef) extends Actor with ActorLogging with RequiresMessageQueue[UnboundedMessageQueueSemantics] { import InternalClusterAction._ + import ClusterCoreDaemon._ val cluster = Cluster(context.system) import cluster.{ selfAddress, selfRoles, scheduler, failureDetector } @@ -277,10 +295,6 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef) extends Actor with protected def selfUniqueAddress = cluster.selfUniqueAddress - val NumberOfGossipsBeforeShutdownWhenLeaderExits = 5 - val MaxGossipsBeforeShuttingDownMyself = 5 - - def vclockName(node: UniqueAddress): String = s"${node.address}-${node.longUid}" val vclockNode = VectorClock.Node(vclockName(selfUniqueAddress)) // note that self is not initially member, @@ -316,6 +330,8 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef) extends Actor with } var exitingConfirmed = Set.empty[UniqueAddress] + def selfTeam = cluster.settings.Team + /** * Looks up and returns the remote cluster command connection for the specific address. */ @@ -544,28 +560,28 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef) extends Actor with * Received `Join` message and replies with `Welcome` message, containing * current gossip state, including the new joining member. */ - def joining(node: UniqueAddress, roles: Set[String]): Unit = { + def joining(joiningNode: UniqueAddress, roles: Set[String]): Unit = { val selfStatus = latestGossip.member(selfUniqueAddress).status - if (node.address.protocol != selfAddress.protocol) + if (joiningNode.address.protocol != selfAddress.protocol) log.warning( "Member with wrong protocol tried to join, but was ignored, expected [{}] but was [{}]", - selfAddress.protocol, node.address.protocol) - else if (node.address.system != selfAddress.system) + selfAddress.protocol, joiningNode.address.protocol) + else if (joiningNode.address.system != selfAddress.system) log.warning( "Member with wrong ActorSystem name tried to join, but was ignored, expected [{}] but was [{}]", - selfAddress.system, node.address.system) + selfAddress.system, joiningNode.address.system) else if (Gossip.removeUnreachableWithMemberStatus.contains(selfStatus)) - logInfo("Trying to join [{}] to [{}] member, ignoring. Use a member that is Up instead.", node, selfStatus) + logInfo("Trying to join [{}] to [{}] member, ignoring. 
Use a member that is Up instead.", joiningNode, selfStatus) else { val localMembers = latestGossip.members // check by address without uid to make sure that node with same host:port is not allowed // to join until previous node with that host:port has been removed from the cluster - localMembers.find(_.address == node.address) match { - case Some(m) if m.uniqueAddress == node ⇒ + localMembers.find(_.address == joiningNode.address) match { + case Some(m) if m.uniqueAddress == joiningNode ⇒ // node retried join attempt, probably due to lost Welcome message logInfo("Existing member [{}] is joining again.", m) - if (node != selfUniqueAddress) + if (joiningNode != selfUniqueAddress) sender() ! Welcome(selfUniqueAddress, latestGossip) case Some(m) ⇒ // node restarted, same host:port as existing member, but with different uid @@ -584,17 +600,17 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef) extends Actor with } case None ⇒ // remove the node from the failure detector - failureDetector.remove(node.address) + failureDetector.remove(joiningNode.address) // add joining node as Joining // add self in case someone else joins before self has joined (Set discards duplicates) - val newMembers = localMembers + Member(node, roles) + Member(selfUniqueAddress, cluster.selfRoles) + val newMembers = localMembers + Member(joiningNode, roles) + Member(selfUniqueAddress, cluster.selfRoles) val newGossip = latestGossip copy (members = newMembers) updateLatestGossip(newGossip) - logInfo("Node [{}] is JOINING, roles [{}]", node.address, roles.mkString(", ")) - if (node == selfUniqueAddress) { + logInfo("Node [{}] is JOINING, roles [{}]", joiningNode.address, roles.mkString(", ")) + if (joiningNode == selfUniqueAddress) { if (localMembers.isEmpty) leaderActions() // important for deterministic oldest when bootstrapping } else @@ -613,8 +629,8 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef) extends Actor with if (joinWith != from.address) logInfo("Ignoring welcome from [{}] when trying to join with [{}]", from.address, joinWith) else { - logInfo("Welcome from [{}]", from.address) latestGossip = gossip seen selfUniqueAddress + logInfo("Welcome from [{}]", from.address) assertLatestGossip() publish(latestGossip) if (from != selfUniqueAddress) @@ -663,11 +679,12 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef) extends Actor with gossipRandomN(NumberOfGossipsBeforeShutdownWhenLeaderExits) // send ExitingConfirmed to two potential leaders - val membersWithoutSelf = latestGossip.members.filterNot(_.uniqueAddress == selfUniqueAddress) - latestGossip.leaderOf(membersWithoutSelf, selfUniqueAddress) match { + val membersExceptSelf = latestGossip.members.filter(_.uniqueAddress != selfUniqueAddress) + + latestGossip.leaderOf(selfTeam, membersExceptSelf, selfUniqueAddress) match { case Some(node1) ⇒ clusterCore(node1.address) ! ExitingConfirmed(selfUniqueAddress) - latestGossip.leaderOf(membersWithoutSelf.filterNot(_.uniqueAddress == node1), selfUniqueAddress) match { + latestGossip.leaderOf(selfTeam, membersExceptSelf.filterNot(_.uniqueAddress == node1), selfUniqueAddress) match { case Some(node2) ⇒ clusterCore(node2.address) ! 
ExitingConfirmed(selfUniqueAddress) case None ⇒ // no more potential leader @@ -706,26 +723,18 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef) extends Actor with val localMembers = localGossip.members val localOverview = localGossip.overview val localSeen = localOverview.seen - val localReachability = localOverview.reachability + val localReachability = localGossip.teamReachability(selfTeam) // check if the node to DOWN is in the `members` set localMembers.find(_.address == address) match { - case Some(m) if (m.status != Down) ⇒ + case Some(m) if m.status != Down ⇒ if (localReachability.isReachable(m.uniqueAddress)) logInfo("Marking node [{}] as [{}]", m.address, Down) else logInfo("Marking unreachable node [{}] as [{}]", m.address, Down) - // replace member (changed status) - val newMembers = localMembers - m + m.copy(status = Down) - // remove nodes marked as DOWN from the `seen` table - val newSeen = localSeen - m.uniqueAddress - - // update gossip overview - val newOverview = localOverview copy (seen = newSeen) - val newGossip = localGossip copy (members = newMembers, overview = newOverview) // update gossip + val newGossip = localGossip.markAsDown(m) updateLatestGossip(newGossip) - publish(latestGossip) case Some(_) ⇒ // already down case None ⇒ @@ -751,7 +760,7 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef) extends Actor with def receiveGossipStatus(status: GossipStatus): Unit = { val from = status.from - if (!latestGossip.overview.reachability.isReachable(selfUniqueAddress, from)) + if (!latestGossip.isReachable(selfUniqueAddress, from)) logInfo("Ignoring received gossip status from unreachable [{}] ", from) else if (latestGossip.members.forall(_.uniqueAddress != from)) log.debug("Cluster Node [{}] - Ignoring received gossip status from unknown [{}]", selfAddress, from) @@ -778,6 +787,7 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef) extends Actor with * Receive new gossip. */ def receiveGossip(envelope: GossipEnvelope): ReceiveGossipType = { + val from = envelope.from val remoteGossip = envelope.gossip val localGossip = latestGossip @@ -788,7 +798,7 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef) extends Actor with } else if (envelope.to != selfUniqueAddress) { logInfo("Ignoring received gossip intended for someone else, from [{}] to [{}]", from.address, envelope.to) Ignored - } else if (!localGossip.overview.reachability.isReachable(selfUniqueAddress, from)) { + } else if (!localGossip.isReachable(selfUniqueAddress, from)) { logInfo("Ignoring received gossip from unreachable [{}] ", from) Ignored } else if (localGossip.members.forall(_.uniqueAddress != from)) { @@ -839,10 +849,9 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef) extends Actor with // Don't mark gossip state as seen while exiting is in progress, e.g. // shutting down singleton actors. This delays removal of the member until // the exiting tasks have been completed. 
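To make the "not marked as seen" logic above concrete: convergence requires that every relevant member has seen the current gossip version, so an exiting member that withholds its acknowledgement keeps the leader from acting on its removal until the exiting tasks are done. A minimal, self-contained sketch with simplified types (MiniGossip is hypothetical, not the real Gossip model):

    object SeenTableSketch {
      // drastically simplified stand-in for Gossip: just member names and a "seen" set
      final case class MiniGossip(members: Set[String], seen: Set[String]) {
        def seenBy(node: String): MiniGossip = copy(seen = seen + node)
        // convergence in this sketch: every member has acknowledged this gossip version
        def converged: Boolean = members.subsetOf(seen)
      }

      def main(args: Array[String]): Unit = {
        val winningGossip = MiniGossip(members = Set("a", "b", "c"), seen = Set("a", "b"))
        val exitingTasksInProgress = true

        // mirrors the pattern above: only mark as seen once the exiting tasks are done
        val latest =
          if (exitingTasksInProgress) winningGossip
          else winningGossip.seenBy("c")

        println(latest.converged) // false while node "c" is still running its exiting tasks
      }
    }
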
- if (exitingTasksInProgress) - latestGossip = winningGossip - else - latestGossip = winningGossip seen selfUniqueAddress + latestGossip = + if (exitingTasksInProgress) winningGossip + else winningGossip seen selfUniqueAddress assertLatestGossip() // for all new joining nodes we remove them from the failure detector @@ -852,7 +861,7 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef) extends Actor with log.debug("Cluster Node [{}] - Receiving gossip from [{}]", selfAddress, from) - if (comparison == VectorClock.Concurrent) { + if (comparison == VectorClock.Concurrent && cluster.settings.Debug.VerboseGossipLogging) { log.debug( """Couldn't establish a causal relationship between "remote" gossip and "local" gossip - Remote[{}] - Local[{}] - merged them into [{}]""", remoteGossip, localGossip, winningGossip) @@ -995,11 +1004,11 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef) extends Actor with * Runs periodic leader actions, such as member status transitions, assigning partitions etc. */ def leaderActions(): Unit = { - if (latestGossip.isLeader(selfUniqueAddress, selfUniqueAddress)) { - // only run the leader actions if we are the LEADER + if (latestGossip.isTeamLeader(selfTeam, selfUniqueAddress, selfUniqueAddress)) { + // only run the leader actions if we are the LEADER of the team val firstNotice = 20 val periodicNotice = 60 - if (latestGossip.convergence(selfUniqueAddress, exitingConfirmed)) { + if (latestGossip.convergence(selfTeam, selfUniqueAddress, exitingConfirmed)) { if (leaderActionCounter >= firstNotice) logInfo("Leader can perform its duties again") leaderActionCounter = 0 @@ -1012,9 +1021,11 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef) extends Actor with if (leaderActionCounter == firstNotice || leaderActionCounter % periodicNotice == 0) logInfo( "Leader can currently not perform its duties, reachability status: [{}], member status: [{}]", - latestGossip.reachabilityExcludingDownedObservers, - latestGossip.members.map(m ⇒ - s"${m.address} ${m.status} seen=${latestGossip.seenByNode(m.uniqueAddress)}").mkString(", ")) + latestGossip.teamReachabilityExcludingDownedObservers(selfTeam), + latestGossip.members.collect { + case m if m.team == selfTeam ⇒ + s"${m.address} ${m.status} seen=${latestGossip.seenByNode(m.uniqueAddress)}" + }.mkString(", ")) } } cleanupExitingConfirmed() @@ -1025,8 +1036,8 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef) extends Actor with if (latestGossip.member(selfUniqueAddress).status == Down) { // When all reachable have seen the state this member will shutdown itself when it has // status Down. The down commands should spread before we shutdown. - val unreachable = latestGossip.overview.reachability.allUnreachableOrTerminated - val downed = latestGossip.members.collect { case m if m.status == Down ⇒ m.uniqueAddress } + val unreachable = latestGossip.teamReachability(selfTeam).allUnreachableOrTerminated + val downed = latestGossip.teamMembers(selfTeam).collect { case m if m.status == Down ⇒ m.uniqueAddress } if (downed.forall(node ⇒ unreachable(node) || latestGossip.seenByNode(node))) { // the reason for not shutting down immediately is to give the gossip a chance to spread // the downing information to other downed nodes, so that they can shutdown themselves @@ -1059,95 +1070,85 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef) extends Actor with * 9. 
Update the state with the new gossip */ def leaderActionsOnConvergence(): Unit = { - val localGossip = latestGossip - val localMembers = localGossip.members - val localOverview = localGossip.overview - val localSeen = localOverview.seen - - val enoughMembers: Boolean = isMinNrOfMembersFulfilled - def isJoiningToUp(m: Member): Boolean = (m.status == Joining || m.status == WeaklyUp) && enoughMembers val removedUnreachable = for { - node ← localOverview.reachability.allUnreachableOrTerminated - m = localGossip.member(node) - if Gossip.removeUnreachableWithMemberStatus(m.status) + node ← latestGossip.teamReachability(selfTeam).allUnreachableOrTerminated + m = latestGossip.member(node) + if m.team == selfTeam && Gossip.removeUnreachableWithMemberStatus(m.status) } yield m - val removedExitingConfirmed = exitingConfirmed.filter(n ⇒ localGossip.member(n).status == Exiting) + val removedExitingConfirmed = exitingConfirmed.filter { n ⇒ + val member = latestGossip.member(n) + member.team == selfTeam && member.status == Exiting + } - val changedMembers = localMembers collect { - var upNumber = 0 + val changedMembers = { + val enoughMembers: Boolean = isMinNrOfMembersFulfilled + def isJoiningToUp(m: Member): Boolean = (m.status == Joining || m.status == WeaklyUp) && enoughMembers - { - case m if isJoiningToUp(m) ⇒ - // Move JOINING => UP (once all nodes have seen that this node is JOINING, i.e. we have a convergence) - // and minimum number of nodes have joined the cluster - if (upNumber == 0) { - // It is alright to use same upNumber as already used by a removed member, since the upNumber - // is only used for comparing age of current cluster members (Member.isOlderThan) - val youngest = localGossip.youngestMember - upNumber = 1 + (if (youngest.upNumber == Int.MaxValue) 0 else youngest.upNumber) - } else { - upNumber += 1 - } - m.copyUp(upNumber) + latestGossip.members collect { + var upNumber = 0 - case m if m.status == Leaving ⇒ - // Move LEAVING => EXITING (once we have a convergence on LEAVING) - m copy (status = Exiting) + { + case m if m.team == selfTeam && isJoiningToUp(m) ⇒ + // Move JOINING => UP (once all nodes have seen that this node is JOINING, i.e. 
we have a convergence) + // and minimum number of nodes have joined the cluster + if (upNumber == 0) { + // It is alright to use same upNumber as already used by a removed member, since the upNumber + // is only used for comparing age of current cluster members (Member.isOlderThan) + val youngest = latestGossip.youngestMember + upNumber = 1 + (if (youngest.upNumber == Int.MaxValue) 0 else youngest.upNumber) + } else { + upNumber += 1 + } + m.copyUp(upNumber) + + case m if m.team == selfTeam && m.status == Leaving ⇒ + // Move LEAVING => EXITING (once we have a convergence on LEAVING) + m copy (status = Exiting) + } } } - if (removedUnreachable.nonEmpty || removedExitingConfirmed.nonEmpty || changedMembers.nonEmpty) { - // handle changes + val updatedGossip: Gossip = + if (removedUnreachable.nonEmpty || removedExitingConfirmed.nonEmpty || changedMembers.nonEmpty) { - // replace changed members - val newMembers = changedMembers.union(localMembers).diff(removedUnreachable) - .filterNot(m ⇒ removedExitingConfirmed(m.uniqueAddress)) + // replace changed members + val removed = removedUnreachable.map(_.uniqueAddress).union(removedExitingConfirmed) + val newGossip = + latestGossip.update(changedMembers).removeAll(removed, System.currentTimeMillis()) - // removing REMOVED nodes from the `seen` table - val removed = removedUnreachable.map(_.uniqueAddress).union(removedExitingConfirmed) - val newSeen = localSeen diff removed - // removing REMOVED nodes from the `reachability` table - val newReachability = localOverview.reachability.remove(removed) - val newOverview = localOverview copy (seen = newSeen, reachability = newReachability) - // Clear the VectorClock when member is removed. The change made by the leader is stamped - // and will propagate as is if there are no other changes on other nodes. - // If other concurrent changes on other nodes (e.g. join) the pruning is also - // taken care of when receiving gossips. - val newVersion = removed.foldLeft(localGossip.version) { (v, node) ⇒ - v.prune(VectorClock.Node(vclockName(node))) - } - val newGossip = localGossip copy (members = newMembers, overview = newOverview, version = newVersion) + if (!exitingTasksInProgress && newGossip.member(selfUniqueAddress).status == Exiting) { + // Leader is moving itself from Leaving to Exiting. + // ExitingCompleted will be received via CoordinatedShutdown to continue + // the leaving process. Meanwhile the gossip state is not marked as seen. + exitingTasksInProgress = true + logInfo("Exiting (leader), starting coordinated shutdown") + selfExiting.trySuccess(Done) + coordShutdown.run() + } - if (!exitingTasksInProgress && newGossip.member(selfUniqueAddress).status == Exiting) { - // Leader is moving itself from Leaving to Exiting. - // ExitingCompleted will be received via CoordinatedShutdown to continue - // the leaving process. Meanwhile the gossip state is not marked as seen. 
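The Leaving-to-Exiting hand-off is easier to see outside the diff. Roughly, and with hypothetical names rather than the real CoordinatedShutdown wiring: the leader flips a flag so it stops acknowledging gossip as seen, completes a promise that the shutdown phases wait on, and only once those phases have run is the member actually removed. A compact sketch:

    import scala.concurrent.{ Await, Future, Promise }
    import scala.concurrent.ExecutionContext.Implicits.global
    import scala.concurrent.duration._

    object ExitingHandshakeSketch {
      @volatile private var exitingTasksInProgress = false
      private val selfExiting = Promise[Unit]()

      // stand-in for the coordinated-shutdown phases that wait for the exiting member
      def runShutdownPhases(): Future[Unit] =
        selfExiting.future.map(_ => println("exiting tasks completed, member can now be removed"))

      // what the leader does when it sees itself move from Leaving to Exiting
      def onSelfExiting(): Unit = {
        exitingTasksInProgress = true // from here on, gossip is no longer marked as seen
        selfExiting.trySuccess(())    // unblock the shutdown phases
      }

      def main(args: Array[String]): Unit = {
        val done = runShutdownPhases()
        onSelfExiting()
        Await.result(done, 3.seconds)
        println(s"exitingTasksInProgress = $exitingTasksInProgress")
      }
    }
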
- exitingTasksInProgress = true - logInfo("Exiting (leader), starting coordinated shutdown") - selfExiting.trySuccess(Done) - coordShutdown.run() - } + exitingConfirmed = exitingConfirmed.filterNot(removedExitingConfirmed) - updateLatestGossip(newGossip) - exitingConfirmed = exitingConfirmed.filterNot(removedExitingConfirmed) + changedMembers foreach { m ⇒ + logInfo("Leader is moving node [{}] to [{}]", m.address, m.status) + } + removedUnreachable foreach { m ⇒ + val status = if (m.status == Exiting) "exiting" else "unreachable" + logInfo("Leader is removing {} node [{}]", status, m.address) + } + removedExitingConfirmed.foreach { n ⇒ + logInfo("Leader is removing confirmed Exiting node [{}]", n.address) + } - // log status changes - changedMembers foreach { m ⇒ - logInfo("Leader is moving node [{}] to [{}]", m.address, m.status) - } + newGossip + } else + latestGossip - // log the removal of the unreachable nodes - removedUnreachable foreach { m ⇒ - val status = if (m.status == Exiting) "exiting" else "unreachable" - logInfo("Leader is removing {} node [{}]", status, m.address) - } - removedExitingConfirmed.foreach { n ⇒ - logInfo("Leader is removing confirmed Exiting node [{}]", n.address) - } - - publish(latestGossip) + val pruned = updatedGossip.pruneTombstones(System.currentTimeMillis() - PruneGossipTombstonesAfter.toMillis) + if (pruned ne latestGossip) { + updateLatestGossip(pruned) + publish(pruned) } } @@ -1157,7 +1158,10 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef) extends Actor with val enoughMembers: Boolean = isMinNrOfMembersFulfilled def isJoiningToWeaklyUp(m: Member): Boolean = - m.status == Joining && enoughMembers && latestGossip.reachabilityExcludingDownedObservers.isReachable(m.uniqueAddress) + m.team == selfTeam && + m.status == Joining && + enoughMembers && + latestGossip.teamReachabilityExcludingDownedObservers(selfTeam).isReachable(m.uniqueAddress) val changedMembers = localMembers.collect { case m if isJoiningToWeaklyUp(m) ⇒ m.copy(status = WeaklyUp) } @@ -1203,10 +1207,10 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef) extends Actor with if (newlyDetectedUnreachableMembers.nonEmpty || newlyDetectedReachableMembers.nonEmpty) { - val newReachability1 = (localOverview.reachability /: newlyDetectedUnreachableMembers) { + val newReachability1 = newlyDetectedUnreachableMembers.foldLeft(localOverview.reachability) { (reachability, m) ⇒ reachability.unreachable(selfUniqueAddress, m.uniqueAddress) } - val newReachability2 = (newReachability1 /: newlyDetectedReachableMembers) { + val newReachability2 = newlyDetectedReachableMembers.foldLeft(newReachability1) { (reachability, m) ⇒ reachability.reachable(selfUniqueAddress, m.uniqueAddress) } @@ -1265,8 +1269,7 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef) extends Actor with clusterCore(node.address) ! 
GossipStatus(selfUniqueAddress, latestGossip.version) def validNodeForGossip(node: UniqueAddress): Boolean = - (node != selfUniqueAddress && latestGossip.hasMember(node) && - latestGossip.reachabilityExcludingDownedObservers.isReachable(node)) + node != selfUniqueAddress && latestGossip.isReachableExcludingDownedObservers(selfTeam, node) def updateLatestGossip(newGossip: Gossip): Unit = { // Updating the vclock version for the changes @@ -1291,6 +1294,9 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef) extends Actor with throw new IllegalStateException(s"Too many vector clock entries in gossip state ${latestGossip}") def publish(newGossip: Gossip): Unit = { + if (cluster.settings.Debug.VerboseGossipLogging) + log.debug("Cluster Node [{}] team [{}] - New gossip published [{}]", selfAddress, cluster.settings.Team, newGossip) + publisher ! PublishChanges(newGossip) if (PublishStatsInterval == Duration.Zero) publishInternalStats() } diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterEvent.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterEvent.scala index 319581b9cf..9b50818a6a 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterEvent.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterEvent.scala @@ -7,11 +7,15 @@ import language.postfixOps import scala.collection.immutable import scala.collection.immutable.VectorBuilder import akka.actor.{ Actor, ActorLogging, ActorRef, Address } +import akka.cluster.ClusterSettings.Team import akka.cluster.ClusterEvent._ import akka.cluster.MemberStatus._ import akka.event.EventStream -import akka.dispatch.{ UnboundedMessageQueueSemantics, RequiresMessageQueue } +import akka.dispatch.{ RequiresMessageQueue, UnboundedMessageQueueSemantics } import akka.actor.DeadLetterSuppression +import akka.annotation.InternalApi + +import scala.collection.breakOut /** * Domain events published to the event bus. @@ -53,6 +57,8 @@ object ClusterEvent { /** * Current snapshot state of the cluster. Sent to new subscriber. 
+ * + * @param leader leader of the team of this node */ final case class CurrentClusterState( members: immutable.SortedSet[Member] = immutable.SortedSet.empty, @@ -82,10 +88,21 @@ object ClusterEvent { scala.collection.JavaConverters.setAsJavaSetConverter(seenBy).asJava /** - * Java API: get address of current leader, or null if none + * Java API: get address of current team leader, or null if none */ def getLeader: Address = leader orNull + /** + * get address of current leader, if any, within the team that has the given role + */ + def roleLeader(role: String): Option[Address] = roleLeaderMap.getOrElse(role, None) + + /** + * Java API: get address of current leader, if any, within the team that has the given role + * or null if no such node exists + */ + def getRoleLeader(role: String): Address = roleLeaderMap.get(role).flatten.orNull + /** * All node roles in the cluster */ @@ -98,15 +115,16 @@ object ClusterEvent { scala.collection.JavaConverters.setAsJavaSetConverter(allRoles).asJava /** - * get address of current leader, if any, within the role set + * All teams in the cluster */ - def roleLeader(role: String): Option[Address] = roleLeaderMap.getOrElse(role, None) + def allTeams: Set[String] = members.map(_.team)(breakOut) /** - * Java API: get address of current leader within the role set, - * or null if no node with that role + * Java API: All teams in the cluster */ - def getRoleLeader(role: String): Address = roleLeaderMap.get(role).flatten.orNull + def getAllTeams: java.util.Set[String] = + scala.collection.JavaConverters.setAsJavaSetConverter(allTeams).asJava + } /** @@ -171,7 +189,7 @@ object ClusterEvent { } /** - * Leader of the cluster members changed. Published when the state change + * Leader of the cluster team of this node changed. Published when the state change * is first seen on a node. */ final case class LeaderChanged(leader: Option[Address]) extends ClusterDomainEvent { @@ -183,7 +201,7 @@ object ClusterEvent { } /** - * First member (leader) of the members within a role set changed. + * First member (leader) of the members within a role set (in the same team as this node, if cluster teams are used) changed. * Published when the state change is first seen on a node. 
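 * For illustration, a usage sketch (assuming a started Cluster extension and an actor
 * subscribing from its constructor or preStart):
 * {{{
 * Cluster(system).subscribe(self, ClusterEvent.InitialStateAsEvents, classOf[RoleLeaderChanged])
 * }}}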
*/ final case class RoleLeaderChanged(role: String, leader: Option[Address]) extends ClusterDomainEvent { @@ -299,32 +317,35 @@ object ClusterEvent { /** * INTERNAL API */ - private[cluster] def diffLeader(oldGossip: Gossip, newGossip: Gossip, selfUniqueAddress: UniqueAddress): immutable.Seq[LeaderChanged] = { - val newLeader = newGossip.leader(selfUniqueAddress) - if (newLeader != oldGossip.leader(selfUniqueAddress)) List(LeaderChanged(newLeader.map(_.address))) + @InternalApi + private[cluster] def diffLeader(team: Team, oldGossip: Gossip, newGossip: Gossip, selfUniqueAddress: UniqueAddress): immutable.Seq[LeaderChanged] = { + val newLeader = newGossip.teamLeader(team, selfUniqueAddress) + if (newLeader != oldGossip.teamLeader(team, selfUniqueAddress)) List(LeaderChanged(newLeader.map(_.address))) else Nil } /** * INTERNAL API */ - private[cluster] def diffRolesLeader(oldGossip: Gossip, newGossip: Gossip, selfUniqueAddress: UniqueAddress): Set[RoleLeaderChanged] = { + @InternalApi + private[cluster] def diffRolesLeader(team: Team, oldGossip: Gossip, newGossip: Gossip, selfUniqueAddress: UniqueAddress): Set[RoleLeaderChanged] = { for { - role ← (oldGossip.allRoles union newGossip.allRoles) - newLeader = newGossip.roleLeader(role, selfUniqueAddress) - if newLeader != oldGossip.roleLeader(role, selfUniqueAddress) + role ← oldGossip.allRoles union newGossip.allRoles + newLeader = newGossip.roleLeader(team, role, selfUniqueAddress) + if newLeader != oldGossip.roleLeader(team, role, selfUniqueAddress) } yield RoleLeaderChanged(role, newLeader.map(_.address)) } /** * INTERNAL API */ - private[cluster] def diffSeen(oldGossip: Gossip, newGossip: Gossip, selfUniqueAddress: UniqueAddress): immutable.Seq[SeenChanged] = + @InternalApi + private[cluster] def diffSeen(team: Team, oldGossip: Gossip, newGossip: Gossip, selfUniqueAddress: UniqueAddress): immutable.Seq[SeenChanged] = if (newGossip eq oldGossip) Nil else { - val newConvergence = newGossip.convergence(selfUniqueAddress, Set.empty) + val newConvergence = newGossip.convergence(team, selfUniqueAddress, Set.empty) val newSeenBy = newGossip.seenBy - if (newConvergence != oldGossip.convergence(selfUniqueAddress, Set.empty) || newSeenBy != oldGossip.seenBy) + if (newConvergence != oldGossip.convergence(team, selfUniqueAddress, Set.empty) || newSeenBy != oldGossip.seenBy) List(SeenChanged(newConvergence, newSeenBy.map(_.address))) else Nil } @@ -332,6 +353,7 @@ object ClusterEvent { /** * INTERNAL API */ + @InternalApi private[cluster] def diffReachability(oldGossip: Gossip, newGossip: Gossip): immutable.Seq[ReachabilityChanged] = if (newGossip.overview.reachability eq oldGossip.overview.reachability) Nil else List(ReachabilityChanged(newGossip.overview.reachability)) @@ -347,8 +369,10 @@ private[cluster] final class ClusterDomainEventPublisher extends Actor with Acto with RequiresMessageQueue[UnboundedMessageQueueSemantics] { import InternalClusterAction._ - val selfUniqueAddress = Cluster(context.system).selfUniqueAddress + val cluster = Cluster(context.system) + val selfUniqueAddress = cluster.selfUniqueAddress var latestGossip: Gossip = Gossip.empty + def selfTeam = cluster.settings.Team override def preRestart(reason: Throwable, message: Option[Any]) { // don't postStop when restarted, no children to stop @@ -383,9 +407,11 @@ private[cluster] final class ClusterDomainEventPublisher extends Actor with Acto members = latestGossip.members, unreachable = unreachable, seenBy = latestGossip.seenBy.map(_.address), - leader = 
latestGossip.leader(selfUniqueAddress).map(_.address), - roleLeaderMap = latestGossip.allRoles.map(r ⇒ r → latestGossip.roleLeader(r, selfUniqueAddress) - .map(_.address))(collection.breakOut)) + leader = latestGossip.teamLeader(selfTeam, selfUniqueAddress).map(_.address), + roleLeaderMap = latestGossip.allRoles.map(r ⇒ + r → latestGossip.roleLeader(selfTeam, r, selfUniqueAddress).map(_.address) + )(collection.breakOut) + ) receiver ! state } @@ -420,10 +446,10 @@ private[cluster] final class ClusterDomainEventPublisher extends Actor with Acto diffMemberEvents(oldGossip, newGossip) foreach pub diffUnreachable(oldGossip, newGossip, selfUniqueAddress) foreach pub diffReachable(oldGossip, newGossip, selfUniqueAddress) foreach pub - diffLeader(oldGossip, newGossip, selfUniqueAddress) foreach pub - diffRolesLeader(oldGossip, newGossip, selfUniqueAddress) foreach pub + diffLeader(selfTeam, oldGossip, newGossip, selfUniqueAddress) foreach pub + diffRolesLeader(selfTeam, oldGossip, newGossip, selfUniqueAddress) foreach pub // publish internal SeenState for testing purposes - diffSeen(oldGossip, newGossip, selfUniqueAddress) foreach pub + diffSeen(selfTeam, oldGossip, newGossip, selfUniqueAddress) foreach pub diffReachability(oldGossip, newGossip) foreach pub } diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterReadView.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterReadView.scala index 9bb9149651..10f7cb309a 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterReadView.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterReadView.scala @@ -109,12 +109,12 @@ private[akka] class ClusterReadView(cluster: Cluster) extends Closeable { def status: MemberStatus = self.status /** - * Is this node the leader? + * Is this node the current team leader */ def isLeader: Boolean = leader.contains(selfAddress) /** - * Get the address of the current leader. + * Get the address of the current team leader */ def leader: Option[Address] = state.leader diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala index 44b92a8009..f6c8fca61d 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala @@ -10,14 +10,31 @@ import com.typesafe.config.ConfigObject import scala.concurrent.duration.Duration import akka.actor.Address import akka.actor.AddressFromURIString +import akka.annotation.InternalApi import akka.dispatch.Dispatchers -import akka.util.Helpers.{ Requiring, ConfigOps, toRootLowerCase } +import akka.util.Helpers.{ ConfigOps, Requiring, toRootLowerCase } import scala.concurrent.duration.FiniteDuration import akka.japi.Util.immutableSeq -final class ClusterSettings(val config: Config, val systemName: String) { +object ClusterSettings { + type Team = String + /** + * INTERNAL API. + */ + @InternalApi + private[akka] val TeamRolePrefix = "team-" + /** + * INTERNAL API. 
+ */ + @InternalApi + private[akka] val DefaultTeam: Team = "default" + +} + +final class ClusterSettings(val config: Config, val systemName: String) { + import ClusterSettings._ private val cc = config.getConfig("akka.cluster") val LogInfo: Boolean = cc.getBoolean("log-info") @@ -58,6 +75,11 @@ final class ClusterSettings(val config: Config, val systemName: String) { } } + val PruneGossipTombstonesAfter: Duration = { + val key = "prune-gossip-tombstones-after" + cc.getMillisDuration(key) requiring (_ >= Duration.Zero, key + " >= 0s") + } + // specific to the [[akka.cluster.DefaultDowningProvider]] val AutoDownUnreachableAfter: Duration = { val key = "auto-down-unreachable-after" @@ -93,8 +115,15 @@ final class ClusterSettings(val config: Config, val systemName: String) { val AllowWeaklyUpMembers = cc.getBoolean("allow-weakly-up-members") - val Team: String = cc.getString("team") - val Roles: Set[String] = immutableSeq(cc.getStringList("roles")).toSet + s"team-$Team" + val Team: Team = cc.getString("team") + val Roles: Set[String] = { + val configuredRoles = (immutableSeq(cc.getStringList("roles")).toSet) requiring ( + _.forall(!_.startsWith(TeamRolePrefix)), + s"Roles must not start with '${TeamRolePrefix}' as that is reserved for the cluster team setting" + ) + + configuredRoles + s"$TeamRolePrefix$Team" + } val MinNrOfMembers: Int = { cc.getInt("min-nr-of-members") } requiring (_ > 0, "min-nr-of-members must be > 0") @@ -118,6 +147,7 @@ final class ClusterSettings(val config: Config, val systemName: String) { object Debug { val VerboseHeartbeatLogging = cc.getBoolean("debug.verbose-heartbeat-logging") + val VerboseGossipLogging = cc.getBoolean("debug.verbose-gossip-logging") } } diff --git a/akka-cluster/src/main/scala/akka/cluster/Gossip.scala b/akka-cluster/src/main/scala/akka/cluster/Gossip.scala index fee83bb317..6bc18f38a2 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Gossip.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Gossip.scala @@ -4,14 +4,18 @@ package akka.cluster -import scala.collection.immutable +import scala.collection.{ SortedSet, immutable } +import ClusterSettings.Team import MemberStatus._ +import akka.annotation.InternalApi + import scala.concurrent.duration.Deadline /** * INTERNAL API */ private[cluster] object Gossip { + type Timestamp = Long val emptyMembers: immutable.SortedSet[Member] = immutable.SortedSet.empty val empty: Gossip = new Gossip(Gossip.emptyMembers) @@ -59,10 +63,12 @@ private[cluster] object Gossip { * removed node telling it to shut itself down. */ @SerialVersionUID(1L) +@InternalApi private[cluster] final case class Gossip( - members: immutable.SortedSet[Member], // sorted set of members with their status, sorted by address - overview: GossipOverview = GossipOverview(), - version: VectorClock = VectorClock()) { // vector clock version + members: immutable.SortedSet[Member], // sorted set of members with their status, sorted by address + overview: GossipOverview = GossipOverview(), + version: VectorClock = VectorClock(), // vector clock version + tombstones: Map[UniqueAddress, Gossip.Timestamp] = Map.empty) { if (Cluster.isAssertInvariantsEnabled) assertInvariants() @@ -138,15 +144,18 @@ private[cluster] final case class Gossip( this copy (overview = overview copy (seen = overview.seen union that.overview.seen)) /** - * Merges two Gossip instances including membership tables, and the VectorClock histories. + * Merges two Gossip instances including membership tables, tombstones, and the VectorClock histories. 
*/ def merge(that: Gossip): Gossip = { // 1. merge vector clocks val mergedVClock = this.version merge that.version + // 2. merge sets of tombstones + val mergedTombstones = tombstones ++ that.tombstones + // 2. merge members by selecting the single Member with highest MemberStatus out of the Member groups - val mergedMembers = Gossip.emptyMembers union Member.pickHighestPriority(this.members, that.members) + val mergedMembers = Gossip.emptyMembers union Member.pickHighestPriority(this.members, that.members, mergedTombstones) // 3. merge reachability table by picking records with highest version val mergedReachability = this.overview.reachability.merge( @@ -156,29 +165,36 @@ private[cluster] final case class Gossip( // 4. Nobody can have seen this new gossip yet val mergedSeen = Set.empty[UniqueAddress] - Gossip(mergedMembers, GossipOverview(mergedSeen, mergedReachability), mergedVClock) + Gossip(mergedMembers, GossipOverview(mergedSeen, mergedReachability), mergedVClock, mergedTombstones) } /** - * Checks if we have a cluster convergence. If there are any unreachable nodes then we can't have a convergence - - * waiting for user to act (issuing DOWN) or leader to act (issuing DOWN through auto-down). + * Checks if we have a cluster convergence. If there are any in team node pairs that cannot reach each other + * then we can't have a convergence until those nodes reach each other again or one of them is downed * * @return true if convergence have been reached and false if not */ - def convergence(selfUniqueAddress: UniqueAddress, exitingConfirmed: Set[UniqueAddress]): Boolean = { - // First check that: - // 1. we don't have any members that are unreachable, excluding observations from members - // that have status DOWN, or - // 2. all unreachable members in the set have status DOWN or EXITING - // Else we can't continue to check for convergence - // When that is done we check that all members with a convergence - // status is in the seen table, i.e. has seen this version - val unreachable = reachabilityExcludingDownedObservers.allUnreachableOrTerminated.collect { - case node if (node != selfUniqueAddress && !exitingConfirmed(node)) ⇒ member(node) + def convergence(team: Team, selfUniqueAddress: UniqueAddress, exitingConfirmed: Set[UniqueAddress]): Boolean = { + // Find cluster members in the team that are unreachable from other members of the team + // excluding observations from members outside of the team, that have status DOWN or is passed in as confirmed exiting. 
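A compact sketch of this per-team convergence rule, using flattened hypothetical types instead of the real Gossip and Reachability classes (and omitting the exiting-confirmed set): only unreachability observed from inside the team blocks convergence, and every Up or Leaving member of the team must have seen the current gossip version.

    object TeamConvergenceSketch {
      final case class Node(name: String, team: String, status: String)

      def convergence(
        team:        String,
        members:     Set[Node],
        seen:        Set[String],          // names that have acknowledged this gossip version
        unreachable: Set[(String, String)] // (observer, subject) pairs
      ): Boolean = {
        val inTeam = members.filter(_.team == team)
        val names  = inTeam.map(_.name)

        // only observations made by non-Down members of the same team count
        val unreachableInTeam = unreachable.collect {
          case (observer, subject) if names(observer) && names(subject) &&
            !inTeam.exists(m => m.name == observer && m.status == "Down") => subject
        }

        // an unreachable Up/Leaving member of the team blocks convergence
        val blockedByUnreachable = unreachableInTeam.exists(subject =>
          inTeam.exists(m => m.name == subject && (m.status == "Up" || m.status == "Leaving")))

        // every Up/Leaving member of the team must have seen this gossip version
        val everyoneSeen =
          inTeam.filter(m => m.status == "Up" || m.status == "Leaving").forall(m => seen(m.name))

        !blockedByUnreachable && everyoneSeen
      }

      def main(args: Array[String]): Unit = {
        val members = Set(Node("a", "dc1", "Up"), Node("b", "dc1", "Up"), Node("c", "dc2", "Up"))
        // cross-team unreachability (c observes a as unreachable) does not block dc1
        println(convergence("dc1", members, seen = Set("a", "b"), unreachable = Set(("c", "a")))) // true
        // in-team unreachability does
        println(convergence("dc1", members, seen = Set("a", "b"), unreachable = Set(("b", "a")))) // false
      }
    }
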
+ val unreachableInTeam = teamReachabilityExcludingDownedObservers(team).allUnreachableOrTerminated.collect { + case node if node != selfUniqueAddress && !exitingConfirmed(node) ⇒ member(node) } - unreachable.forall(m ⇒ Gossip.convergenceSkipUnreachableWithMemberStatus(m.status)) && - !members.exists(m ⇒ Gossip.convergenceMemberStatus(m.status) && - !(seenByNode(m.uniqueAddress) || exitingConfirmed(m.uniqueAddress))) + + // If another member in the team that is UP or LEAVING and has not seen this gossip or is exiting + // convergence cannot be reached + def teamMemberHinderingConvergenceExists = + members.exists(member ⇒ + member.team == team && + Gossip.convergenceMemberStatus(member.status) && + !(seenByNode(member.uniqueAddress) || exitingConfirmed(member.uniqueAddress)) + ) + + // unreachables outside of the team or with status DOWN or EXITING does not affect convergence + def allUnreachablesCanBeIgnored = + unreachableInTeam.forall(unreachable ⇒ Gossip.convergenceSkipUnreachableWithMemberStatus(unreachable.status)) + + allUnreachablesCanBeIgnored && !teamMemberHinderingConvergenceExists } lazy val reachabilityExcludingDownedObservers: Reachability = { @@ -186,29 +202,81 @@ private[cluster] final case class Gossip( overview.reachability.removeObservers(downed.map(_.uniqueAddress)) } - def isLeader(node: UniqueAddress, selfUniqueAddress: UniqueAddress): Boolean = - leader(selfUniqueAddress).contains(node) + /** + * @return Reachability excluding observations from nodes outside of the team, but including observed unreachable + * nodes outside of the team + */ + def teamReachability(team: Team): Reachability = + overview.reachability.removeObservers(members.collect { case m if m.team != team ⇒ m.uniqueAddress }) - def leader(selfUniqueAddress: UniqueAddress): Option[UniqueAddress] = - leaderOf(members, selfUniqueAddress) - - def roleLeader(role: String, selfUniqueAddress: UniqueAddress): Option[UniqueAddress] = - leaderOf(members.filter(_.hasRole(role)), selfUniqueAddress) - - def leaderOf(mbrs: immutable.SortedSet[Member], selfUniqueAddress: UniqueAddress): Option[UniqueAddress] = { - val reachableMembers = - if (overview.reachability.isAllReachable) mbrs.filterNot(_.status == Down) - else mbrs.filter(m ⇒ m.status != Down && - (overview.reachability.isReachable(m.uniqueAddress) || m.uniqueAddress == selfUniqueAddress)) - if (reachableMembers.isEmpty) None - else reachableMembers.find(m ⇒ Gossip.leaderMemberStatus(m.status)). 
- orElse(Some(reachableMembers.min(Member.leaderStatusOrdering))).map(_.uniqueAddress) + /** + * @return reachability for team nodes, with observations from outside the team or from downed nodes filtered out + */ + def teamReachabilityExcludingDownedObservers(team: Team): Reachability = { + val membersToExclude = members.collect { case m if m.status == Down || m.team != team ⇒ m.uniqueAddress } + overview.reachability.removeObservers(membersToExclude).remove(members.collect { case m if m.team != team ⇒ m.uniqueAddress }) } + def teamMembers(team: Team): SortedSet[Member] = + members.filter(_.team == team) + + def isTeamLeader(team: Team, node: UniqueAddress, selfUniqueAddress: UniqueAddress): Boolean = + teamLeader(team, selfUniqueAddress).contains(node) + + def teamLeader(team: Team, selfUniqueAddress: UniqueAddress): Option[UniqueAddress] = + leaderOf(team, members, selfUniqueAddress) + + def roleLeader(team: Team, role: String, selfUniqueAddress: UniqueAddress): Option[UniqueAddress] = + leaderOf(team, members.filter(_.hasRole(role)), selfUniqueAddress) + + def leaderOf(team: Team, mbrs: immutable.SortedSet[Member], selfUniqueAddress: UniqueAddress): Option[UniqueAddress] = { + val reachability = teamReachability(team) + + val reachableTeamMembers = + if (reachability.isAllReachable) mbrs.filter(m ⇒ m.team == team && m.status != Down) + else mbrs.filter(m ⇒ + m.team == team && + m.status != Down && + (reachability.isReachable(m.uniqueAddress) || m.uniqueAddress == selfUniqueAddress)) + if (reachableTeamMembers.isEmpty) None + else reachableTeamMembers.find(m ⇒ Gossip.leaderMemberStatus(m.status)) + .orElse(Some(reachableTeamMembers.min(Member.leaderStatusOrdering))) + .map(_.uniqueAddress) + } + + def allTeams: Set[Team] = members.map(_.team) + def allRoles: Set[String] = members.flatMap(_.roles) def isSingletonCluster: Boolean = members.size == 1 + /** + * @return true if toAddress should be reachable from the fromTeam in general, within a team + * this means only caring about team-local observations, across teams it means caring + * about all observations for the toAddress. 
+ */ + def isReachableExcludingDownedObservers(fromTeam: Team, toAddress: UniqueAddress): Boolean = + if (!hasMember(toAddress)) false + else { + val to = member(toAddress) + + // if member is in the same team, we ignore cross-team unreachability + if (fromTeam == to.team) teamReachabilityExcludingDownedObservers(fromTeam).isReachable(toAddress) + // if not it is enough that any non-downed node observed it as unreachable + else reachabilityExcludingDownedObservers.isReachable(toAddress) + } + + /** + * @return true if fromAddress should be able to reach toAddress based on the unreachability data and their + * respective teams + */ + def isReachable(fromAddress: UniqueAddress, toAddress: UniqueAddress): Boolean = + if (!hasMember(toAddress)) false + else { + // as it looks for specific unreachable entires for the node pair we don't have to filter on team + overview.reachability.isReachable(fromAddress, toAddress) + } + def member(node: UniqueAddress): Member = { membersMap.getOrElse( node, @@ -222,14 +290,60 @@ private[cluster] final case class Gossip( members.maxBy(m ⇒ if (m.upNumber == Int.MaxValue) 0 else m.upNumber) } + def removeAll(nodes: Iterable[UniqueAddress], removalTimestamp: Long): Gossip = { + nodes.foldLeft(this)((gossip, node) ⇒ gossip.remove(node, removalTimestamp)) + } + + def update(updatedMembers: immutable.SortedSet[Member]): Gossip = { + copy(members = updatedMembers union members) + } + + /** + * Remove the given member from the set of members and mark it's removal with a tombstone to avoid having it + * reintroduced when merging with another gossip that has not seen the removal. + */ + def remove(node: UniqueAddress, removalTimestamp: Long): Gossip = { + // removing REMOVED nodes from the `seen` table + val newSeen = overview.seen - node + // removing REMOVED nodes from the `reachability` table + val newReachability = overview.reachability.remove(node :: Nil) + val newOverview = overview.copy(seen = newSeen, reachability = newReachability) + + // Clear the VectorClock when member is removed. The change made by the leader is stamped + // and will propagate as is if there are no other changes on other nodes. + // If other concurrent changes on other nodes (e.g. join) the pruning is also + // taken care of when receiving gossips. 
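The tombstone bookkeeping used here can be illustrated with a simplified model (MiniGossip is hypothetical, not the real class): removing a node records a timestamped tombstone, merging keeps tombstoned nodes out even when the other side has not yet seen the removal, and tombstones older than the retention window (prune-gossip-tombstones-after, 24 hours by default) are pruned.

    object TombstoneSketch {
      final case class MiniGossip(members: Set[String], tombstones: Map[String, Long]) {
        // removal leaves a timestamped tombstone behind
        def remove(node: String, removalTimestamp: Long): MiniGossip =
          MiniGossip(members - node, tombstones + (node -> removalTimestamp))

        // merging unions the tombstones and keeps tombstoned members removed
        def merge(that: MiniGossip): MiniGossip = {
          val mergedTombstones = tombstones ++ that.tombstones
          MiniGossip((members ++ that.members).filterNot(mergedTombstones.contains), mergedTombstones)
        }

        // drop tombstones that are older than the retention window
        def pruneTombstones(removeEarlierThan: Long): MiniGossip =
          copy(tombstones = tombstones.filter { case (_, timestamp) => timestamp > removeEarlierThan })
      }

      def main(args: Array[String]): Unit = {
        val now    = System.currentTimeMillis()
        val local  = MiniGossip(Set("a", "b", "c"), Map.empty).remove("c", now)
        val remote = MiniGossip(Set("a", "b", "c"), Map.empty) // has not seen the removal yet

        println(local.merge(remote).members)               // Set(a, b): "c" stays removed
        println(local.pruneTombstones(now + 1).tombstones) // empty once the window has passed
      }
    }
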
+ val newVersion = version.prune(VectorClock.Node(ClusterCoreDaemon.vclockName(node))) + val newMembers = members.filterNot(_.uniqueAddress == node) + val newTombstones = tombstones + (node → removalTimestamp) + copy(version = newVersion, members = newMembers, overview = newOverview, tombstones = newTombstones) + } + + def markAsDown(member: Member): Gossip = { + // replace member (changed status) + val newMembers = members - member + member.copy(status = Down) + // remove nodes marked as DOWN from the `seen` table + val newSeen = overview.seen - member.uniqueAddress + + // update gossip overview + val newOverview = overview copy (seen = newSeen) + copy(members = newMembers, overview = newOverview) // update gossip + } + def prune(removedNode: VectorClock.Node): Gossip = { val newVersion = version.prune(removedNode) if (newVersion eq version) this else copy(version = newVersion) } + def pruneTombstones(removeEarlierThan: Gossip.Timestamp): Gossip = { + val newTombstones = tombstones.filter { case (_, timestamp) ⇒ timestamp > removeEarlierThan } + if (newTombstones.size == tombstones.size) this + else copy(tombstones = newTombstones) + } + override def toString = - s"Gossip(members = [${members.mkString(", ")}], overview = ${overview}, version = ${version})" + s"Gossip(members = [${members.mkString(", ")}], overview = $overview, version = $version, tombstones = $tombstones)" } /** diff --git a/akka-cluster/src/main/scala/akka/cluster/Member.scala b/akka-cluster/src/main/scala/akka/cluster/Member.scala index c0195d5e39..79b6ac7b77 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Member.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Member.scala @@ -6,6 +6,8 @@ package akka.cluster import akka.actor.Address import MemberStatus._ +import akka.annotation.InternalApi +import akka.cluster.ClusterSettings.Team import scala.runtime.AbstractFunction2 @@ -22,8 +24,9 @@ class Member private[cluster] ( val status: MemberStatus, val roles: Set[String]) extends Serializable { - lazy val team: String = roles.find(_.startsWith("team-")) + lazy val team: String = roles.find(_.startsWith(ClusterSettings.TeamRolePrefix)) .getOrElse(throw new IllegalStateException("Team undefined, should not be possible")) + .substring(ClusterSettings.TeamRolePrefix.length) def address: Address = uniqueAddress.address @@ -32,7 +35,11 @@ class Member private[cluster] ( case m: Member ⇒ uniqueAddress == m.uniqueAddress case _ ⇒ false } - override def toString = s"Member(address = ${address}, status = ${status})" + override def toString = + if (team == ClusterSettings.DefaultTeam) + s"Member(address = $address, status = $status)" + else + s"Member(address = $address, team = $team, status = $status)" def hasRole(role: String): Boolean = roles.contains(role) @@ -46,7 +53,9 @@ class Member private[cluster] ( * Is this member older, has been part of cluster longer, than another * member. It is only correct when comparing two existing members in a * cluster. A member that joined after removal of another member may be - * considered older than the removed member. + * considered older than the removed member. Note that is only makes + * sense to compare with other members inside of one team (upNumber has + * a higher risk of being reused across teams). 
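 * For illustration (hypothetical upNumbers): within one team a member that was moved to Up
 * earlier has a lower upNumber, so
 * {{{
 * // memberA.upNumber == 1, memberB.upNumber == 2 => memberA.isOlderThan(memberB) == true
 * }}}
 * while the same upNumber may be assigned independently in two different teams, which is why
 * cross-team comparisons are not meaningful.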
*/ def isOlderThan(other: Member): Boolean = if (upNumber == other.upNumber) @@ -87,7 +96,8 @@ object Member { /** * INTERNAL API */ - private[cluster] def removed(node: UniqueAddress): Member = new Member(node, Int.MaxValue, Removed, Set.empty) + private[cluster] def removed(node: UniqueAddress): Member = + new Member(node, Int.MaxValue, Removed, Set(ClusterSettings.TeamRolePrefix + "-N/A")) /** * `Address` ordering type class, sorts addresses by host and port. @@ -136,16 +146,24 @@ object Member { (a, b) ⇒ a.isOlderThan(b) } - def pickHighestPriority(a: Set[Member], b: Set[Member]): Set[Member] = { + @deprecated("Was accidentally made a public API, internal", since = "2.5.4") + def pickHighestPriority(a: Set[Member], b: Set[Member]): Set[Member] = + pickHighestPriority(a, b, Map.empty) + + /** + * INTERNAL API. + */ + @InternalApi + private[akka] def pickHighestPriority(a: Set[Member], b: Set[Member], tombstones: Map[UniqueAddress, Long]): Set[Member] = { // group all members by Address => Seq[Member] val groupedByAddress = (a.toSeq ++ b.toSeq).groupBy(_.uniqueAddress) // pick highest MemberStatus - (Member.none /: groupedByAddress) { + groupedByAddress.foldLeft(Member.none) { case (acc, (_, members)) ⇒ if (members.size == 2) acc + members.reduceLeft(highestPriorityOf) else { val m = members.head - if (Gossip.removeUnreachableWithMemberStatus(m.status)) acc // removed + if (tombstones.contains(m.uniqueAddress) || Gossip.removeUnreachableWithMemberStatus(m.status)) acc // removed else acc + m } } diff --git a/akka-cluster/src/main/scala/akka/cluster/Reachability.scala b/akka-cluster/src/main/scala/akka/cluster/Reachability.scala index c27e6b3710..9b8c39e336 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Reachability.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Reachability.scala @@ -3,6 +3,8 @@ */ package akka.cluster +import akka.annotation.InternalApi + import scala.collection.immutable import scala.collection.breakOut @@ -46,6 +48,7 @@ private[cluster] object Reachability { * - Reachable otherwise, i.e. 
no observer node considers it as Unreachable */ @SerialVersionUID(1L) +@InternalApi private[cluster] class Reachability private ( val records: immutable.IndexedSeq[Reachability.Record], val versions: Map[UniqueAddress, Long]) extends Serializable { @@ -205,8 +208,14 @@ private[cluster] class Reachability private ( else if (cache.allUnreachable(node)) Unreachable else Reachable + /** + * @return true if there is no observer that has marked node unreachable or terminated + */ def isReachable(node: UniqueAddress): Boolean = isAllReachable || !allUnreachableOrTerminated.contains(node) + /** + * @return true if there is no specific entry saying observer observed subject as unreachable + */ def isReachable(observer: UniqueAddress, subject: UniqueAddress): Boolean = status(observer, subject) == Reachable diff --git a/akka-cluster/src/main/scala/akka/cluster/protobuf/ClusterMessageSerializer.scala b/akka-cluster/src/main/scala/akka/cluster/protobuf/ClusterMessageSerializer.scala index 23c8834001..4896c706e3 100644 --- a/akka-cluster/src/main/scala/akka/cluster/protobuf/ClusterMessageSerializer.scala +++ b/akka-cluster/src/main/scala/akka/cluster/protobuf/ClusterMessageSerializer.scala @@ -244,7 +244,7 @@ class ClusterMessageSerializer(val system: ExtendedActorSystem) extends BaseSeri private def gossipToProto(gossip: Gossip): cm.Gossip.Builder = { val allMembers = gossip.members.toVector - val allAddresses: Vector[UniqueAddress] = allMembers.map(_.uniqueAddress) + val allAddresses: Vector[UniqueAddress] = allMembers.map(_.uniqueAddress) ++ gossip.tombstones.keys val addressMapping = allAddresses.zipWithIndex.toMap val allRoles = allMembers.foldLeft(Set.empty[String])((acc, m) ⇒ acc union m.roles).to[Vector] val roleMapping = allRoles.zipWithIndex.toMap @@ -271,6 +271,12 @@ class ClusterMessageSerializer(val system: ExtendedActorSystem) extends BaseSeri } } + def tombstoneToProto(t: (UniqueAddress, Long)): cm.Tombstone = + cm.Tombstone.newBuilder() + .setAddressIndex(mapUniqueAddress(t._1)) + .setTimestamp(t._2) + .build() + val reachability = reachabilityToProto(gossip.overview.reachability) val members = gossip.members.map(memberToProto) val seen = gossip.overview.seen.map(mapUniqueAddress) @@ -279,8 +285,12 @@ class ClusterMessageSerializer(val system: ExtendedActorSystem) extends BaseSeri addAllObserverReachability(reachability.map(_.build).asJava) cm.Gossip.newBuilder().addAllAllAddresses(allAddresses.map(uniqueAddressToProto(_).build).asJava). - addAllAllRoles(allRoles.asJava).addAllAllHashes(allHashes.asJava).addAllMembers(members.map(_.build).asJava). 
- setOverview(overview).setVersion(vectorClockToProto(gossip.version, hashMapping)) + addAllAllRoles(allRoles.asJava) + .addAllAllHashes(allHashes.asJava) + .addAllMembers(members.map(_.build).asJava) + .setOverview(overview) + .setVersion(vectorClockToProto(gossip.version, hashMapping)) + .addAllTombstones(gossip.tombstones.map(tombstoneToProto).asJava) } private def vectorClockToProto(version: VectorClock, hashMapping: Map[String, Int]): cm.VectorClock.Builder = { @@ -338,13 +348,17 @@ class ClusterMessageSerializer(val system: ExtendedActorSystem) extends BaseSeri new Member(addressMapping(member.getAddressIndex), member.getUpNumber, memberStatusFromInt(member.getStatus.getNumber), member.getRolesIndexesList.asScala.map(roleMapping(_))(breakOut)) + def tombstoneFromProto(tombstone: cm.Tombstone): (UniqueAddress, Long) = + (addressMapping(tombstone.getAddressIndex), tombstone.getTimestamp) + val members: immutable.SortedSet[Member] = gossip.getMembersList.asScala.map(memberFromProto)(breakOut) val reachability = reachabilityFromProto(gossip.getOverview.getObserverReachabilityList.asScala) val seen: Set[UniqueAddress] = gossip.getOverview.getSeenList.asScala.map(addressMapping(_))(breakOut) val overview = GossipOverview(seen, reachability) + val tombstones: Map[UniqueAddress, Long] = gossip.getTombstonesList.asScala.map(tombstoneFromProto)(breakOut) - Gossip(members, overview, vectorClockFromProto(gossip.getVersion, hashMapping)) + Gossip(members, overview, vectorClockFromProto(gossip.getVersion, hashMapping), tombstones) } private def vectorClockFromProto(version: cm.VectorClock, hashMapping: immutable.Seq[String]) = { diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiTeamClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiTeamClusterSpec.scala new file mode 100644 index 0000000000..a1355f6ea9 --- /dev/null +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiTeamClusterSpec.scala @@ -0,0 +1,153 @@ +/** + * Copyright (C) 2009-2017 Lightbend Inc. 
+ */ +package akka.cluster + +import akka.cluster.MemberStatus.Up +import akka.remote.testkit.{ MultiNodeConfig, MultiNodeSpec } +import akka.remote.transport.ThrottlerTransportAdapter.Direction +import com.typesafe.config.ConfigFactory + +import scala.concurrent.duration._ + +object MultiTeamMultiJvmSpec extends MultiNodeConfig { + val first = role("first") + val second = role("second") + val third = role("third") + val fourth = role("fourth") + val fifth = role("fifth") + + commonConfig(MultiNodeClusterSpec.clusterConfig) + + nodeConfig(first, second)(ConfigFactory.parseString( + """ + akka.cluster.team = "dc1" + akka.loglevel = INFO + """)) + + nodeConfig(third, fourth, fifth)(ConfigFactory.parseString( + """ + akka.cluster.team = "dc2" + akka.loglevel = INFO + """)) + + testTransport(on = true) +} + +class MultiTeamMultiJvmNode1 extends MultiTeamSpec +class MultiTeamMultiJvmNode2 extends MultiTeamSpec +class MultiTeamMultiJvmNode3 extends MultiTeamSpec +class MultiTeamMultiJvmNode4 extends MultiTeamSpec +class MultiTeamMultiJvmNode5 extends MultiTeamSpec + +abstract class MultiTeamSpec + extends MultiNodeSpec(MultiTeamMultiJvmSpec) + with MultiNodeClusterSpec { + + import MultiTeamMultiJvmSpec._ + + "A cluster with multiple cluster teams" must { + "be able to form" in { + + runOn(first) { + cluster.join(first) + } + runOn(second, third, fourth) { + cluster.join(first) + } + enterBarrier("form-cluster-join-attempt") + + runOn(first, second, third, fourth) { + within(20.seconds) { + awaitAssert(clusterView.members.filter(_.status == MemberStatus.Up) should have size (4)) + } + } + + enterBarrier("cluster started") + } + + "have a leader per team" in { + runOn(first, second) { + cluster.settings.Team should ===("dc1") + clusterView.leader shouldBe defined + val dc1 = Set(address(first), address(second)) + dc1 should contain(clusterView.leader.get) + } + runOn(third, fourth) { + cluster.settings.Team should ===("dc2") + clusterView.leader shouldBe defined + val dc2 = Set(address(third), address(fourth)) + dc2 should contain(clusterView.leader.get) + } + + enterBarrier("leader per team") + } + + "be able to have team member changes while there is inter-team unreachability" in within(20.seconds) { + runOn(first) { + testConductor.blackhole(first, third, Direction.Both).await + } + runOn(first, second, third, fourth) { + awaitAssert(clusterView.unreachableMembers should not be empty) + } + enterBarrier("inter-team unreachability") + + runOn(fifth) { + cluster.join(third) + } + + // should be able to join and become up since the + // unreachable is between dc1 and dc2, + within(10.seconds) { + awaitAssert(clusterView.members.filter(_.status == MemberStatus.Up) should have size (5)) + } + + runOn(first) { + testConductor.passThrough(first, third, Direction.Both).await + } + runOn(first, second, third, fourth) { + awaitAssert(clusterView.unreachableMembers should not be empty) + } + enterBarrier("inter-team unreachability end") + } + + "be able to have team member changes while there is unreachability in another team" in within(20.seconds) { + runOn(first) { + testConductor.blackhole(first, second, Direction.Both).await + } + runOn(first, second, third, fourth) { + awaitAssert(clusterView.unreachableMembers should not be empty) + } + enterBarrier("other-team-internal-unreachable") + + runOn(third) { + cluster.join(fifth) + // should be able to join and leave + // since the unreachable nodes are inside of dc1 + cluster.leave(fourth) + + awaitAssert(clusterView.members.map(_.address) should not 
contain (address(fourth))) + awaitAssert(clusterView.members.collect { case m if m.status == Up ⇒ m.address } should contain(address(fifth))) + } + + enterBarrier("other-team-internal-unreachable changed") + + runOn(first) { + testConductor.passThrough(first, second, Direction.Both).await + } + enterBarrier("other-team-internal-unreachable end") + } + + "be able to down a member of another team" in within(20.seconds) { + runOn(fifth) { + cluster.down(address(second)) + } + + runOn(first, third, fifth) { + awaitAssert(clusterView.members.map(_.address) should not contain (address(second))) + } + enterBarrier("cross-team-downed") + } + + } +} diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiTeamSplitBrainSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiTeamSplitBrainSpec.scala new file mode 100644 index 0000000000..772f2de585 --- /dev/null +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiTeamSplitBrainSpec.scala @@ -0,0 +1,141 @@ +/** + * Copyright (C) 2009-2017 Lightbend Inc. + */ +package akka.cluster + +import akka.remote.testkit.{ MultiNodeConfig, MultiNodeSpec } +import akka.remote.transport.ThrottlerTransportAdapter.Direction +import com.typesafe.config.ConfigFactory + +import scala.concurrent.duration._ + +object MultiTeamSplitBrainMultiJvmSpec extends MultiNodeConfig { + val first = role("first") + val second = role("second") + val third = role("third") + val fourth = role("fourth") + + commonConfig(MultiNodeClusterSpec.clusterConfig) + + nodeConfig(first, second)(ConfigFactory.parseString( + """ + akka.cluster.team = "dc1" + akka.loglevel = INFO + """)) + + nodeConfig(third, fourth)(ConfigFactory.parseString( + """ + akka.cluster.team = "dc2" + akka.loglevel = INFO + """)) + + testTransport(on = true) +} + +class MultiTeamSplitBrainMultiJvmNode1 extends MultiTeamSpec +class MultiTeamSplitBrainMultiJvmNode2 extends MultiTeamSpec +class MultiTeamSplitBrainMultiJvmNode3 extends MultiTeamSpec +class MultiTeamSplitBrainMultiJvmNode4 extends MultiTeamSpec +class MultiTeamSplitBrainMultiJvmNode5 extends MultiTeamSpec + +abstract class MultiTeamSplitBrainSpec + extends MultiNodeSpec(MultiTeamSplitBrainMultiJvmSpec) + with MultiNodeClusterSpec { + + import MultiTeamSplitBrainMultiJvmSpec._ + + val dc1 = List(first, second) + val dc2 = List(third, fourth) + + def splitTeams(): Unit = { + runOn(first) { + for { + dc1Node ← dc1 + dc2Node ← dc2 + } { + testConductor.blackhole(dc1Node, dc2Node, Direction.Both).await + } + } + + runOn(dc1: _*) { + awaitAssert(clusterView.unreachableMembers.map(_.address) should ===(dc2.map(address))) + } + runOn(dc2: _*) { + awaitAssert(clusterView.unreachableMembers.map(_.address) should ===(dc1.map(address))) + } + + } + + def unsplitTeams(): Unit = { + runOn(first) { + for { + dc1Node ← dc1 + dc2Node ← dc2 + } { + testConductor.passThrough(dc1Node, dc2Node, Direction.Both).await + } + } + + awaitAllReachable() + } + + "A cluster with multiple cluster teams" must { + "be able to form two teams" in { + awaitClusterUp(first, second, third) + } + + "be able to have a team member join while there is inter-team split" in within(20.seconds) { + // introduce a split between teams + splitTeams() + enterBarrier("team-split-1") + + runOn(fourth) { + cluster.join(third) + } + enterBarrier("inter-team unreachability") + + // should be able to join and become up since the + // split is between dc1 and dc2 + runOn(third, fourth) { + awaitAssert(clusterView.members.collect { + case m if m.team == "dc2" && m.status == MemberStatus.Up ⇒ 
m.address + }) should ===(Set(address(third), address(fourth))) + } + enterBarrier("dc2-join-completed") + + unsplitTeams() + enterBarrier("team-unsplit-1") + + runOn(dc1: _*) { + awaitAssert(clusterView.members.collect { + case m if m.team == "dc2" && m.status == MemberStatus.Up ⇒ m.address + }) should ===(Set(address(third), address(fourth))) + } + + enterBarrier("inter-team-split-1-done") + } + + "be able to have team member leave while there is inter-team split" in within(20.seconds) { + splitTeams() + enterBarrier("team-split-2") + + runOn(fourth) { + cluster.leave(third) + } + + runOn(third, fourth) { + awaitAssert(clusterView.members.filter(_.address == address(fourth)) should ===(Set.empty)) + } + enterBarrier("node-4-left") + + unsplitTeams() + enterBarrier("team-unsplit-2") + + runOn(first, second) { + awaitAssert(clusterView.members.filter(_.address == address(fourth)) should ===(Set.empty)) + } + enterBarrier("inter-team-split-2-done") + } + + } +} diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterDomainEventPublisherSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterDomainEventPublisherSpec.scala index e9d1897fce..1d78f51a85 100644 --- a/akka-cluster/src/test/scala/akka/cluster/ClusterDomainEventPublisherSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/ClusterDomainEventPublisherSpec.scala @@ -131,7 +131,10 @@ class ClusterDomainEventPublisherSpec extends AkkaSpec(ClusterDomainEventPublish publisher ! Subscribe(subscriber.ref, InitialStateAsSnapshot, Set(classOf[RoleLeaderChanged])) subscriber.expectMsgType[CurrentClusterState] publisher ! PublishChanges(Gossip(members = SortedSet(cJoining, dUp))) - subscriber.expectMsg(RoleLeaderChanged("GRP", Some(dUp.address))) + subscriber.expectMsgAllOf( + RoleLeaderChanged("GRP", Some(dUp.address)), + RoleLeaderChanged(ClusterSettings.TeamRolePrefix + ClusterSettings.DefaultTeam, Some(dUp.address)) + ) publisher ! 
PublishChanges(Gossip(members = SortedSet(cUp, dUp))) subscriber.expectMsg(RoleLeaderChanged("GRP", Some(cUp.address))) } diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterDomainEventSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterDomainEventSpec.scala index e474340b81..f3c5f54ab6 100644 --- a/akka-cluster/src/test/scala/akka/cluster/ClusterDomainEventSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/ClusterDomainEventSpec.scala @@ -52,7 +52,7 @@ class ClusterDomainEventSpec extends WordSpec with Matchers { diffMemberEvents(g1, g2) should ===(Seq(MemberUp(bUp), MemberJoined(eJoining))) diffUnreachable(g1, g2, selfDummyAddress) should ===(Seq.empty) - diffSeen(g1, g2, selfDummyAddress) should ===(Seq(SeenChanged(convergence = true, seenBy = s2.map(_.address)))) + diffSeen(ClusterSettings.DefaultTeam, g1, g2, selfDummyAddress) should ===(Seq(SeenChanged(convergence = true, seenBy = s2.map(_.address)))) } "be produced for changed status of members" in { @@ -61,7 +61,7 @@ class ClusterDomainEventSpec extends WordSpec with Matchers { diffMemberEvents(g1, g2) should ===(Seq(MemberUp(aUp), MemberLeft(cLeaving), MemberJoined(eJoining))) diffUnreachable(g1, g2, selfDummyAddress) should ===(Seq.empty) - diffSeen(g1, g2, selfDummyAddress) should ===(Seq(SeenChanged(convergence = true, seenBy = s2.map(_.address)))) + diffSeen(ClusterSettings.DefaultTeam, g1, g2, selfDummyAddress) should ===(Seq(SeenChanged(convergence = true, seenBy = s2.map(_.address)))) } "be produced for members in unreachable" in { @@ -76,7 +76,7 @@ class ClusterDomainEventSpec extends WordSpec with Matchers { diffUnreachable(g1, g2, selfDummyAddress) should ===(Seq(UnreachableMember(bDown))) // never include self member in unreachable diffUnreachable(g1, g2, bDown.uniqueAddress) should ===(Seq()) - diffSeen(g1, g2, selfDummyAddress) should ===(Seq.empty) + diffSeen(ClusterSettings.DefaultTeam, g1, g2, selfDummyAddress) should ===(Seq.empty) } "be produced for members becoming reachable after unreachable" in { @@ -104,7 +104,7 @@ class ClusterDomainEventSpec extends WordSpec with Matchers { diffMemberEvents(g1, g2) should ===(Seq(MemberRemoved(dRemoved, Exiting))) diffUnreachable(g1, g2, selfDummyAddress) should ===(Seq.empty) - diffSeen(g1, g2, selfDummyAddress) should ===(Seq(SeenChanged(convergence = true, seenBy = s2.map(_.address)))) + diffSeen(ClusterSettings.DefaultTeam, g1, g2, selfDummyAddress) should ===(Seq(SeenChanged(convergence = true, seenBy = s2.map(_.address)))) } "be produced for convergence changes" in { @@ -113,10 +113,10 @@ class ClusterDomainEventSpec extends WordSpec with Matchers { diffMemberEvents(g1, g2) should ===(Seq.empty) diffUnreachable(g1, g2, selfDummyAddress) should ===(Seq.empty) - diffSeen(g1, g2, selfDummyAddress) should ===(Seq(SeenChanged(convergence = true, seenBy = Set(aUp.address, bUp.address)))) + diffSeen(ClusterSettings.DefaultTeam, g1, g2, selfDummyAddress) should ===(Seq(SeenChanged(convergence = true, seenBy = Set(aUp.address, bUp.address)))) diffMemberEvents(g2, g1) should ===(Seq.empty) diffUnreachable(g2, g1, selfDummyAddress) should ===(Seq.empty) - diffSeen(g2, g1, selfDummyAddress) should ===(Seq(SeenChanged(convergence = true, seenBy = Set(aUp.address, bUp.address, eJoining.address)))) + diffSeen(ClusterSettings.DefaultTeam, g2, g1, selfDummyAddress) should ===(Seq(SeenChanged(convergence = true, seenBy = Set(aUp.address, bUp.address, eJoining.address)))) } "be produced for leader changes" in { @@ -125,27 +125,38 @@ class ClusterDomainEventSpec 
extends WordSpec with Matchers { diffMemberEvents(g1, g2) should ===(Seq(MemberRemoved(aRemoved, Up))) diffUnreachable(g1, g2, selfDummyAddress) should ===(Seq.empty) - diffSeen(g1, g2, selfDummyAddress) should ===(Seq(SeenChanged(convergence = true, seenBy = s2.map(_.address)))) - diffLeader(g1, g2, selfDummyAddress) should ===(Seq(LeaderChanged(Some(bUp.address)))) + diffSeen(ClusterSettings.DefaultTeam, g1, g2, selfDummyAddress) should ===(Seq(SeenChanged(convergence = true, seenBy = s2.map(_.address)))) + diffLeader(ClusterSettings.DefaultTeam, g1, g2, selfDummyAddress) should ===(Seq(LeaderChanged(Some(bUp.address)))) } - "be produced for role leader changes" in { + "be produced for role leader changes in the same team" in { val g0 = Gossip.empty val g1 = Gossip(members = SortedSet(aUp, bUp, cUp, dLeaving, eJoining)) val g2 = Gossip(members = SortedSet(bUp, cUp, dExiting, eJoining)) - diffRolesLeader(g0, g1, selfDummyAddress) should ===( + diffRolesLeader(ClusterSettings.DefaultTeam, g0, g1, selfDummyAddress) should ===( Set( + // since this role is implicitly added + RoleLeaderChanged(ClusterSettings.TeamRolePrefix + ClusterSettings.DefaultTeam, Some(aUp.address)), RoleLeaderChanged("AA", Some(aUp.address)), RoleLeaderChanged("AB", Some(aUp.address)), RoleLeaderChanged("BB", Some(bUp.address)), RoleLeaderChanged("DD", Some(dLeaving.address)), RoleLeaderChanged("DE", Some(dLeaving.address)), RoleLeaderChanged("EE", Some(eUp.address)))) - diffRolesLeader(g1, g2, selfDummyAddress) should ===( + diffRolesLeader(ClusterSettings.DefaultTeam, g1, g2, selfDummyAddress) should ===( Set( + RoleLeaderChanged(ClusterSettings.TeamRolePrefix + ClusterSettings.DefaultTeam, Some(bUp.address)), RoleLeaderChanged("AA", None), RoleLeaderChanged("AB", Some(bUp.address)), RoleLeaderChanged("DE", Some(eJoining.address)))) } + + "not be produced for role leader changes in other teams" in { + val g0 = Gossip.empty + val g1 = Gossip(members = SortedSet(aUp, bUp, cUp, dLeaving, eJoining)) + val g2 = Gossip(members = SortedSet(bUp, cUp, dExiting, eJoining)) + diffRolesLeader("dc2", g0, g1, selfDummyAddress) should ===(Set.empty) + diffRolesLeader("dc2", g1, g2, selfDummyAddress) should ===(Set.empty) + } } } diff --git a/akka-cluster/src/test/scala/akka/cluster/GossipSpec.scala b/akka-cluster/src/test/scala/akka/cluster/GossipSpec.scala index 956814d9df..810555adfc 100644 --- a/akka-cluster/src/test/scala/akka/cluster/GossipSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/GossipSpec.scala @@ -7,6 +7,8 @@ package akka.cluster import org.scalatest.WordSpec import org.scalatest.Matchers import akka.actor.Address +import akka.cluster.ClusterSettings.DefaultTeam + import scala.collection.immutable.SortedSet class GossipSpec extends WordSpec with Matchers { @@ -25,43 +27,55 @@ class GossipSpec extends WordSpec with Matchers { val e2 = TestMember(e1.address, Up) val e3 = TestMember(e1.address, Down) + val dc1a1 = TestMember(Address("akka.tcp", "sys", "a", 2552), Up, Set.empty, team = "dc1") + val dc1b1 = TestMember(Address("akka.tcp", "sys", "b", 2552), Up, Set.empty, team = "dc1") + val dc2c1 = TestMember(Address("akka.tcp", "sys", "c", 2552), Up, Set.empty, team = "dc2") + val dc2d1 = TestMember(Address("akka.tcp", "sys", "d", 2552), Up, Set.empty, team = "dc2") + val dc2d2 = TestMember(dc2d1.address, status = Down, roles = Set.empty, team = dc2d1.team) + "A Gossip" must { + "have correct test setup" in { + List(a1, a2, b1, b2, c1, c2, c3, d1, e1, e2, e3).foreach(m ⇒ + m.team should ===(DefaultTeam) + ) + 
} + "reach convergence when it's empty" in { - Gossip.empty.convergence(a1.uniqueAddress, Set.empty) should ===(true) + Gossip.empty.convergence(DefaultTeam, a1.uniqueAddress, Set.empty) should ===(true) } "reach convergence for one node" in { val g1 = Gossip(members = SortedSet(a1)).seen(a1.uniqueAddress) - g1.convergence(a1.uniqueAddress, Set.empty) should ===(true) + g1.convergence(DefaultTeam, a1.uniqueAddress, Set.empty) should ===(true) } "not reach convergence until all have seen version" in { val g1 = Gossip(members = SortedSet(a1, b1)).seen(a1.uniqueAddress) - g1.convergence(a1.uniqueAddress, Set.empty) should ===(false) + g1.convergence(DefaultTeam, a1.uniqueAddress, Set.empty) should ===(false) } "reach convergence for two nodes" in { val g1 = Gossip(members = SortedSet(a1, b1)).seen(a1.uniqueAddress).seen(b1.uniqueAddress) - g1.convergence(a1.uniqueAddress, Set.empty) should ===(true) + g1.convergence(DefaultTeam, a1.uniqueAddress, Set.empty) should ===(true) } "reach convergence, skipping joining" in { // e1 is joining val g1 = Gossip(members = SortedSet(a1, b1, e1)).seen(a1.uniqueAddress).seen(b1.uniqueAddress) - g1.convergence(a1.uniqueAddress, Set.empty) should ===(true) + g1.convergence(DefaultTeam, a1.uniqueAddress, Set.empty) should ===(true) } "reach convergence, skipping down" in { // e3 is down val g1 = Gossip(members = SortedSet(a1, b1, e3)).seen(a1.uniqueAddress).seen(b1.uniqueAddress) - g1.convergence(a1.uniqueAddress, Set.empty) should ===(true) + g1.convergence(DefaultTeam, a1.uniqueAddress, Set.empty) should ===(true) } "reach convergence, skipping Leaving with exitingConfirmed" in { // c1 is Leaving val g1 = Gossip(members = SortedSet(a1, b1, c1)).seen(a1.uniqueAddress).seen(b1.uniqueAddress) - g1.convergence(a1.uniqueAddress, Set(c1.uniqueAddress)) should ===(true) + g1.convergence(DefaultTeam, a1.uniqueAddress, Set(c1.uniqueAddress)) should ===(true) } "reach convergence, skipping unreachable Leaving with exitingConfirmed" in { @@ -69,16 +83,16 @@ class GossipSpec extends WordSpec with Matchers { val r1 = Reachability.empty.unreachable(b1.uniqueAddress, c1.uniqueAddress) val g1 = Gossip(members = SortedSet(a1, b1, c1), overview = GossipOverview(reachability = r1)) .seen(a1.uniqueAddress).seen(b1.uniqueAddress) - g1.convergence(a1.uniqueAddress, Set(c1.uniqueAddress)) should ===(true) + g1.convergence(DefaultTeam, a1.uniqueAddress, Set(c1.uniqueAddress)) should ===(true) } "not reach convergence when unreachable" in { val r1 = Reachability.empty.unreachable(b1.uniqueAddress, a1.uniqueAddress) val g1 = (Gossip(members = SortedSet(a1, b1), overview = GossipOverview(reachability = r1))) .seen(a1.uniqueAddress).seen(b1.uniqueAddress) - g1.convergence(b1.uniqueAddress, Set.empty) should ===(false) + g1.convergence(DefaultTeam, b1.uniqueAddress, Set.empty) should ===(false) // but from a1's point of view (it knows that itself is not unreachable) - g1.convergence(a1.uniqueAddress, Set.empty) should ===(true) + g1.convergence(DefaultTeam, a1.uniqueAddress, Set.empty) should ===(true) } "reach convergence when downed node has observed unreachable" in { @@ -86,7 +100,7 @@ class GossipSpec extends WordSpec with Matchers { val r1 = Reachability.empty.unreachable(e3.uniqueAddress, a1.uniqueAddress) val g1 = (Gossip(members = SortedSet(a1, b1, e3), overview = GossipOverview(reachability = r1))) .seen(a1.uniqueAddress).seen(b1.uniqueAddress).seen(e3.uniqueAddress) - g1.convergence(b1.uniqueAddress, Set.empty) should ===(true) + g1.convergence(DefaultTeam, b1.uniqueAddress, 
Set.empty) should ===(true) } "merge members by status priority" in { @@ -133,21 +147,37 @@ class GossipSpec extends WordSpec with Matchers { } "have leader as first member based on ordering, except Exiting status" in { - Gossip(members = SortedSet(c2, e2)).leader(c2.uniqueAddress) should ===(Some(c2.uniqueAddress)) - Gossip(members = SortedSet(c3, e2)).leader(c3.uniqueAddress) should ===(Some(e2.uniqueAddress)) - Gossip(members = SortedSet(c3)).leader(c3.uniqueAddress) should ===(Some(c3.uniqueAddress)) + Gossip(members = SortedSet(c2, e2)).teamLeader(DefaultTeam, c2.uniqueAddress) should ===(Some(c2.uniqueAddress)) + Gossip(members = SortedSet(c3, e2)).teamLeader(DefaultTeam, c3.uniqueAddress) should ===(Some(e2.uniqueAddress)) + Gossip(members = SortedSet(c3)).teamLeader(DefaultTeam, c3.uniqueAddress) should ===(Some(c3.uniqueAddress)) } "have leader as first reachable member based on ordering" in { val r1 = Reachability.empty.unreachable(e2.uniqueAddress, c2.uniqueAddress) val g1 = Gossip(members = SortedSet(c2, e2), overview = GossipOverview(reachability = r1)) - g1.leader(e2.uniqueAddress) should ===(Some(e2.uniqueAddress)) + g1.teamLeader(DefaultTeam, e2.uniqueAddress) should ===(Some(e2.uniqueAddress)) // but when c2 is selfUniqueAddress - g1.leader(c2.uniqueAddress) should ===(Some(c2.uniqueAddress)) + g1.teamLeader(DefaultTeam, c2.uniqueAddress) should ===(Some(c2.uniqueAddress)) } "not have Down member as leader" in { - Gossip(members = SortedSet(e3)).leader(e3.uniqueAddress) should ===(None) + Gossip(members = SortedSet(e3)).teamLeader(DefaultTeam, e3.uniqueAddress) should ===(None) + } + + "have a leader per team" in { + val g1 = Gossip(members = SortedSet(dc1a1, dc1b1, dc2c1, dc2d1)) + + // everybodys point of view is dc1a1 being leader of dc1 + g1.teamLeader("dc1", dc1a1.uniqueAddress) should ===(Some(dc1a1.uniqueAddress)) + g1.teamLeader("dc1", dc1b1.uniqueAddress) should ===(Some(dc1a1.uniqueAddress)) + g1.teamLeader("dc1", dc2c1.uniqueAddress) should ===(Some(dc1a1.uniqueAddress)) + g1.teamLeader("dc1", dc2d1.uniqueAddress) should ===(Some(dc1a1.uniqueAddress)) + + // and dc2c1 being leader of dc2 + g1.teamLeader("dc2", dc1a1.uniqueAddress) should ===(Some(dc2c1.uniqueAddress)) + g1.teamLeader("dc2", dc1b1.uniqueAddress) should ===(Some(dc2c1.uniqueAddress)) + g1.teamLeader("dc2", dc2c1.uniqueAddress) should ===(Some(dc2c1.uniqueAddress)) + g1.teamLeader("dc2", dc2d1.uniqueAddress) should ===(Some(dc2c1.uniqueAddress)) } "merge seen table correctly" in { @@ -182,5 +212,217 @@ class GossipSpec extends WordSpec with Matchers { val g3 = Gossip(members = SortedSet(a2, b1.copyUp(3), e2.copyUp(4))) g3.youngestMember should ===(e2) } + + "reach convergence per team" in { + val g = Gossip(members = SortedSet(dc1a1, dc1b1, dc2c1, dc2d1)) + .seen(dc1a1.uniqueAddress) + .seen(dc1b1.uniqueAddress) + .seen(dc2c1.uniqueAddress) + .seen(dc2d1.uniqueAddress) + g.teamLeader("dc1", dc1a1.uniqueAddress) should ===(Some(dc1a1.uniqueAddress)) + g.convergence("dc1", dc1a1.uniqueAddress, Set.empty) should ===(true) + + g.teamLeader("dc2", dc2c1.uniqueAddress) should ===(Some(dc2c1.uniqueAddress)) + g.convergence("dc2", dc2c1.uniqueAddress, Set.empty) should ===(true) + } + + "reach convergence per team even if members of another team has not seen the gossip" in { + val g = Gossip(members = SortedSet(dc1a1, dc1b1, dc2c1, dc2d1)) + .seen(dc1a1.uniqueAddress) + .seen(dc1b1.uniqueAddress) + .seen(dc2c1.uniqueAddress) + // dc2d1 has not seen the gossip + + // so dc1 can reach convergence + 
g.teamLeader("dc1", dc1a1.uniqueAddress) should ===(Some(dc1a1.uniqueAddress)) + g.convergence("dc1", dc1a1.uniqueAddress, Set.empty) should ===(true) + + // but dc2 cannot + g.teamLeader("dc2", dc2c1.uniqueAddress) should ===(Some(dc2c1.uniqueAddress)) + g.convergence("dc2", dc2c1.uniqueAddress, Set.empty) should ===(false) + } + + "reach convergence per team even if another team contains unreachable" in { + val r1 = Reachability.empty.unreachable(dc2c1.uniqueAddress, dc2d1.uniqueAddress) + + val g = Gossip(members = SortedSet(dc1a1, dc1b1, dc2c1, dc2d1), overview = GossipOverview(reachability = r1)) + .seen(dc1a1.uniqueAddress) + .seen(dc1b1.uniqueAddress) + .seen(dc2c1.uniqueAddress) + .seen(dc2d1.uniqueAddress) + + // this team doesn't care about dc2 having reachability problems and can reach convergence + g.teamLeader("dc1", dc1a1.uniqueAddress) should ===(Some(dc1a1.uniqueAddress)) + g.convergence("dc1", dc1a1.uniqueAddress, Set.empty) should ===(true) + + // this team is cannot reach convergence because of unreachability within the team + g.teamLeader("dc2", dc2c1.uniqueAddress) should ===(Some(dc2c1.uniqueAddress)) + g.convergence("dc2", dc2c1.uniqueAddress, Set.empty) should ===(false) + } + + "reach convergence per team even if there is unreachable nodes in another team" in { + val r1 = Reachability.empty + .unreachable(dc1a1.uniqueAddress, dc2d1.uniqueAddress) + .unreachable(dc2d1.uniqueAddress, dc1a1.uniqueAddress) + + val g = Gossip(members = SortedSet(dc1a1, dc1b1, dc2c1, dc2d1), overview = GossipOverview(reachability = r1)) + .seen(dc1a1.uniqueAddress) + .seen(dc1b1.uniqueAddress) + .seen(dc2c1.uniqueAddress) + .seen(dc2d1.uniqueAddress) + + // neither team is affected by the inter-team unreachability as far as convergence goes + g.teamLeader("dc1", dc1a1.uniqueAddress) should ===(Some(dc1a1.uniqueAddress)) + g.convergence("dc1", dc1a1.uniqueAddress, Set.empty) should ===(true) + + g.teamLeader("dc2", dc2c1.uniqueAddress) should ===(Some(dc2c1.uniqueAddress)) + g.convergence("dc2", dc2c1.uniqueAddress, Set.empty) should ===(true) + } + + "ignore cross team unreachability when determining inside of team reachability" in { + val r1 = Reachability.empty + .unreachable(dc1a1.uniqueAddress, dc2c1.uniqueAddress) + .unreachable(dc2c1.uniqueAddress, dc1a1.uniqueAddress) + + val g = Gossip(members = SortedSet(dc1a1, dc1b1, dc2c1, dc2d1), overview = GossipOverview(reachability = r1)) + + // inside of the teams we don't care about the cross team unreachability + g.isReachable(dc1a1.uniqueAddress, dc1b1.uniqueAddress) should ===(true) + g.isReachable(dc1b1.uniqueAddress, dc1a1.uniqueAddress) should ===(true) + g.isReachable(dc2c1.uniqueAddress, dc2d1.uniqueAddress) should ===(true) + g.isReachable(dc2d1.uniqueAddress, dc2c1.uniqueAddress) should ===(true) + + g.isReachableExcludingDownedObservers(dc1a1.team, dc1b1.uniqueAddress) should ===(true) + g.isReachableExcludingDownedObservers(dc1b1.team, dc1a1.uniqueAddress) should ===(true) + g.isReachableExcludingDownedObservers(dc2c1.team, dc2d1.uniqueAddress) should ===(true) + g.isReachableExcludingDownedObservers(dc2d1.team, dc2c1.uniqueAddress) should ===(true) + + // between teams it matters though + g.isReachable(dc1a1.uniqueAddress, dc2c1.uniqueAddress) should ===(false) + g.isReachable(dc2c1.uniqueAddress, dc1a1.uniqueAddress) should ===(false) + // this isReachable method only says false for specific unreachable entries between the nodes + g.isReachable(dc1b1.uniqueAddress, dc2c1.uniqueAddress) should ===(true) + 
g.isReachable(dc2d1.uniqueAddress, dc1a1.uniqueAddress) should ===(true) + + // this one looks at all unreachable-entries for the to-address + g.isReachableExcludingDownedObservers(dc1a1.team, dc2c1.uniqueAddress) should ===(false) + g.isReachableExcludingDownedObservers(dc1b1.team, dc2c1.uniqueAddress) should ===(false) + g.isReachableExcludingDownedObservers(dc2c1.team, dc1a1.uniqueAddress) should ===(false) + g.isReachableExcludingDownedObservers(dc2d1.team, dc1a1.uniqueAddress) should ===(false) + + // between the two other nodes there is no unreachability + g.isReachable(dc1b1.uniqueAddress, dc2d1.uniqueAddress) should ===(true) + g.isReachable(dc2d1.uniqueAddress, dc1b1.uniqueAddress) should ===(true) + + g.isReachableExcludingDownedObservers(dc1b1.team, dc2d1.uniqueAddress) should ===(true) + g.isReachableExcludingDownedObservers(dc2d1.team, dc1b1.uniqueAddress) should ===(true) + } + + "not returning a downed team leader" in { + val g = Gossip(members = SortedSet(dc1a1.copy(Down), dc1b1)) + g.leaderOf("dc1", g.members, dc1b1.uniqueAddress) should ===(Some(dc1b1.uniqueAddress)) + } + + "ignore cross team unreachability when determining team leader" in { + val r1 = Reachability.empty + .unreachable(dc1a1.uniqueAddress, dc2d1.uniqueAddress) + .unreachable(dc2d1.uniqueAddress, dc1a1.uniqueAddress) + + val g = Gossip(members = SortedSet(dc1a1, dc1b1, dc2c1, dc2d1), overview = GossipOverview(reachability = r1)) + + g.leaderOf("dc1", g.members, dc1a1.uniqueAddress) should ===(Some(dc1a1.uniqueAddress)) + g.leaderOf("dc1", g.members, dc1b1.uniqueAddress) should ===(Some(dc1a1.uniqueAddress)) + g.leaderOf("dc1", g.members, dc2c1.uniqueAddress) should ===(Some(dc1a1.uniqueAddress)) + g.leaderOf("dc1", g.members, dc2d1.uniqueAddress) should ===(Some(dc1a1.uniqueAddress)) + + g.leaderOf("dc2", g.members, dc1a1.uniqueAddress) should ===(Some(dc2c1.uniqueAddress)) + g.leaderOf("dc2", g.members, dc1b1.uniqueAddress) should ===(Some(dc2c1.uniqueAddress)) + g.leaderOf("dc2", g.members, dc2c1.uniqueAddress) should ===(Some(dc2c1.uniqueAddress)) + g.leaderOf("dc2", g.members, dc2d1.uniqueAddress) should ===(Some(dc2c1.uniqueAddress)) + } + + // TODO test coverage for when leaderOf returns None - I have not been able to figure it out + + "clear out a bunch of stuff when removing a node" in { + val g = Gossip(members = SortedSet(dc1a1, dc1b1, dc2d2)) + .remove(dc1b1.uniqueAddress, System.currentTimeMillis()) + + g.seenBy should not contain (dc1b1.uniqueAddress) + g.overview.reachability.records.exists(_.observer == dc1b1.uniqueAddress) should be(false) + g.overview.reachability.records.exists(_.subject == dc1b1.uniqueAddress) should be(false) + g.version.versions should have size (0) + + // sort order should be kept + g.members.toList should ===(List(dc1a1, dc2d2)) + } + + "not reintroduce members from out-of-team gossip when merging" in { + // dc1 does not know about any unreachability nor that the node has been downed + val gdc1 = Gossip(members = SortedSet(dc1a1, dc1b1, dc2c1, dc2d1)) + + // dc2 has downed the dc2d1 node, seen it as unreachable and removed it + val gdc2 = Gossip(members = SortedSet(dc1a1, dc1b1, dc2c1, dc2d1)) + .remove(dc2d1.uniqueAddress, System.currentTimeMillis()) + + gdc2.tombstones.keys should contain(dc2d1.uniqueAddress) + gdc2.members should not contain (dc2d1) + gdc2.overview.reachability.records.filter(r ⇒ r.subject == dc2d1.uniqueAddress || r.observer == dc2d1.uniqueAddress) should be(empty) + gdc2.overview.reachability.versions.keys should not contain 
(dc2d1.uniqueAddress) + + // when we merge the two, it should not be reintroduced + val merged1 = gdc2 merge gdc1 + merged1.members should ===(SortedSet(dc1a1, dc1b1, dc2c1)) + + merged1.tombstones.keys should contain(dc2d1.uniqueAddress) + merged1.members should not contain (dc2d1) + merged1.overview.reachability.records.filter(r ⇒ r.subject == dc2d1.uniqueAddress || r.observer == dc2d1.uniqueAddress) should be(empty) + merged1.overview.reachability.versions.keys should not contain (dc2d1.uniqueAddress) + + } + + "prune old tombstones" in { + val timestamp = 352684800 + val g = Gossip(members = SortedSet(dc1a1, dc1b1)) + .remove(dc1b1.uniqueAddress, timestamp) + + g.tombstones.keys should contain(dc1b1.uniqueAddress) + + val pruned = g.pruneTombstones(timestamp + 1) + + // when we merge the two, it should not be reintroduced + pruned.tombstones.keys should not contain (dc1b1.uniqueAddress) + } + + "mark a node as down" in { + val g = Gossip(members = SortedSet(dc1a1, dc1b1)) + .seen(dc1a1.uniqueAddress) + .seen(dc1b1.uniqueAddress) + .markAsDown(dc1b1) + + g.member(dc1b1.uniqueAddress).status should ===(MemberStatus.Down) + g.overview.seen should not contain (dc1b1.uniqueAddress) + + // obviously the other member should be unaffected + g.member(dc1a1.uniqueAddress).status should ===(dc1a1.status) + g.overview.seen should contain(dc1a1.uniqueAddress) + } + + "update members" in { + val joining = TestMember(Address("akka.tcp", "sys", "d", 2552), Joining, Set.empty, team = "dc2") + val g = Gossip(members = SortedSet(dc1a1, joining)) + + g.member(joining.uniqueAddress).status should ===(Joining) + val oldMembers = g.members + + val updated = g.update(SortedSet(joining.copy(status = Up))) + + updated.member(joining.uniqueAddress).status should ===(Up) + + // obviously the other member should be unaffected + updated.member(dc1a1.uniqueAddress).status should ===(dc1a1.status) + + // order should be kept + updated.members.toList.map(_.uniqueAddress) should ===(List(dc1a1.uniqueAddress, joining.uniqueAddress)) + } } } diff --git a/akka-cluster/src/test/scala/akka/cluster/TestMember.scala b/akka-cluster/src/test/scala/akka/cluster/TestMember.scala index ef5299fa8a..028a727f33 100644 --- a/akka-cluster/src/test/scala/akka/cluster/TestMember.scala +++ b/akka-cluster/src/test/scala/akka/cluster/TestMember.scala @@ -9,6 +9,6 @@ object TestMember { def apply(address: Address, status: MemberStatus): Member = apply(address, status, Set.empty) - def apply(address: Address, status: MemberStatus, roles: Set[String]): Member = - new Member(UniqueAddress(address, 0L), Int.MaxValue, status, roles) + def apply(address: Address, status: MemberStatus, roles: Set[String], team: ClusterSettings.Team = ClusterSettings.DefaultTeam): Member = + new Member(UniqueAddress(address, 0L), Int.MaxValue, status, roles + (ClusterSettings.TeamRolePrefix + team)) } diff --git a/akka-cluster/src/test/scala/akka/cluster/protobuf/ClusterMessageSerializerSpec.scala b/akka-cluster/src/test/scala/akka/cluster/protobuf/ClusterMessageSerializerSpec.scala index 4bc6c998f0..db327a7fbc 100644 --- a/akka-cluster/src/test/scala/akka/cluster/protobuf/ClusterMessageSerializerSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/protobuf/ClusterMessageSerializerSpec.scala @@ -21,7 +21,7 @@ class ClusterMessageSerializerSpec extends AkkaSpec( val ref = serializer.fromBinary(blob, obj.getClass) obj match { case env: GossipEnvelope ⇒ - val env2 = obj.asInstanceOf[GossipEnvelope] + val env2 = ref.asInstanceOf[GossipEnvelope] env2.from should 
===(env.from) env2.to should ===(env.to) env2.gossip should ===(env.gossip) @@ -65,9 +65,11 @@ class ClusterMessageSerializerSpec extends AkkaSpec( val g2 = (g1 :+ node3 :+ node4).seen(a1.uniqueAddress).seen(c1.uniqueAddress) val reachability3 = Reachability.empty.unreachable(a1.uniqueAddress, e1.uniqueAddress).unreachable(b1.uniqueAddress, e1.uniqueAddress) val g3 = g2.copy(members = SortedSet(a1, b1, c1, d1, e1), overview = g2.overview.copy(reachability = reachability3)) + val g4 = g1.remove(d1.uniqueAddress, 352684800) checkSerialization(GossipEnvelope(a1.uniqueAddress, uniqueAddress2, g1)) checkSerialization(GossipEnvelope(a1.uniqueAddress, uniqueAddress2, g2)) checkSerialization(GossipEnvelope(a1.uniqueAddress, uniqueAddress2, g3)) + checkSerialization(GossipEnvelope(a1.uniqueAddress, uniqueAddress2, g4)) checkSerialization(GossipStatus(a1.uniqueAddress, g1.version)) checkSerialization(GossipStatus(a1.uniqueAddress, g2.version)) diff --git a/project/MiMa.scala b/project/MiMa.scala index 6c15c4a27c..8eb9783025 100644 --- a/project/MiMa.scala +++ b/project/MiMa.scala @@ -1229,7 +1229,28 @@ object MiMa extends AutoPlugin { // #23023 added a new overload with implementation to trait, so old transport implementations compiled against // older versions will be missing the method. We accept that incompatibility for now. - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.remote.transport.AssociationHandle.disassociate") + ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.remote.transport.AssociationHandle.disassociate"), + + // #23228 single leader per cluster team + ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.Gossip.apply"), + ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.Gossip.copy"), + ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.Gossip.this"), + ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.Gossip.convergence"), + ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.Gossip.isLeader"), + ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.Gossip.leader"), + ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.Gossip.leaderOf"), + ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.Gossip.roleLeader"), + ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ClusterCoreDaemon.NumberOfGossipsBeforeShutdownWhenLeaderExits"), + ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ClusterCoreDaemon.vclockName"), + ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ClusterCoreDaemon.MaxGossipsBeforeShuttingDownMyself"), + ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ClusterEvent.diffLeader"), + ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ClusterEvent.diffRolesLeader"), + ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ClusterEvent.diffSeen"), + ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.cluster.protobuf.msg.ClusterMessages#GossipOrBuilder.getTombstonesCount"), + ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.cluster.protobuf.msg.ClusterMessages#GossipOrBuilder.getTombstones"), + ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.cluster.protobuf.msg.ClusterMessages#GossipOrBuilder.getTombstonesList"), + ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.cluster.protobuf.msg.ClusterMessages#GossipOrBuilder.getTombstonesOrBuilderList"), + 
ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.cluster.protobuf.msg.ClusterMessages#GossipOrBuilder.getTombstonesOrBuilder") ) ) From ccea5a0eac2075c1c1f64d28ef69f2faf65ffecf Mon Sep 17 00:00:00 2001 From: Arnout Engelen Date: Mon, 26 Jun 2017 16:03:06 +0200 Subject: [PATCH 04/34] Make cluster singleton DC aware, #23230 --- .../singleton/ClusterSingletonManager.scala | 6 +- .../singleton/ClusterSingletonProxy.scala | 6 +- .../singleton/TeamSingletonManagerSpec.scala | 110 ++++++++++++++++++ 3 files changed, 118 insertions(+), 4 deletions(-) create mode 100644 akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/TeamSingletonManagerSpec.scala diff --git a/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonManager.scala b/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonManager.scala index 1e5f81c304..3578d22abe 100644 --- a/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonManager.scala +++ b/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonManager.scala @@ -256,10 +256,12 @@ object ClusterSingletonManager { } override def postStop(): Unit = cluster.unsubscribe(self) - def matchingRole(member: Member): Boolean = role match { + private val selfTeam = "team-" + cluster.settings.Team + + def matchingRole(member: Member): Boolean = member.hasRole(selfTeam) && (role match { case None ⇒ true case Some(r) ⇒ member.hasRole(r) - } + }) def trackChange(block: () ⇒ Unit): Unit = { val before = membersByAge.headOption diff --git a/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonProxy.scala b/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonProxy.scala index 3ea07b53fe..0cb2c06b56 100644 --- a/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonProxy.scala +++ b/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonProxy.scala @@ -162,10 +162,12 @@ final class ClusterSingletonProxy(singletonManagerPath: String, settings: Cluste identifyTimer = None } - def matchingRole(member: Member): Boolean = role match { + private val selfTeam = "team-" + cluster.settings.Team + + def matchingRole(member: Member): Boolean = member.hasRole(selfTeam) && (role match { case None ⇒ true case Some(r) ⇒ member.hasRole(r) - } + }) def handleInitial(state: CurrentClusterState): Unit = { trackChange { diff --git a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/TeamSingletonManagerSpec.scala b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/TeamSingletonManagerSpec.scala new file mode 100644 index 0000000000..f3b4ee65c3 --- /dev/null +++ b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/TeamSingletonManagerSpec.scala @@ -0,0 +1,110 @@ +/** + * Copyright (C) 2017 Lightbend Inc. 
+ */ +package akka.cluster.singleton + +import scala.concurrent.duration._ + +import com.typesafe.config.ConfigFactory + +import akka.actor.{ Actor, ActorLogging, Address, PoisonPill, Props } +import akka.cluster.Cluster + +import akka.testkit.ImplicitSender +import akka.remote.testkit.{ MultiNodeConfig, MultiNodeSpec, STMultiNodeSpec } + +object TeamSingletonManagerSpec extends MultiNodeConfig { + val controller = role("controller") + val first = role("first") + val second = role("second") + val third = role("third") + + commonConfig(ConfigFactory.parseString(""" + akka.loglevel = INFO + akka.actor.provider = "cluster" + akka.actor.serialize-creators = off + akka.remote.log-remote-lifecycle-events = off""")) + + nodeConfig(controller)( + ConfigFactory.parseString("akka.cluster.team = one\n" + + "akka.cluster.roles = [ ]")) + nodeConfig(first) { + ConfigFactory.parseString("akka.cluster.team = one\n" + + "akka.cluster.roles = [ worker ]") + } + nodeConfig(second)( + ConfigFactory.parseString("akka.cluster.team = two\n" + + "akka.cluster.roles = [ worker ]")) + nodeConfig(third)( + ConfigFactory.parseString("akka.cluster.team = two\n" + + "akka.cluster.roles = [ worker ]")) +} + +class TeamSingletonManagerMultiJvmNode1 extends TeamSingletonManagerSpec +class TeamSingletonManagerMultiJvmNode2 extends TeamSingletonManagerSpec +class TeamSingletonManagerMultiJvmNode3 extends TeamSingletonManagerSpec +class TeamSingletonManagerMultiJvmNode4 extends TeamSingletonManagerSpec + +class TeamSingleton extends Actor with ActorLogging { + import TeamSingleton._ + + val cluster = Cluster(context.system) + + override def receive: Receive = { + case Ping ⇒ + sender() ! Pong(cluster.settings.Team, cluster.selfAddress, cluster.selfRoles) + } +} +object TeamSingleton { + case object Ping + case class Pong(fromTeam: String, fromAddress: Address, roles: Set[String]) +} + +abstract class TeamSingletonManagerSpec extends MultiNodeSpec(TeamSingletonManagerSpec) with STMultiNodeSpec with ImplicitSender { + import TeamSingletonManagerSpec._ + + override def initialParticipants = roles.size + + val cluster = Cluster(system) + cluster.join(node(controller).address) + enterBarrier("nodes-joined") + + val worker = "worker" + + "A SingletonManager in a team" must { + "start a singleton instance for each team" in { + + runOn(first, second, third) { + system.actorOf( + ClusterSingletonManager.props( + Props[TeamSingleton](), + PoisonPill, + ClusterSingletonManagerSettings(system).withRole(worker)), + "singletonManager") + } + + val proxy = system.actorOf(ClusterSingletonProxy.props( + "/user/singletonManager", + ClusterSingletonProxySettings(system).withRole(worker))) + + enterBarrier("managers-started") + + proxy ! 
TeamSingleton.Ping + val pong = expectMsgType[TeamSingleton.Pong](10.seconds) + + enterBarrier("pongs-received") + + pong.fromTeam should equal(Cluster(system).settings.Team) + pong.roles should contain(worker) + runOn(controller, first) { + pong.roles should contain("team-one") + } + runOn(second, third) { + pong.roles should contain("team-two") + } + + enterBarrier("after-1") + } + + } +} From 2044c1712be3acf0dab3608169315b9e07cd4560 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Mon, 26 Jun 2017 16:28:44 +0200 Subject: [PATCH 05/34] Support cross team in ClusterSingletonProxy, #23230 --- .../singleton/ClusterSingletonManager.scala | 9 ++-- .../singleton/ClusterSingletonProxy.scala | 27 +++++++++--- .../singleton/TeamSingletonManagerSpec.scala | 41 +++++++++++++------ 3 files changed, 54 insertions(+), 23 deletions(-) diff --git a/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonManager.scala b/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonManager.scala index 3578d22abe..a0e9f1c868 100644 --- a/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonManager.scala +++ b/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonManager.scala @@ -30,6 +30,7 @@ import akka.Done import akka.actor.CoordinatedShutdown import akka.pattern.ask import akka.util.Timeout +import akka.cluster.ClusterSettings object ClusterSingletonManagerSettings { @@ -256,12 +257,10 @@ object ClusterSingletonManager { } override def postStop(): Unit = cluster.unsubscribe(self) - private val selfTeam = "team-" + cluster.settings.Team + private val selfTeam = ClusterSettings.TeamRolePrefix + cluster.settings.Team - def matchingRole(member: Member): Boolean = member.hasRole(selfTeam) && (role match { - case None ⇒ true - case Some(r) ⇒ member.hasRole(r) - }) + def matchingRole(member: Member): Boolean = + member.hasRole(selfTeam) && role.forall(member.hasRole) def trackChange(block: () ⇒ Unit): Unit = { val before = membersByAge.headOption diff --git a/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonProxy.scala b/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonProxy.scala index 0cb2c06b56..a31dc53035 100644 --- a/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonProxy.scala +++ b/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonProxy.scala @@ -19,6 +19,7 @@ import com.typesafe.config.Config import akka.actor.NoSerializationVerificationNeeded import akka.event.Logging import akka.util.MessageBuffer +import akka.cluster.ClusterSettings object ClusterSingletonProxySettings { @@ -63,6 +64,7 @@ object ClusterSingletonProxySettings { /** * @param singletonName The actor name of the singleton actor that is started by the [[ClusterSingletonManager]]. * @param role The role of the cluster nodes where the singleton can be deployed. If None, then any node will do. + * @param team The team of the cluster nodes where the singleton is running. If None then the same team as current node. * @param singletonIdentificationInterval Interval at which the proxy will try to resolve the singleton instance. * @param bufferSize If the location of the singleton is unknown the proxy will buffer this number of messages * and deliver them when the singleton is identified. 
When the buffer is full old messages will be dropped @@ -72,9 +74,18 @@ object ClusterSingletonProxySettings { final class ClusterSingletonProxySettings( val singletonName: String, val role: Option[String], + val team: Option[String], val singletonIdentificationInterval: FiniteDuration, val bufferSize: Int) extends NoSerializationVerificationNeeded { + // for backwards compatibility + def this( + singletonName: String, + role: Option[String], + singletonIdentificationInterval: FiniteDuration, + bufferSize: Int) = + this(singletonName, role, None, singletonIdentificationInterval, bufferSize) + require(bufferSize >= 0 && bufferSize <= 10000, "bufferSize must be >= 0 and <= 10000") def withSingletonName(name: String): ClusterSingletonProxySettings = copy(singletonName = name) @@ -83,6 +94,8 @@ final class ClusterSingletonProxySettings( def withRole(role: Option[String]): ClusterSingletonProxySettings = copy(role = role) + def withTeam(team: String): ClusterSingletonProxySettings = copy(team = Some(team)) + def withSingletonIdentificationInterval(singletonIdentificationInterval: FiniteDuration): ClusterSingletonProxySettings = copy(singletonIdentificationInterval = singletonIdentificationInterval) @@ -92,9 +105,10 @@ final class ClusterSingletonProxySettings( private def copy( singletonName: String = singletonName, role: Option[String] = role, + team: Option[String] = team, singletonIdentificationInterval: FiniteDuration = singletonIdentificationInterval, bufferSize: Int = bufferSize): ClusterSingletonProxySettings = - new ClusterSingletonProxySettings(singletonName, role, singletonIdentificationInterval, bufferSize) + new ClusterSingletonProxySettings(singletonName, role, team, singletonIdentificationInterval, bufferSize) } object ClusterSingletonProxy { @@ -162,12 +176,13 @@ final class ClusterSingletonProxy(singletonManagerPath: String, settings: Cluste identifyTimer = None } - private val selfTeam = "team-" + cluster.settings.Team + private val targetTeam = settings.team match { + case Some(t) ⇒ ClusterSettings.TeamRolePrefix + t + case None ⇒ ClusterSettings.TeamRolePrefix + cluster.settings.Team + } - def matchingRole(member: Member): Boolean = member.hasRole(selfTeam) && (role match { - case None ⇒ true - case Some(r) ⇒ member.hasRole(r) - }) + def matchingRole(member: Member): Boolean = + member.hasRole(targetTeam) && role.forall(member.hasRole) def handleInitial(state: CurrentClusterState): Unit = { trackChange { diff --git a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/TeamSingletonManagerSpec.scala b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/TeamSingletonManagerSpec.scala index f3b4ee65c3..dd63e8ae70 100644 --- a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/TeamSingletonManagerSpec.scala +++ b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/TeamSingletonManagerSpec.scala @@ -25,19 +25,22 @@ object TeamSingletonManagerSpec extends MultiNodeConfig { akka.actor.serialize-creators = off akka.remote.log-remote-lifecycle-events = off""")) - nodeConfig(controller)( - ConfigFactory.parseString("akka.cluster.team = one\n" + - "akka.cluster.roles = [ ]")) - nodeConfig(first) { - ConfigFactory.parseString("akka.cluster.team = one\n" + - "akka.cluster.roles = [ worker ]") + nodeConfig(controller) { + ConfigFactory.parseString(""" + akka.cluster.team = one + akka.cluster.roles = []""") + } + + nodeConfig(first) { + ConfigFactory.parseString(""" + akka.cluster.team = one + akka.cluster.roles = [ worker ]""") + } + 
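A minimal usage sketch of the cross-team proxy support introduced here, mirroring the test further below (the "/user/singletonManager" path and the "worker" role are the ones used in that spec, not a general requirement):

    // started on a node in team "two"; messages are routed to the oldest
    // matching node in team "one", where the singleton manager runs
    val proxy = system.actorOf(ClusterSingletonProxy.props(
      "/user/singletonManager",
      ClusterSingletonProxySettings(system).withRole("worker").withTeam("one")))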
nodeConfig(second, third) { + ConfigFactory.parseString(""" + akka.cluster.team = two + akka.cluster.roles = [ worker ]""") } - nodeConfig(second)( - ConfigFactory.parseString("akka.cluster.team = two\n" + - "akka.cluster.roles = [ worker ]")) - nodeConfig(third)( - ConfigFactory.parseString("akka.cluster.team = two\n" + - "akka.cluster.roles = [ worker ]")) } class TeamSingletonManagerMultiJvmNode1 extends TeamSingletonManagerSpec @@ -106,5 +109,19 @@ abstract class TeamSingletonManagerSpec extends MultiNodeSpec(TeamSingletonManag enterBarrier("after-1") } + "be able to use proxy across different team" in { + runOn(third) { + val proxy = system.actorOf(ClusterSingletonProxy.props( + "/user/singletonManager", + ClusterSingletonProxySettings(system).withRole(worker).withTeam("one"))) + proxy ! TeamSingleton.Ping + val pong = expectMsgType[TeamSingleton.Pong](10.seconds) + pong.fromTeam should ===("one") + pong.roles should contain(worker) + pong.roles should contain("team-one") + } + enterBarrier("after-1") + } + } } From 0251886111d0cb40da6e1a7354c277a60b6313ac Mon Sep 17 00:00:00 2001 From: Johannes Rudolph Date: Tue, 4 Jul 2017 12:55:11 +0200 Subject: [PATCH 06/34] =clu add comments for Reachability methods --- .../scala/akka/cluster/Reachability.scala | 34 ++++++++++++++++--- 1 file changed, 30 insertions(+), 4 deletions(-) diff --git a/akka-cluster/src/main/scala/akka/cluster/Reachability.scala b/akka-cluster/src/main/scala/akka/cluster/Reachability.scala index 9b8c39e336..808fb28e3a 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Reachability.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Reachability.scala @@ -42,6 +42,11 @@ private[cluster] object Reachability { * record, and thereby it is always possible to determine which record is newest when * merging two instances. * + * By default, each observer treats every other node as reachable. That allows to + * introduce the invariant that if an observer sees all nodes as reachable, no + * records should be kept at all. Therefore, in a running cluster with full + * reachability, no records need to be kept at all. + * * Aggregated status of a subject node is defined as (in this order): * - Terminated if any observer node considers it as Terminated * - Unreachable if any observer node considers it as Unreachable @@ -56,6 +61,8 @@ private[cluster] class Reachability private ( import Reachability._ private class Cache { + // `allUnreachable` contains all nodes that have been observed as Unreachable by at least one other node + // `allTerminated` contains all nodes that have been observed as Terminated by at least one other node val (observerRowsMap, allUnreachable, allTerminated) = { if (records.isEmpty) { val observerRowsMap = Map.empty[UniqueAddress, Map[UniqueAddress, Reachability.Record]] @@ -119,15 +126,19 @@ private[cluster] class Reachability private ( val newVersions = versions.updated(observer, v) val newRecord = Record(observer, subject, status, v) observerRows(observer) match { + // don't record Reachable observation if nothing has been noted so far case None if status == Reachable ⇒ this + // otherwise, create new instance including this first observation case None ⇒ new Reachability(records :+ newRecord, newVersions) + // otherwise, update old observations case Some(oldObserverRows) ⇒ oldObserverRows.get(subject) match { case None ⇒ if (status == Reachable && oldObserverRows.forall { case (_, r) ⇒ r.status == Reachable }) { + // FIXME: how should we have gotten into this state? 
// all Reachable, prune by removing the records of the observer, and bump the version new Reachability(records.filterNot(_.observer == observer), newVersions) } else @@ -159,6 +170,10 @@ private[cluster] class Reachability private ( (this.observerRows(observer), other.observerRows(observer)) match { case (None, None) ⇒ case (Some(rows1), Some(rows2)) ⇒ + // We throw away a complete set of records based on the version here. Couldn't we lose records here? No, + // because the observer gossips always the complete set of records. (That's hard to see in the model, because + // records also contain the version number for which they were introduced but actually the version number + // corresponds to the whole set of records of one observer at one point in time. val rows = if (observerVersion1 > observerVersion2) rows1 else rows2 recordBuilder ++= rows.collect { case (_, r) if allowed(r.subject) ⇒ r } case (Some(rows1), None) ⇒ @@ -209,27 +224,38 @@ private[cluster] class Reachability private ( else Reachable /** - * @return true if there is no observer that has marked node unreachable or terminated + * @return true if the given node is seen as Reachable, i.e. there's no negative (Unreachable, Terminated) observation + * record known for that the node. */ def isReachable(node: UniqueAddress): Boolean = isAllReachable || !allUnreachableOrTerminated.contains(node) /** - * @return true if there is no specific entry saying observer observed subject as unreachable + * @return true if the given observer node can reach the subject node. */ def isReachable(observer: UniqueAddress, subject: UniqueAddress): Boolean = status(observer, subject) == Reachable + /** + * @return true if there's no negative (Unreachable, Terminated) observation record at all for + * any node + */ def isAllReachable: Boolean = records.isEmpty /** - * Doesn't include terminated + * @return all nodes that are Unreachable (i.e. they have been reported as Unreachable by at least one other node). + * This does not include nodes observed to be Terminated. */ def allUnreachable: Set[UniqueAddress] = cache.allUnreachable + /** + * @return all nodes that are Unreachable or Terminated (i.e. they have been reported as Unreachable or Terminated + * by at least one other node). + */ def allUnreachableOrTerminated: Set[UniqueAddress] = cache.allUnreachableOrTerminated /** - * Doesn't include terminated + * @return all nodes that have been observed as Unreachable by the given observer. + * This doesn't include nodes observed as Terminated. 
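The aggregation rule stated in the Reachability class comment above can be summarised with a small sketch (illustration only, assuming the members of the internal Reachability companion are imported; the actual implementation also consults the per-observer cache):

    import Reachability.{ Reachable, Unreachable, Terminated, ReachabilityStatus }
    // Terminated wins over Unreachable, which wins over Reachable
    def aggregated(observations: Set[ReachabilityStatus]): ReachabilityStatus =
      if (observations(Terminated)) Terminated
      else if (observations(Unreachable)) Unreachable
      else Reachable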
*/ def allUnreachableFrom(observer: UniqueAddress): Set[UniqueAddress] = observerRows(observer) match { From 8fc21cb530453c3ae29ffa580bd98aefeeb3d4b5 Mon Sep 17 00:00:00 2001 From: Johannes Rudolph Date: Tue, 4 Jul 2017 12:55:30 +0200 Subject: [PATCH 07/34] =clu fix Reachability.equals wrt versions --- akka-cluster/src/main/scala/akka/cluster/Reachability.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/akka-cluster/src/main/scala/akka/cluster/Reachability.scala b/akka-cluster/src/main/scala/akka/cluster/Reachability.scala index 808fb28e3a..3afe0bc8ab 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Reachability.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Reachability.scala @@ -290,7 +290,7 @@ private[cluster] class Reachability private ( // only used for testing override def equals(obj: Any): Boolean = obj match { case other: Reachability ⇒ - records.size == other.records.size && versions == versions && + records.size == other.records.size && versions == other.versions && cache.observerRowsMap == other.cache.observerRowsMap case _ ⇒ false } From 58db22ca1e703c79e14094e90d680cda95d38efa Mon Sep 17 00:00:00 2001 From: Arnout Engelen Date: Tue, 4 Jul 2017 05:52:03 -0700 Subject: [PATCH 08/34] Introduce missing team role if necessary (#23276) * Introduce missing team role if necessary (#23243) When receiving gossip from a node that did not contain any team information (such as gossip from a node running a previous version of Akka), add the default team role during deserialization. * Simpler implementation of adding default role * More efficient `rolesFromProto` Now actually outperforms the previous implementation. Still room for improvement as this probably checks for duplicates in the set on each add, but creating our own array-backed set here is probably going overboard :). 
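The compatibility behaviour described in this commit message boils down to the following sketch (an illustrative helper, not part of the patch; the real logic is in rolesFromProto in the diff below):

    def ensureTeamRole(roles: Set[String]): Set[String] =
      // gossip from an older node carries no team role, so fall back to the default team
      if (roles.exists(_.startsWith(ClusterSettings.TeamRolePrefix))) roles
      else roles + (ClusterSettings.TeamRolePrefix + "default")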
* Fixes following rebase --- .../protobuf/ClusterMessageSerializer.scala | 18 ++++++++++++- .../scala/akka/cluster/QuickRestartSpec.scala | 2 +- .../ClusterMessageSerializerSpec.scala | 26 ++++++++++++------- 3 files changed, 35 insertions(+), 11 deletions(-) diff --git a/akka-cluster/src/main/scala/akka/cluster/protobuf/ClusterMessageSerializer.scala b/akka-cluster/src/main/scala/akka/cluster/protobuf/ClusterMessageSerializer.scala index 4896c706e3..25a9e69eef 100644 --- a/akka-cluster/src/main/scala/akka/cluster/protobuf/ClusterMessageSerializer.scala +++ b/akka-cluster/src/main/scala/akka/cluster/protobuf/ClusterMessageSerializer.scala @@ -346,7 +346,23 @@ class ClusterMessageSerializer(val system: ExtendedActorSystem) extends BaseSeri def memberFromProto(member: cm.Member) = new Member(addressMapping(member.getAddressIndex), member.getUpNumber, memberStatusFromInt(member.getStatus.getNumber), - member.getRolesIndexesList.asScala.map(roleMapping(_))(breakOut)) + rolesFromProto(member.getRolesIndexesList.asScala)) + + def rolesFromProto(roleIndexes: Seq[Integer]): Set[String] = { + var containsDc = false + var roles = Set.empty[String] + + for { + roleIndex ← roleIndexes + role = roleMapping(roleIndex) + } { + if (role.startsWith(ClusterSettings.TeamRolePrefix)) containsDc = true + roles += role + } + + if (!containsDc) roles + (ClusterSettings.TeamRolePrefix + "default") + else roles + } def tombstoneFromProto(tombstone: cm.Tombstone): (UniqueAddress, Long) = (addressMapping(tombstone.getAddressIndex), tombstone.getTimestamp) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/QuickRestartSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/QuickRestartSpec.scala index df14986ac1..9da57760ca 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/QuickRestartSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/QuickRestartSpec.scala @@ -91,7 +91,7 @@ abstract class QuickRestartSpec Cluster(system).state.members.size should ===(totalNumberOfNodes) Cluster(system).state.members.map(_.status == MemberStatus.Up) // use the role to test that it is the new incarnation that joined, sneaky - Cluster(system).state.members.flatMap(_.roles) should ===(Set(s"round-$n", "team-default")) + Cluster(system).state.members.flatMap(_.roles) should ===(Set(s"round-$n", ClusterSettings.TeamRolePrefix + "default")) } } enterBarrier("members-up-" + n) diff --git a/akka-cluster/src/test/scala/akka/cluster/protobuf/ClusterMessageSerializerSpec.scala b/akka-cluster/src/test/scala/akka/cluster/protobuf/ClusterMessageSerializerSpec.scala index db327a7fbc..fd4c323309 100644 --- a/akka-cluster/src/test/scala/akka/cluster/protobuf/ClusterMessageSerializerSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/protobuf/ClusterMessageSerializerSpec.scala @@ -16,16 +16,18 @@ class ClusterMessageSerializerSpec extends AkkaSpec( val serializer = new ClusterMessageSerializer(system.asInstanceOf[ExtendedActorSystem]) - def checkSerialization(obj: AnyRef): Unit = { + def roundtrip[T <: AnyRef](obj: T): T = { val blob = serializer.toBinary(obj) - val ref = serializer.fromBinary(blob, obj.getClass) - obj match { - case env: GossipEnvelope ⇒ - val env2 = ref.asInstanceOf[GossipEnvelope] + serializer.fromBinary(blob, obj.getClass).asInstanceOf[T] + } + + def checkSerialization(obj: AnyRef): Unit = { + (obj, roundtrip(obj)) match { + case (env: GossipEnvelope, env2: GossipEnvelope) ⇒ env2.from should ===(env.from) env2.to should ===(env.to) env2.gossip should ===(env.gossip) - case _ ⇒ + case (_, ref) 
⇒ ref should ===(obj) } @@ -35,10 +37,10 @@ class ClusterMessageSerializerSpec extends AkkaSpec( val a1 = TestMember(Address("akka.tcp", "sys", "a", 2552), Joining, Set.empty) val b1 = TestMember(Address("akka.tcp", "sys", "b", 2552), Up, Set("r1")) - val c1 = TestMember(Address("akka.tcp", "sys", "c", 2552), Leaving, Set("r2")) - val d1 = TestMember(Address("akka.tcp", "sys", "d", 2552), Exiting, Set("r1", "r2")) + val c1 = TestMember(Address("akka.tcp", "sys", "c", 2552), Leaving, Set.empty, "foo") + val d1 = TestMember(Address("akka.tcp", "sys", "d", 2552), Exiting, Set("r1"), "foo") val e1 = TestMember(Address("akka.tcp", "sys", "e", 2552), Down, Set("r3")) - val f1 = TestMember(Address("akka.tcp", "sys", "f", 2552), Removed, Set("r2", "r3")) + val f1 = TestMember(Address("akka.tcp", "sys", "f", 2552), Removed, Set("r3"), "foo") "ClusterMessages" must { @@ -77,6 +79,12 @@ class ClusterMessageSerializerSpec extends AkkaSpec( checkSerialization(InternalClusterAction.Welcome(uniqueAddress, g2)) } + + "add a default team role if none is present" in { + val env = roundtrip(GossipEnvelope(a1.uniqueAddress, d1.uniqueAddress, Gossip(SortedSet(a1, d1)))) + env.gossip.members.head.roles should be(Set(ClusterSettings.TeamRolePrefix + "default")) + env.gossip.members.tail.head.roles should be(Set("r1", ClusterSettings.TeamRolePrefix + "foo")) + } } "Cluster router pool" must { "be serializable" in { From e0fe0bc49ec63c605b9e5a7cb1eb281fc4923673 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Mon, 26 Jun 2017 15:03:33 +0200 Subject: [PATCH 09/34] Make cluster sharding DC aware, #23231 * Sharding only within own team (coordinator is singleton) * the ddata Replicator used by Sharding must also be only within own team * added support for Set of roles in ddata Replicator so that can be used by sharding to specify role + team * Sharding proxy can route to sharding in another team --- .../cluster/sharding/ClusterSharding.scala | 106 ++++++++- .../akka/cluster/sharding/ShardRegion.scala | 17 +- .../sharding/ClusterShardingSpec.scala | 1 + .../sharding/TeamClusterShardingSpec.scala | 218 ++++++++++++++++++ .../singleton/ClusterSingletonProxy.scala | 4 +- .../singleton/TeamSingletonManagerSpec.scala | 7 +- .../scala/akka/cluster/ClusterSettings.scala | 1 + .../akka/cluster/ClusterConfigSpec.scala | 4 +- .../akka/cluster/ddata/DistributedData.scala | 2 +- .../scala/akka/cluster/ddata/Replicator.scala | 88 +++++-- .../cluster/ddata/DurablePruningSpec.scala | 8 + project/MiMa.scala | 8 + 12 files changed, 417 insertions(+), 47 deletions(-) create mode 100644 akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/TeamClusterShardingSpec.scala diff --git a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ClusterSharding.scala b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ClusterSharding.scala index 3998ba0bfe..4779a79e48 100644 --- a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ClusterSharding.scala +++ b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ClusterSharding.scala @@ -29,6 +29,7 @@ import akka.cluster.ddata.ReplicatorSettings import akka.cluster.ddata.Replicator import scala.util.control.NonFatal import akka.actor.Status +import akka.cluster.ClusterSettings /** * This extension provides sharding functionality of actors in a cluster. 
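For orientation, a minimal sketch (not part of the patch) of how a node is placed in a team and how that team then shows up as a role, based on the `akka.cluster.team` setting and the `ClusterSettings.TeamRolePrefix` role used throughout this series. The system name and the team name "DC1" are placeholder values.

import akka.actor.ActorSystem
import akka.cluster.{ Cluster, ClusterSettings }
import com.typesafe.config.ConfigFactory

object ClusterTeamSketch {
  def main(args: Array[String]): Unit = {
    // each node declares the team (e.g. data center or availability zone) it belongs to
    val config = ConfigFactory.parseString(
      """
        akka.actor.provider = "cluster"
        akka.cluster.team = DC1
      """).withFallback(ConfigFactory.load())

    val system = ActorSystem("ClusterTeamSketch", config)

    // the team is advertised to other members as an ordinary role with the team prefix,
    // which is what singletons, sharding and the replicator match on in this series
    assert(Cluster(system).selfRoles.contains(ClusterSettings.TeamRolePrefix + "DC1"))

    system.terminate()
  }
}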
@@ -341,16 +342,53 @@ class ClusterSharding(system: ExtendedActorSystem) extends Extension { typeName: String, role: Option[String], extractEntityId: ShardRegion.ExtractEntityId, + extractShardId: ShardRegion.ExtractShardId): ActorRef = + startProxy(typeName, role, team = None, extractEntityId, extractShardId) + + /** + * Scala API: Register a named entity type `ShardRegion` on this node that will run in proxy only mode, + * i.e. it will delegate messages to other `ShardRegion` actors on other nodes, but not host any + * entity actors itself. The [[ShardRegion]] actor for this type can later be retrieved with the + * [[#shardRegion]] method. + * + * Some settings can be configured as described in the `akka.cluster.sharding` section + * of the `reference.conf`. + * + * @param typeName the name of the entity type + * @param role specifies that this entity type is located on cluster nodes with a specific role. + * If the role is not specified all nodes in the cluster are used. + * @param team The team of the cluster nodes where the cluster sharding is running. + * If None then the same team as current node. + * @param extractEntityId partial function to extract the entity id and the message to send to the + * entity from the incoming message, if the partial function does not match the message will + * be `unhandled`, i.e. posted as `Unhandled` messages on the event stream + * @param extractShardId function to determine the shard id for an incoming message, only messages + * that passed the `extractEntityId` will be used + * @return the actor ref of the [[ShardRegion]] that is to be responsible for the shard + */ + def startProxy( + typeName: String, + role: Option[String], + team: Option[String], + extractEntityId: ShardRegion.ExtractEntityId, extractShardId: ShardRegion.ExtractShardId): ActorRef = { implicit val timeout = system.settings.CreationTimeout val settings = ClusterShardingSettings(system).withRole(role) - val startMsg = StartProxy(typeName, settings, extractEntityId, extractShardId) + val startMsg = StartProxy(typeName, team, settings, extractEntityId, extractShardId) val Started(shardRegion) = Await.result(guardian ? startMsg, timeout.duration) - regions.put(typeName, shardRegion) + // it must be possible to start several proxies, one per team + regions.put(proxyName(typeName, team), shardRegion) shardRegion } + private def proxyName(typeName: String, team: Option[String]): String = { + team match { + case None ⇒ typeName + case Some(t) ⇒ typeName + "-" + t + } + } + /** * Java/Scala API: Register a named entity type `ShardRegion` on this node that will run in proxy only mode, * i.e. it will delegate messages to other `ShardRegion` actors on other nodes, but not host any @@ -370,9 +408,34 @@ class ClusterSharding(system: ExtendedActorSystem) extends Extension { def startProxy( typeName: String, role: Optional[String], + messageExtractor: ShardRegion.MessageExtractor): ActorRef = + startProxy(typeName, role, team = Optional.empty(), messageExtractor) + + /** + * Java/Scala API: Register a named entity type `ShardRegion` on this node that will run in proxy only mode, + * i.e. it will delegate messages to other `ShardRegion` actors on other nodes, but not host any + * entity actors itself. The [[ShardRegion]] actor for this type can later be retrieved with the + * [[#shardRegion]] method. + * + * Some settings can be configured as described in the `akka.cluster.sharding` section + * of the `reference.conf`. 
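A usage sketch for the Scala `startProxy` overload introduced above (illustrative, not part of the patch); the entity type name "Entity", the team name "DC2" and the extractors are assumed placeholders defined elsewhere.

import akka.actor.{ ActorRef, ActorSystem }
import akka.cluster.sharding.{ ClusterSharding, ShardRegion }

object StartProxySketch {
  // start two proxy-only regions for the same entity type: one for the node's own
  // team (team = None) and one that routes to the team named "DC2"
  def startProxies(
    system:          ActorSystem,
    extractEntityId: ShardRegion.ExtractEntityId,
    extractShardId:  ShardRegion.ExtractShardId): (ActorRef, ActorRef) = {

    val ownTeamProxy = ClusterSharding(system).startProxy(
      typeName = "Entity",
      role = None,
      team = None, // None: use the team of the current node
      extractEntityId = extractEntityId,
      extractShardId = extractShardId)

    val otherTeamProxy = ClusterSharding(system).startProxy(
      typeName = "Entity",
      role = None,
      team = Some("DC2"), // route to shard regions running in team DC2
      extractEntityId = extractEntityId,
      extractShardId = extractShardId)

    (ownTeamProxy, otherTeamProxy)
  }
}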
+ * + * @param typeName the name of the entity type + * @param role specifies that this entity type is located on cluster nodes with a specific role. + * If the role is not specified all nodes in the cluster are used. + * @param team The team of the cluster nodes where the cluster sharding is running. + * If None then the same team as current node. + * @param messageExtractor functions to extract the entity id, shard id, and the message to send to the + * entity from the incoming message + * @return the actor ref of the [[ShardRegion]] that is to be responsible for the shard + */ + def startProxy( + typeName: String, + role: Optional[String], + team: Optional[String], messageExtractor: ShardRegion.MessageExtractor): ActorRef = { - startProxy(typeName, Option(role.orElse(null)), + startProxy(typeName, Option(role.orElse(null)), Option(team.orElse(null)), extractEntityId = { case msg if messageExtractor.entityId(msg) ne null ⇒ (messageExtractor.entityId(msg), messageExtractor.entityMessage(msg)) @@ -383,14 +446,28 @@ class ClusterSharding(system: ExtendedActorSystem) extends Extension { /** * Retrieve the actor reference of the [[ShardRegion]] actor responsible for the named entity type. - * The entity type must be registered with the [[#start]] method before it can be used here. - * Messages to the entity is always sent via the `ShardRegion`. + * The entity type must be registered with the [[#start]] or [[#startProxy]] method before it + * can be used here. Messages to the entity is always sent via the `ShardRegion`. */ def shardRegion(typeName: String): ActorRef = regions.get(typeName) match { case null ⇒ throw new IllegalArgumentException(s"Shard type [$typeName] must be started first") case ref ⇒ ref } + /** + * Retrieve the actor reference of the [[ShardRegion]] actor that will act as a proxy to the + * named entity type running in another team. A proxy within the same team can be accessed + * with [[#shardRegion]] instead of this method. The entity type must be registered with the + * [[#startProxy]] method before it can be used here. Messages to the entity is always sent + * via the `ShardRegion`. 
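A small sketch (assumptions only, not from the patch) of retrieving such a cross-team proxy later via the `shardRegionProxy` method described above; "Entity" and "DC2" are placeholder names and the proxy is assumed to have been started with `startProxy` on this node first.

import akka.actor.{ ActorRef, ActorSystem }
import akka.cluster.sharding.ClusterSharding

object ShardRegionProxyLookupSketch {
  // assumes startProxy(typeName = "Entity", ..., team = Some("DC2"), ...) was called
  // on this node; the proxy is then looked up by entity type name and team
  def lookup(system: ActorSystem): ActorRef =
    ClusterSharding(system).shardRegionProxy("Entity", "DC2")
}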
+ */ + def shardRegionProxy(typeName: String, team: String): ActorRef = { + regions.get(proxyName(typeName, Some(team))) match { + case null ⇒ throw new IllegalArgumentException(s"Shard type [$typeName] must be started first") + case ref ⇒ ref + } + } + } /** @@ -402,7 +479,7 @@ private[akka] object ClusterShardingGuardian { extractEntityId: ShardRegion.ExtractEntityId, extractShardId: ShardRegion.ExtractShardId, allocationStrategy: ShardAllocationStrategy, handOffStopMessage: Any) extends NoSerializationVerificationNeeded - final case class StartProxy(typeName: String, settings: ClusterShardingSettings, + final case class StartProxy(typeName: String, team: Option[String], settings: ClusterShardingSettings, extractEntityId: ShardRegion.ExtractEntityId, extractShardId: ShardRegion.ExtractShardId) extends NoSerializationVerificationNeeded final case class Started(shardRegion: ActorRef) extends NoSerializationVerificationNeeded @@ -441,7 +518,9 @@ private[akka] class ClusterShardingGuardian extends Actor { case Some(r) ⇒ URLEncoder.encode(r, ByteString.UTF_8) + "Replicator" case None ⇒ "replicator" } - val ref = context.actorOf(Replicator.props(replicatorSettings.withRole(settings.role)), name) + // Use members within the team and with the given role (if any) + val replicatorRoles = Set(ClusterSettings.TeamRolePrefix + cluster.settings.Team) ++ settings.role + val ref = context.actorOf(Replicator.props(replicatorSettings.withRoles(replicatorRoles)), name) replicatorByRole = replicatorByRole.updated(settings.role, ref) ref } @@ -505,22 +584,29 @@ private[akka] class ClusterShardingGuardian extends Actor { sender() ! Status.Failure(e) } - case StartProxy(typeName, settings, extractEntityId, extractShardId) ⇒ + case StartProxy(typeName, team, settings, extractEntityId, extractShardId) ⇒ try { + val encName = URLEncoder.encode(typeName, ByteString.UTF_8) val cName = coordinatorSingletonManagerName(encName) val cPath = coordinatorPath(encName) - val shardRegion = context.child(encName).getOrElse { + // it must be possible to start several proxies, one per team + val actorName = team match { + case None ⇒ encName + case Some(t) ⇒ URLEncoder.encode(typeName + "-" + t, ByteString.UTF_8) + } + val shardRegion = context.child(actorName).getOrElse { context.actorOf( ShardRegion.proxyProps( typeName = typeName, + team = team, settings = settings, coordinatorPath = cPath, extractEntityId = extractEntityId, extractShardId = extractShardId, replicator = context.system.deadLetters, majorityMinCap).withDispatcher(context.props.dispatcher), - name = encName) + name = actorName) } sender() ! 
Started(shardRegion) } catch { diff --git a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardRegion.scala b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardRegion.scala index 3afe6746f7..2d9b8eb0e4 100644 --- a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardRegion.scala +++ b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardRegion.scala @@ -20,6 +20,7 @@ import scala.concurrent.Future import scala.reflect.ClassTag import scala.concurrent.Promise import akka.Done +import akka.cluster.ClusterSettings /** * @see [[ClusterSharding$ ClusterSharding extension]] @@ -40,7 +41,7 @@ object ShardRegion { handOffStopMessage: Any, replicator: ActorRef, majorityMinCap: Int): Props = - Props(new ShardRegion(typeName, Some(entityProps), settings, coordinatorPath, extractEntityId, + Props(new ShardRegion(typeName, Some(entityProps), team = None, settings, coordinatorPath, extractEntityId, extractShardId, handOffStopMessage, replicator, majorityMinCap)).withDeploy(Deploy.local) /** @@ -50,13 +51,14 @@ object ShardRegion { */ private[akka] def proxyProps( typeName: String, + team: Option[String], settings: ClusterShardingSettings, coordinatorPath: String, extractEntityId: ShardRegion.ExtractEntityId, extractShardId: ShardRegion.ExtractShardId, replicator: ActorRef, majorityMinCap: Int): Props = - Props(new ShardRegion(typeName, None, settings, coordinatorPath, extractEntityId, extractShardId, + Props(new ShardRegion(typeName, None, team, settings, coordinatorPath, extractEntityId, extractShardId, PoisonPill, replicator, majorityMinCap)).withDeploy(Deploy.local) /** @@ -365,6 +367,7 @@ object ShardRegion { private[akka] class ShardRegion( typeName: String, entityProps: Option[Props], + team: Option[String], settings: ClusterShardingSettings, coordinatorPath: String, extractEntityId: ShardRegion.ExtractEntityId, @@ -419,11 +422,15 @@ private[akka] class ShardRegion( retryTask.cancel() } - def matchingRole(member: Member): Boolean = role match { - case None ⇒ true - case Some(r) ⇒ member.hasRole(r) + // when using proxy the team can be different that the own team + private val targetTeamRole = team match { + case Some(t) ⇒ ClusterSettings.TeamRolePrefix + t + case None ⇒ ClusterSettings.TeamRolePrefix + cluster.settings.Team } + def matchingRole(member: Member): Boolean = + member.hasRole(targetTeamRole) && role.forall(member.hasRole) + def coordinatorSelection: Option[ActorSelection] = membersByAge.headOption.map(m ⇒ context.actorSelection(RootActorPath(m.address) + coordinatorPath)) diff --git a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSpec.scala index 84d550e2f8..77ae4d0c2f 100644 --- a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSpec.scala +++ b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSpec.scala @@ -460,6 +460,7 @@ abstract class ClusterShardingSpec(config: ClusterShardingSpecConfig) extends Mu val proxy = system.actorOf( ShardRegion.proxyProps( typeName = "counter", + team = None, settings, coordinatorPath = "/user/counterCoordinator/singleton/coordinator", extractEntityId = extractEntityId, diff --git a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/TeamClusterShardingSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/TeamClusterShardingSpec.scala new file mode 100644 index 0000000000..9710faccd4 --- 
/dev/null +++ b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/TeamClusterShardingSpec.scala @@ -0,0 +1,218 @@ +/** + * Copyright (C) 2017 Lightbend Inc. + */ +package akka.cluster.sharding + +import scala.concurrent.duration._ + +import akka.actor.Actor +import akka.actor.ActorRef +import akka.actor.Address +import akka.actor.Props +import akka.cluster.Cluster +import akka.cluster.ClusterEvent._ +import akka.cluster.MemberStatus +import akka.cluster.sharding.ShardRegion.CurrentRegions +import akka.cluster.sharding.ShardRegion.GetCurrentRegions +import akka.remote.testconductor.RoleName +import akka.remote.testkit.MultiNodeConfig +import akka.remote.testkit.MultiNodeSpec +import akka.remote.testkit.STMultiNodeSpec +import akka.testkit._ +import com.typesafe.config.ConfigFactory + +object TeamClusterShardingSpec { + sealed trait EntityMsg { + def id: String + } + final case class Ping(id: String) extends EntityMsg + final case class GetCount(id: String) extends EntityMsg + + class Entity extends Actor { + var count = 0 + def receive = { + case Ping(_) ⇒ + count += 1 + sender() ! self + case GetCount(_) ⇒ + sender() ! count + } + } + + val extractEntityId: ShardRegion.ExtractEntityId = { + case m: EntityMsg ⇒ (m.id, m) + } + + val extractShardId: ShardRegion.ExtractShardId = { + case m: EntityMsg ⇒ m.id.charAt(0).toString + } +} + +object TeamClusterShardingSpecConfig extends MultiNodeConfig { + val first = role("first") + val second = role("second") + val third = role("third") + val fourth = role("fourth") + + commonConfig(ConfigFactory.parseString(s""" + akka.loglevel = INFO + akka.actor.provider = "cluster" + akka.remote.log-remote-lifecycle-events = off + akka.cluster.auto-down-unreachable-after = 0s + """)) + + nodeConfig(first, second) { + ConfigFactory.parseString("akka.cluster.team = DC1") + } + + nodeConfig(third, fourth) { + ConfigFactory.parseString("akka.cluster.team = DC2") + } +} + +class TeamClusterShardingMultiJvmNode1 extends TeamClusterShardingSpec +class TeamClusterShardingMultiJvmNode2 extends TeamClusterShardingSpec +class TeamClusterShardingMultiJvmNode3 extends TeamClusterShardingSpec +class TeamClusterShardingMultiJvmNode4 extends TeamClusterShardingSpec + +abstract class TeamClusterShardingSpec extends MultiNodeSpec(TeamClusterShardingSpecConfig) + with STMultiNodeSpec with ImplicitSender { + import TeamClusterShardingSpec._ + import TeamClusterShardingSpecConfig._ + + override def initialParticipants = roles.size + + val cluster = Cluster(system) + + def join(from: RoleName, to: RoleName): Unit = { + runOn(from) { + cluster join node(to).address + startSharding() + within(15.seconds) { + awaitAssert(cluster.state.members.exists { m ⇒ + m.uniqueAddress == cluster.selfUniqueAddress && m.status == MemberStatus.Up + } should be(true)) + } + } + enterBarrier(from.name + "-joined") + } + + def startSharding(): Unit = { + ClusterSharding(system).start( + typeName = "Entity", + entityProps = Props[Entity], + settings = ClusterShardingSettings(system), + extractEntityId = extractEntityId, + extractShardId = extractShardId) + } + + lazy val region = ClusterSharding(system).shardRegion("Entity") + + private def fillAddress(a: Address): Address = + if (a.hasLocalScope) Cluster(system).selfAddress else a + + private def assertCurrentRegions(expected: Set[Address]): Unit = { + awaitAssert({ + val p = TestProbe() + region.tell(GetCurrentRegions, p.ref) + p.expectMsg(CurrentRegions(expected)) + }, 10.seconds) + } + + s"Cluster sharding with teams" must { + "join 
cluster" in within(20.seconds) { + join(first, first) + join(second, first) + join(third, first) + join(fourth, first) + + awaitAssert({ + Cluster(system).state.members.size should ===(4) + Cluster(system).state.members.map(_.status) should ===(Set(MemberStatus.Up)) + }, 10.seconds) + + runOn(first, second) { + assertCurrentRegions(Set(first, second).map(r ⇒ node(r).address)) + } + runOn(third, fourth) { + assertCurrentRegions(Set(third, fourth).map(r ⇒ node(r).address)) + } + + enterBarrier("after-1") + } + + "initialize shards" in { + runOn(first) { + val locations = (for (n ← 1 to 10) yield { + val id = n.toString + region ! Ping(id) + id → expectMsgType[ActorRef] + }).toMap + val firstAddress = node(first).address + val secondAddress = node(second).address + val hosts = locations.values.map(ref ⇒ fillAddress(ref.path.address)).toSet + hosts should ===(Set(firstAddress, secondAddress)) + } + runOn(third) { + val locations = (for (n ← 1 to 10) yield { + val id = n.toString + region ! Ping(id) + val ref1 = expectMsgType[ActorRef] + region ! Ping(id) + val ref2 = expectMsgType[ActorRef] + ref1 should ===(ref2) + id → ref1 + }).toMap + val thirdAddress = node(third).address + val fourthAddress = node(fourth).address + val hosts = locations.values.map(ref ⇒ fillAddress(ref.path.address)).toSet + hosts should ===(Set(thirdAddress, fourthAddress)) + } + enterBarrier("after-2") + } + + "not mix entities in different teams" in { + runOn(second) { + region ! GetCount("5") + expectMsg(1) + } + runOn(fourth) { + region ! GetCount("5") + expectMsg(2) + } + enterBarrier("after-3") + } + + "allow proxy within same team" in { + runOn(second) { + val proxy = ClusterSharding(system).startProxy( + typeName = "Entity", + role = None, + team = None, // by default use own team + extractEntityId = extractEntityId, + extractShardId = extractShardId) + + proxy ! GetCount("5") + expectMsg(1) + } + enterBarrier("after-4") + } + + "allow proxy across different teams" in { + runOn(second) { + val proxy = ClusterSharding(system).startProxy( + typeName = "Entity", + role = None, + team = Some("DC2"), // proxy to other DC + extractEntityId = extractEntityId, + extractShardId = extractShardId) + + proxy ! 
GetCount("5") + expectMsg(2) + } + enterBarrier("after-5") + } + + } +} + diff --git a/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonProxy.scala b/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonProxy.scala index a31dc53035..3e51c493f5 100644 --- a/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonProxy.scala +++ b/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonProxy.scala @@ -176,13 +176,13 @@ final class ClusterSingletonProxy(singletonManagerPath: String, settings: Cluste identifyTimer = None } - private val targetTeam = settings.team match { + private val targetTeamRole = settings.team match { case Some(t) ⇒ ClusterSettings.TeamRolePrefix + t case None ⇒ ClusterSettings.TeamRolePrefix + cluster.settings.Team } def matchingRole(member: Member): Boolean = - member.hasRole(targetTeam) && role.forall(member.hasRole) + member.hasRole(targetTeamRole) && role.forall(member.hasRole) def handleInitial(state: CurrentClusterState): Unit = { trackChange { diff --git a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/TeamSingletonManagerSpec.scala b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/TeamSingletonManagerSpec.scala index dd63e8ae70..4cecea2f3c 100644 --- a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/TeamSingletonManagerSpec.scala +++ b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/TeamSingletonManagerSpec.scala @@ -12,6 +12,7 @@ import akka.cluster.Cluster import akka.testkit.ImplicitSender import akka.remote.testkit.{ MultiNodeConfig, MultiNodeSpec, STMultiNodeSpec } +import akka.cluster.ClusterSettings object TeamSingletonManagerSpec extends MultiNodeConfig { val controller = role("controller") @@ -100,10 +101,10 @@ abstract class TeamSingletonManagerSpec extends MultiNodeSpec(TeamSingletonManag pong.fromTeam should equal(Cluster(system).settings.Team) pong.roles should contain(worker) runOn(controller, first) { - pong.roles should contain("team-one") + pong.roles should contain(ClusterSettings.TeamRolePrefix + "one") } runOn(second, third) { - pong.roles should contain("team-two") + pong.roles should contain(ClusterSettings.TeamRolePrefix + "two") } enterBarrier("after-1") @@ -118,7 +119,7 @@ abstract class TeamSingletonManagerSpec extends MultiNodeSpec(TeamSingletonManag val pong = expectMsgType[TeamSingleton.Pong](10.seconds) pong.fromTeam should ===("one") pong.roles should contain(worker) - pong.roles should contain("team-one") + pong.roles should contain(ClusterSettings.TeamRolePrefix + "one") } enterBarrier("after-1") } diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala index f6c8fca61d..ddc7b7717d 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala @@ -34,6 +34,7 @@ object ClusterSettings { } final class ClusterSettings(val config: Config, val systemName: String) { + import ClusterSettings._ import ClusterSettings._ private val cc = config.getConfig("akka.cluster") diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala index 171e67d42c..3792ec722f 100644 --- a/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala @@ -46,7 +46,7 @@ class ClusterConfigSpec extends AkkaSpec 
{ MinNrOfMembers should ===(1) MinNrOfMembersOfRole should ===(Map.empty[String, Int]) Team should ===("default") - Roles should ===(Set("team-default")) + Roles should ===(Set(ClusterSettings.TeamRolePrefix + "default")) JmxEnabled should ===(true) UseDispatcher should ===(Dispatchers.DefaultDispatcherId) GossipDifferentViewProbability should ===(0.8 +- 0.0001) @@ -66,7 +66,7 @@ class ClusterConfigSpec extends AkkaSpec { |} """.stripMargin).withFallback(ConfigFactory.load()), system.name) import settings._ - Roles should ===(Set("hamlet", "team-blue")) + Roles should ===(Set("hamlet", ClusterSettings.TeamRolePrefix + "blue")) Team should ===("blue") } } diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/DistributedData.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/DistributedData.scala index c941f4a97b..9ef8823242 100644 --- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/DistributedData.scala +++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/DistributedData.scala @@ -34,7 +34,7 @@ class DistributedData(system: ExtendedActorSystem) extends Extension { * Returns true if this member is not tagged with the role configured for the * replicas. */ - def isTerminated: Boolean = Cluster(system).isTerminated || !settings.role.forall(Cluster(system).selfRoles.contains) + def isTerminated: Boolean = Cluster(system).isTerminated || !settings.roles.subsetOf(Cluster(system).selfRoles) /** * `ActorRef` of the [[Replicator]] . diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/Replicator.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/Replicator.scala index 795a1fce2b..151ef0a5ed 100644 --- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/Replicator.scala +++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/Replicator.scala @@ -48,6 +48,9 @@ import akka.actor.Cancellable import scala.util.control.NonFatal import akka.cluster.ddata.Key.KeyId import akka.annotation.InternalApi +import scala.collection.immutable.TreeSet +import akka.cluster.MemberStatus +import scala.annotation.varargs object ReplicatorSettings { @@ -98,8 +101,8 @@ object ReplicatorSettings { } /** - * @param role Replicas are running on members tagged with this role. - * All members are used if undefined. + * @param roles Replicas are running on members tagged with these roles. + * The member must have all given roles. All members are used if empty. * @param gossipInterval How often the Replicator should send out gossip information. * @param notifySubscribersInterval How often the subscribers will be notified * of changes, if any. @@ -124,7 +127,7 @@ object ReplicatorSettings { * in the `Set`. 
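To illustrate the multi-role support described above, a hedged sketch (not part of the patch) that restricts a Replicator to members carrying both the team role of the current node and an application role, similar to how the sharding guardian combines the team role with the configured role earlier in this patch; the "backend" role name is a placeholder.

import akka.actor.{ ActorSystem, Props }
import akka.cluster.{ Cluster, ClusterSettings }
import akka.cluster.ddata.{ Replicator, ReplicatorSettings }

object TeamReplicatorSketch {
  // run the replicator only on nodes that are in this node's own team and
  // that additionally carry the "backend" role
  def props(system: ActorSystem): Props = {
    val teamRole = ClusterSettings.TeamRolePrefix + Cluster(system).settings.Team
    val settings = ReplicatorSettings(system).withRoles(teamRole, "backend")
    Replicator.props(settings)
  }
}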
*/ final class ReplicatorSettings( - val role: Option[String], + val roles: Set[String], val gossipInterval: FiniteDuration, val notifySubscribersInterval: FiniteDuration, val maxDeltaElements: Int, @@ -138,10 +141,29 @@ final class ReplicatorSettings( val deltaCrdtEnabled: Boolean, val maxDeltaSize: Int) { + // for backwards compatibility + def this( + role: Option[String], + gossipInterval: FiniteDuration, + notifySubscribersInterval: FiniteDuration, + maxDeltaElements: Int, + dispatcher: String, + pruningInterval: FiniteDuration, + maxPruningDissemination: FiniteDuration, + durableStoreProps: Either[(String, Config), Props], + durableKeys: Set[KeyId], + pruningMarkerTimeToLive: FiniteDuration, + durablePruningMarkerTimeToLive: FiniteDuration, + deltaCrdtEnabled: Boolean, + maxDeltaSize: Int) = + this(role.toSet, gossipInterval, notifySubscribersInterval, maxDeltaElements, dispatcher, pruningInterval, + maxPruningDissemination, durableStoreProps, durableKeys, pruningMarkerTimeToLive, durablePruningMarkerTimeToLive, + deltaCrdtEnabled, maxDeltaSize) + // For backwards compatibility def this(role: Option[String], gossipInterval: FiniteDuration, notifySubscribersInterval: FiniteDuration, maxDeltaElements: Int, dispatcher: String, pruningInterval: FiniteDuration, maxPruningDissemination: FiniteDuration) = - this(role, gossipInterval, notifySubscribersInterval, maxDeltaElements, dispatcher, pruningInterval, + this(roles = role.toSet, gossipInterval, notifySubscribersInterval, maxDeltaElements, dispatcher, pruningInterval, maxPruningDissemination, Right(Props.empty), Set.empty, 6.hours, 10.days, true, 200) // For backwards compatibility @@ -161,9 +183,20 @@ final class ReplicatorSettings( maxPruningDissemination, durableStoreProps, durableKeys, pruningMarkerTimeToLive, durablePruningMarkerTimeToLive, deltaCrdtEnabled, 200) - def withRole(role: String): ReplicatorSettings = copy(role = ReplicatorSettings.roleOption(role)) + def withRole(role: String): ReplicatorSettings = copy(roles = ReplicatorSettings.roleOption(role).toSet) - def withRole(role: Option[String]): ReplicatorSettings = copy(role = role) + def withRole(role: Option[String]): ReplicatorSettings = copy(roles = role.toSet) + + @varargs + def withRoles(roles: String*): ReplicatorSettings = copy(roles = roles.toSet) + + /** + * INTERNAL API + */ + @InternalApi private[akka] def withRoles(roles: Set[String]): ReplicatorSettings = copy(roles = roles) + + // for backwards compatibility + def role: Option[String] = roles.headOption def withGossipInterval(gossipInterval: FiniteDuration): ReplicatorSettings = copy(gossipInterval = gossipInterval) @@ -216,7 +249,7 @@ final class ReplicatorSettings( copy(maxDeltaSize = maxDeltaSize) private def copy( - role: Option[String] = role, + roles: Set[String] = roles, gossipInterval: FiniteDuration = gossipInterval, notifySubscribersInterval: FiniteDuration = notifySubscribersInterval, maxDeltaElements: Int = maxDeltaElements, @@ -229,7 +262,7 @@ final class ReplicatorSettings( durablePruningMarkerTimeToLive: FiniteDuration = durablePruningMarkerTimeToLive, deltaCrdtEnabled: Boolean = deltaCrdtEnabled, maxDeltaSize: Int = maxDeltaSize): ReplicatorSettings = - new ReplicatorSettings(role, gossipInterval, notifySubscribersInterval, maxDeltaElements, dispatcher, + new ReplicatorSettings(roles, gossipInterval, notifySubscribersInterval, maxDeltaElements, dispatcher, pruningInterval, maxPruningDissemination, durableStoreProps, durableKeys, pruningMarkerTimeToLive, durablePruningMarkerTimeToLive, 
deltaCrdtEnabled, maxDeltaSize) } @@ -988,8 +1021,8 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog require(!cluster.isTerminated, "Cluster node must not be terminated") require( - role.forall(cluster.selfRoles.contains), - s"This cluster member [${selfAddress}] doesn't have the role [$role]") + roles.subsetOf(cluster.selfRoles), + s"This cluster member [${selfAddress}] doesn't have all the roles [${roles.mkString(", ")}]") //Start periodic gossip to random nodes in cluster import context.dispatcher @@ -1057,8 +1090,10 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog var weaklyUpNodes: Set[Address] = Set.empty var removedNodes: Map[UniqueAddress, Long] = Map.empty - var leader: Option[Address] = None - def isLeader: Boolean = leader.exists(_ == selfAddress) + // all nodes sorted with the leader first + var leader: TreeSet[Member] = TreeSet.empty(Member.leaderStatusOrdering) + def isLeader: Boolean = + leader.nonEmpty && leader.head.address == selfAddress && leader.head.status == MemberStatus.Up // for pruning timeouts are based on clock that is only increased when all nodes are reachable var previousClockTime = System.nanoTime() @@ -1099,9 +1134,9 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog override def preStart(): Unit = { if (hasDurableKeys) durableStore ! LoadAll - val leaderChangedClass = if (role.isDefined) classOf[RoleLeaderChanged] else classOf[LeaderChanged] + // not using LeaderChanged/RoleLeaderChanged because here we need one node independent of team cluster.subscribe(self, initialStateMode = InitialStateAsEvents, - classOf[MemberEvent], classOf[ReachabilityEvent], leaderChangedClass) + classOf[MemberEvent], classOf[ReachabilityEvent]) } override def postStop(): Unit = { @@ -1113,7 +1148,7 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog clockTask.cancel() } - def matchingRole(m: Member): Boolean = role.forall(m.hasRole) + def matchingRole(m: Member): Boolean = roles.subsetOf(m.roles) override val supervisorStrategy = { def fromDurableStore: Boolean = sender() == durableStore && sender() != context.system.deadLetters @@ -1204,11 +1239,9 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog case MemberWeaklyUp(m) ⇒ receiveWeaklyUpMemberUp(m) case MemberUp(m) ⇒ receiveMemberUp(m) case MemberRemoved(m, _) ⇒ receiveMemberRemoved(m) - case _: MemberEvent ⇒ // not of interest + case evt: MemberEvent ⇒ receiveOtherMemberEvent(evt.member) case UnreachableMember(m) ⇒ receiveUnreachable(m) case ReachableMember(m) ⇒ receiveReachable(m) - case LeaderChanged(leader) ⇒ receiveLeaderChanged(leader, None) - case RoleLeaderChanged(role, leader) ⇒ receiveLeaderChanged(leader, Some(role)) case GetKeyIds ⇒ receiveGetKeyIds() case Delete(key, consistency, req) ⇒ receiveDelete(key, consistency, req) case RemovedNodePruningTick ⇒ receiveRemovedNodePruningTick() @@ -1695,15 +1728,19 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog weaklyUpNodes += m.address def receiveMemberUp(m: Member): Unit = - if (matchingRole(m) && m.address != selfAddress) { - nodes += m.address - weaklyUpNodes -= m.address + if (matchingRole(m)) { + leader += m + if (m.address != selfAddress) { + nodes += m.address + weaklyUpNodes -= m.address + } } def receiveMemberRemoved(m: Member): Unit = { if (m.address == selfAddress) context stop self else if (matchingRole(m)) { + leader -= m nodes -= m.address weaklyUpNodes -= 
m.address log.debug("adding removed node [{}] from MemberRemoved", m.uniqueAddress) @@ -1713,15 +1750,18 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog } } + def receiveOtherMemberEvent(m: Member): Unit = + if (matchingRole(m)) { + // update changed status + leader = (leader - m) + m + } + def receiveUnreachable(m: Member): Unit = if (matchingRole(m)) unreachable += m.address def receiveReachable(m: Member): Unit = if (matchingRole(m)) unreachable -= m.address - def receiveLeaderChanged(leaderOption: Option[Address], roleOption: Option[String]): Unit = - if (roleOption == role) leader = leaderOption - def receiveClockTick(): Unit = { val now = System.nanoTime() if (unreachable.isEmpty) diff --git a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/DurablePruningSpec.scala b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/DurablePruningSpec.scala index 553f0d727f..daa222b28a 100644 --- a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/DurablePruningSpec.scala +++ b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/DurablePruningSpec.scala @@ -16,6 +16,7 @@ import com.typesafe.config.ConfigFactory import akka.actor.ActorSystem import akka.actor.ActorRef import scala.concurrent.Await +import akka.cluster.MemberStatus object DurablePruningSpec extends MultiNodeConfig { val first = role("first") @@ -73,6 +74,13 @@ class DurablePruningSpec extends MultiNodeSpec(DurablePruningSpec) with STMultiN val replicator2 = startReplicator(sys2) val probe2 = TestProbe()(sys2) Cluster(sys2).join(node(first).address) + awaitAssert({ + Cluster(system).state.members.size should ===(4) + Cluster(system).state.members.map(_.status) should ===(Set(MemberStatus.Up)) + Cluster(sys2).state.members.size should ===(4) + Cluster(sys2).state.members.map(_.status) should ===(Set(MemberStatus.Up)) + }, 10.seconds) + enterBarrier("joined") within(5.seconds) { awaitAssert { diff --git a/project/MiMa.scala b/project/MiMa.scala index 8eb9783025..5c55c43c01 100644 --- a/project/MiMa.scala +++ b/project/MiMa.scala @@ -1221,6 +1221,14 @@ object MiMa extends AutoPlugin { // #22881 Make sure connections are aborted correctly on Windows ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.io.ChannelRegistration.cancel"), + // #23231 multi-DC Sharding + ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.cluster.ddata.Replicator.leader"), + ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ddata.Replicator.receiveLeaderChanged"), + ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.cluster.ddata.Replicator.leader_="), + FilterAnyProblemStartingWith("akka.cluster.sharding.ClusterShardingGuardian"), + ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.sharding.ShardRegion.proxyProps"), + ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.sharding.ShardRegion.this"), + // #23144 recoverWithRetries cleanup ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.impl.fusing.RecoverWith.InfiniteRetries"), From bb9549263ee98fb5c5d3b8fcbd8ab01a674e43c9 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Tue, 4 Jul 2017 17:11:21 +0200 Subject: [PATCH 10/34] Rename team to data center, #23275 --- .../cluster/sharding/ClusterSharding.scala | 49 +++---- .../akka/cluster/sharding/ShardRegion.scala | 19 +-- .../sharding/ClusterShardingSpec.scala | 2 +- ...scala => MultiDcClusterShardingSpec.scala} | 34 ++--- .../singleton/ClusterSingletonManager.scala | 4 +- 
.../singleton/ClusterSingletonProxy.scala | 27 ++-- ...cala => MultiDcSingletonManagerSpec.scala} | 58 ++++---- .../src/main/resources/reference.conf | 13 +- .../src/main/scala/akka/cluster/Cluster.scala | 16 +-- .../scala/akka/cluster/ClusterDaemon.scala | 40 +++--- .../scala/akka/cluster/ClusterEvent.scala | 57 ++++---- .../scala/akka/cluster/ClusterReadView.scala | 4 +- .../scala/akka/cluster/ClusterSettings.scala | 15 +- .../src/main/scala/akka/cluster/Gossip.scala | 93 ++++++------- .../src/main/scala/akka/cluster/Member.scala | 18 +-- .../protobuf/ClusterMessageSerializer.scala | 4 +- .../scala/akka/cluster/MBeanSpec.scala | 8 +- ...terSpec.scala => MultiDcClusterSpec.scala} | 50 +++---- ...Spec.scala => MultiDcSplitBrainSpec.scala} | 62 ++++----- .../scala/akka/cluster/QuickRestartSpec.scala | 2 +- .../akka/cluster/ClusterConfigSpec.scala | 10 +- .../ClusterDomainEventPublisherSpec.scala | 2 +- .../akka/cluster/ClusterDomainEventSpec.scala | 28 ++-- .../test/scala/akka/cluster/GossipSpec.scala | 131 +++++++++--------- .../test/scala/akka/cluster/TestMember.scala | 4 +- .../ClusterMessageSerializerSpec.scala | 13 +- .../scala/akka/cluster/ddata/Replicator.scala | 2 +- project/MiMa.scala | 2 +- 28 files changed, 382 insertions(+), 385 deletions(-) rename akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/{TeamClusterShardingSpec.scala => MultiDcClusterShardingSpec.scala} (83%) rename akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/{TeamSingletonManagerSpec.scala => MultiDcSingletonManagerSpec.scala} (57%) rename akka-cluster/src/multi-jvm/scala/akka/cluster/{MultiTeamClusterSpec.scala => MultiDcClusterSpec.scala} (70%) rename akka-cluster/src/multi-jvm/scala/akka/cluster/{MultiTeamSplitBrainSpec.scala => MultiDcSplitBrainSpec.scala} (59%) diff --git a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ClusterSharding.scala b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ClusterSharding.scala index 4779a79e48..5f9678d799 100644 --- a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ClusterSharding.scala +++ b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ClusterSharding.scala @@ -30,6 +30,7 @@ import akka.cluster.ddata.Replicator import scala.util.control.NonFatal import akka.actor.Status import akka.cluster.ClusterSettings +import akka.cluster.ClusterSettings.DataCenter /** * This extension provides sharding functionality of actors in a cluster. @@ -343,7 +344,7 @@ class ClusterSharding(system: ExtendedActorSystem) extends Extension { role: Option[String], extractEntityId: ShardRegion.ExtractEntityId, extractShardId: ShardRegion.ExtractShardId): ActorRef = - startProxy(typeName, role, team = None, extractEntityId, extractShardId) + startProxy(typeName, role, dataCenter = None, extractEntityId, extractShardId) /** * Scala API: Register a named entity type `ShardRegion` on this node that will run in proxy only mode, @@ -357,8 +358,8 @@ class ClusterSharding(system: ExtendedActorSystem) extends Extension { * @param typeName the name of the entity type * @param role specifies that this entity type is located on cluster nodes with a specific role. * If the role is not specified all nodes in the cluster are used. - * @param team The team of the cluster nodes where the cluster sharding is running. - * If None then the same team as current node. + * @param dataCenter The data center of the cluster nodes where the cluster sharding is running. + * If None then the same data center as current node. 
* @param extractEntityId partial function to extract the entity id and the message to send to the * entity from the incoming message, if the partial function does not match the message will * be `unhandled`, i.e. posted as `Unhandled` messages on the event stream @@ -369,21 +370,21 @@ class ClusterSharding(system: ExtendedActorSystem) extends Extension { def startProxy( typeName: String, role: Option[String], - team: Option[String], + dataCenter: Option[DataCenter], extractEntityId: ShardRegion.ExtractEntityId, extractShardId: ShardRegion.ExtractShardId): ActorRef = { implicit val timeout = system.settings.CreationTimeout val settings = ClusterShardingSettings(system).withRole(role) - val startMsg = StartProxy(typeName, team, settings, extractEntityId, extractShardId) + val startMsg = StartProxy(typeName, dataCenter, settings, extractEntityId, extractShardId) val Started(shardRegion) = Await.result(guardian ? startMsg, timeout.duration) - // it must be possible to start several proxies, one per team - regions.put(proxyName(typeName, team), shardRegion) + // it must be possible to start several proxies, one per data center + regions.put(proxyName(typeName, dataCenter), shardRegion) shardRegion } - private def proxyName(typeName: String, team: Option[String]): String = { - team match { + private def proxyName(typeName: String, dataCenter: Option[DataCenter]): String = { + dataCenter match { case None ⇒ typeName case Some(t) ⇒ typeName + "-" + t } @@ -409,7 +410,7 @@ class ClusterSharding(system: ExtendedActorSystem) extends Extension { typeName: String, role: Optional[String], messageExtractor: ShardRegion.MessageExtractor): ActorRef = - startProxy(typeName, role, team = Optional.empty(), messageExtractor) + startProxy(typeName, role, dataCenter = Optional.empty(), messageExtractor) /** * Java/Scala API: Register a named entity type `ShardRegion` on this node that will run in proxy only mode, @@ -423,8 +424,8 @@ class ClusterSharding(system: ExtendedActorSystem) extends Extension { * @param typeName the name of the entity type * @param role specifies that this entity type is located on cluster nodes with a specific role. * If the role is not specified all nodes in the cluster are used. - * @param team The team of the cluster nodes where the cluster sharding is running. - * If None then the same team as current node. + * @param dataCenter The data center of the cluster nodes where the cluster sharding is running. + * If None then the same data center as current node. * @param messageExtractor functions to extract the entity id, shard id, and the message to send to the * entity from the incoming message * @return the actor ref of the [[ShardRegion]] that is to be responsible for the shard @@ -432,10 +433,10 @@ class ClusterSharding(system: ExtendedActorSystem) extends Extension { def startProxy( typeName: String, role: Optional[String], - team: Optional[String], + dataCenter: Optional[String], messageExtractor: ShardRegion.MessageExtractor): ActorRef = { - startProxy(typeName, Option(role.orElse(null)), Option(team.orElse(null)), + startProxy(typeName, Option(role.orElse(null)), Option(dataCenter.orElse(null)), extractEntityId = { case msg if messageExtractor.entityId(msg) ne null ⇒ (messageExtractor.entityId(msg), messageExtractor.entityMessage(msg)) @@ -456,13 +457,13 @@ class ClusterSharding(system: ExtendedActorSystem) extends Extension { /** * Retrieve the actor reference of the [[ShardRegion]] actor that will act as a proxy to the - * named entity type running in another team. 
A proxy within the same team can be accessed + * named entity type running in another data center. A proxy within the same data center can be accessed * with [[#shardRegion]] instead of this method. The entity type must be registered with the * [[#startProxy]] method before it can be used here. Messages to the entity is always sent * via the `ShardRegion`. */ - def shardRegionProxy(typeName: String, team: String): ActorRef = { - regions.get(proxyName(typeName, Some(team))) match { + def shardRegionProxy(typeName: String, dataCenter: DataCenter): ActorRef = { + regions.get(proxyName(typeName, Some(dataCenter))) match { case null ⇒ throw new IllegalArgumentException(s"Shard type [$typeName] must be started first") case ref ⇒ ref } @@ -479,7 +480,7 @@ private[akka] object ClusterShardingGuardian { extractEntityId: ShardRegion.ExtractEntityId, extractShardId: ShardRegion.ExtractShardId, allocationStrategy: ShardAllocationStrategy, handOffStopMessage: Any) extends NoSerializationVerificationNeeded - final case class StartProxy(typeName: String, team: Option[String], settings: ClusterShardingSettings, + final case class StartProxy(typeName: String, dataCenter: Option[DataCenter], settings: ClusterShardingSettings, extractEntityId: ShardRegion.ExtractEntityId, extractShardId: ShardRegion.ExtractShardId) extends NoSerializationVerificationNeeded final case class Started(shardRegion: ActorRef) extends NoSerializationVerificationNeeded @@ -518,8 +519,8 @@ private[akka] class ClusterShardingGuardian extends Actor { case Some(r) ⇒ URLEncoder.encode(r, ByteString.UTF_8) + "Replicator" case None ⇒ "replicator" } - // Use members within the team and with the given role (if any) - val replicatorRoles = Set(ClusterSettings.TeamRolePrefix + cluster.settings.Team) ++ settings.role + // Use members within the data center and with the given role (if any) + val replicatorRoles = Set(ClusterSettings.DcRolePrefix + cluster.settings.DataCenter) ++ settings.role val ref = context.actorOf(Replicator.props(replicatorSettings.withRoles(replicatorRoles)), name) replicatorByRole = replicatorByRole.updated(settings.role, ref) ref @@ -584,14 +585,14 @@ private[akka] class ClusterShardingGuardian extends Actor { sender() ! 
Status.Failure(e) } - case StartProxy(typeName, team, settings, extractEntityId, extractShardId) ⇒ + case StartProxy(typeName, dataCenter, settings, extractEntityId, extractShardId) ⇒ try { val encName = URLEncoder.encode(typeName, ByteString.UTF_8) val cName = coordinatorSingletonManagerName(encName) val cPath = coordinatorPath(encName) - // it must be possible to start several proxies, one per team - val actorName = team match { + // it must be possible to start several proxies, one per data center + val actorName = dataCenter match { case None ⇒ encName case Some(t) ⇒ URLEncoder.encode(typeName + "-" + t, ByteString.UTF_8) } @@ -599,7 +600,7 @@ private[akka] class ClusterShardingGuardian extends Actor { context.actorOf( ShardRegion.proxyProps( typeName = typeName, - team = team, + dataCenter = dataCenter, settings = settings, coordinatorPath = cPath, extractEntityId = extractEntityId, diff --git a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardRegion.scala b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardRegion.scala index 2d9b8eb0e4..8de68d4cd4 100644 --- a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardRegion.scala +++ b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardRegion.scala @@ -21,6 +21,7 @@ import scala.reflect.ClassTag import scala.concurrent.Promise import akka.Done import akka.cluster.ClusterSettings +import akka.cluster.ClusterSettings.DataCenter /** * @see [[ClusterSharding$ ClusterSharding extension]] @@ -41,7 +42,7 @@ object ShardRegion { handOffStopMessage: Any, replicator: ActorRef, majorityMinCap: Int): Props = - Props(new ShardRegion(typeName, Some(entityProps), team = None, settings, coordinatorPath, extractEntityId, + Props(new ShardRegion(typeName, Some(entityProps), dataCenter = None, settings, coordinatorPath, extractEntityId, extractShardId, handOffStopMessage, replicator, majorityMinCap)).withDeploy(Deploy.local) /** @@ -51,14 +52,14 @@ object ShardRegion { */ private[akka] def proxyProps( typeName: String, - team: Option[String], + dataCenter: Option[DataCenter], settings: ClusterShardingSettings, coordinatorPath: String, extractEntityId: ShardRegion.ExtractEntityId, extractShardId: ShardRegion.ExtractShardId, replicator: ActorRef, majorityMinCap: Int): Props = - Props(new ShardRegion(typeName, None, team, settings, coordinatorPath, extractEntityId, extractShardId, + Props(new ShardRegion(typeName, None, dataCenter, settings, coordinatorPath, extractEntityId, extractShardId, PoisonPill, replicator, majorityMinCap)).withDeploy(Deploy.local) /** @@ -367,7 +368,7 @@ object ShardRegion { private[akka] class ShardRegion( typeName: String, entityProps: Option[Props], - team: Option[String], + dataCenter: Option[DataCenter], settings: ClusterShardingSettings, coordinatorPath: String, extractEntityId: ShardRegion.ExtractEntityId, @@ -422,14 +423,14 @@ private[akka] class ShardRegion( retryTask.cancel() } - // when using proxy the team can be different that the own team - private val targetTeamRole = team match { - case Some(t) ⇒ ClusterSettings.TeamRolePrefix + t - case None ⇒ ClusterSettings.TeamRolePrefix + cluster.settings.Team + // when using proxy the data center can be different from the own data center + private val targetDcRole = dataCenter match { + case Some(t) ⇒ ClusterSettings.DcRolePrefix + t + case None ⇒ ClusterSettings.DcRolePrefix + cluster.settings.DataCenter } def matchingRole(member: Member): Boolean = - member.hasRole(targetTeamRole) && role.forall(member.hasRole) + 
member.hasRole(targetDcRole) && role.forall(member.hasRole) def coordinatorSelection: Option[ActorSelection] = membersByAge.headOption.map(m ⇒ context.actorSelection(RootActorPath(m.address) + coordinatorPath)) diff --git a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSpec.scala index 77ae4d0c2f..be7b312764 100644 --- a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSpec.scala +++ b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSpec.scala @@ -460,7 +460,7 @@ abstract class ClusterShardingSpec(config: ClusterShardingSpecConfig) extends Mu val proxy = system.actorOf( ShardRegion.proxyProps( typeName = "counter", - team = None, + dataCenter = None, settings, coordinatorPath = "/user/counterCoordinator/singleton/coordinator", extractEntityId = extractEntityId, diff --git a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/TeamClusterShardingSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/MultiDcClusterShardingSpec.scala similarity index 83% rename from akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/TeamClusterShardingSpec.scala rename to akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/MultiDcClusterShardingSpec.scala index 9710faccd4..54df6e3ebf 100644 --- a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/TeamClusterShardingSpec.scala +++ b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/MultiDcClusterShardingSpec.scala @@ -21,7 +21,7 @@ import akka.remote.testkit.STMultiNodeSpec import akka.testkit._ import com.typesafe.config.ConfigFactory -object TeamClusterShardingSpec { +object MultiDcClusterShardingSpec { sealed trait EntityMsg { def id: String } @@ -48,7 +48,7 @@ object TeamClusterShardingSpec { } } -object TeamClusterShardingSpecConfig extends MultiNodeConfig { +object MultiDcClusterShardingSpecConfig extends MultiNodeConfig { val first = role("first") val second = role("second") val third = role("third") @@ -62,23 +62,23 @@ object TeamClusterShardingSpecConfig extends MultiNodeConfig { """)) nodeConfig(first, second) { - ConfigFactory.parseString("akka.cluster.team = DC1") + ConfigFactory.parseString("akka.cluster.data-center = DC1") } nodeConfig(third, fourth) { - ConfigFactory.parseString("akka.cluster.team = DC2") + ConfigFactory.parseString("akka.cluster.data-center = DC2") } } -class TeamClusterShardingMultiJvmNode1 extends TeamClusterShardingSpec -class TeamClusterShardingMultiJvmNode2 extends TeamClusterShardingSpec -class TeamClusterShardingMultiJvmNode3 extends TeamClusterShardingSpec -class TeamClusterShardingMultiJvmNode4 extends TeamClusterShardingSpec +class MultiDcClusterShardingMultiJvmNode1 extends MultiDcClusterShardingSpec +class MultiDcClusterShardingMultiJvmNode2 extends MultiDcClusterShardingSpec +class MultiDcClusterShardingMultiJvmNode3 extends MultiDcClusterShardingSpec +class MultiDcClusterShardingMultiJvmNode4 extends MultiDcClusterShardingSpec -abstract class TeamClusterShardingSpec extends MultiNodeSpec(TeamClusterShardingSpecConfig) +abstract class MultiDcClusterShardingSpec extends MultiNodeSpec(MultiDcClusterShardingSpecConfig) with STMultiNodeSpec with ImplicitSender { - import TeamClusterShardingSpec._ - import TeamClusterShardingSpecConfig._ + import MultiDcClusterShardingSpec._ + import MultiDcClusterShardingSpecConfig._ override def initialParticipants = 
roles.size @@ -119,7 +119,7 @@ abstract class TeamClusterShardingSpec extends MultiNodeSpec(TeamClusterSharding }, 10.seconds) } - s"Cluster sharding with teams" must { + s"Cluster sharding in multi data center cluster" must { "join cluster" in within(20.seconds) { join(first, first) join(second, first) @@ -171,7 +171,7 @@ abstract class TeamClusterShardingSpec extends MultiNodeSpec(TeamClusterSharding enterBarrier("after-2") } - "not mix entities in different teams" in { + "not mix entities in different data centers" in { runOn(second) { region ! GetCount("5") expectMsg(1) @@ -183,12 +183,12 @@ abstract class TeamClusterShardingSpec extends MultiNodeSpec(TeamClusterSharding enterBarrier("after-3") } - "allow proxy within same team" in { + "allow proxy within same data center" in { runOn(second) { val proxy = ClusterSharding(system).startProxy( typeName = "Entity", role = None, - team = None, // by default use own team + dataCenter = None, // by default use own DC extractEntityId = extractEntityId, extractShardId = extractShardId) @@ -198,12 +198,12 @@ abstract class TeamClusterShardingSpec extends MultiNodeSpec(TeamClusterSharding enterBarrier("after-4") } - "allow proxy across different teams" in { + "allow proxy across different data centers" in { runOn(second) { val proxy = ClusterSharding(system).startProxy( typeName = "Entity", role = None, - team = Some("DC2"), // proxy to other DC + dataCenter = Some("DC2"), // proxy to other DC extractEntityId = extractEntityId, extractShardId = extractShardId) diff --git a/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonManager.scala b/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonManager.scala index a0e9f1c868..535a929636 100644 --- a/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonManager.scala +++ b/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonManager.scala @@ -257,10 +257,10 @@ object ClusterSingletonManager { } override def postStop(): Unit = cluster.unsubscribe(self) - private val selfTeam = ClusterSettings.TeamRolePrefix + cluster.settings.Team + private val selfDc = ClusterSettings.DcRolePrefix + cluster.settings.DataCenter def matchingRole(member: Member): Boolean = - member.hasRole(selfTeam) && role.forall(member.hasRole) + member.hasRole(selfDc) && role.forall(member.hasRole) def trackChange(block: () ⇒ Unit): Unit = { val before = membersByAge.headOption diff --git a/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonProxy.scala b/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonProxy.scala index 3e51c493f5..0c017d31dd 100644 --- a/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonProxy.scala +++ b/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonProxy.scala @@ -20,6 +20,7 @@ import akka.actor.NoSerializationVerificationNeeded import akka.event.Logging import akka.util.MessageBuffer import akka.cluster.ClusterSettings +import akka.cluster.ClusterSettings.DataCenter object ClusterSingletonProxySettings { @@ -64,7 +65,7 @@ object ClusterSingletonProxySettings { /** * @param singletonName The actor name of the singleton actor that is started by the [[ClusterSingletonManager]]. * @param role The role of the cluster nodes where the singleton can be deployed. If None, then any node will do. - * @param team The team of the cluster nodes where the singleton is running. If None then the same team as current node. 
+ * @param dataCenter The data center of the cluster nodes where the singleton is running. If None then the same data center as current node. * @param singletonIdentificationInterval Interval at which the proxy will try to resolve the singleton instance. * @param bufferSize If the location of the singleton is unknown the proxy will buffer this number of messages * and deliver them when the singleton is identified. When the buffer is full old messages will be dropped @@ -74,7 +75,7 @@ object ClusterSingletonProxySettings { final class ClusterSingletonProxySettings( val singletonName: String, val role: Option[String], - val team: Option[String], + val dataCenter: Option[DataCenter], val singletonIdentificationInterval: FiniteDuration, val bufferSize: Int) extends NoSerializationVerificationNeeded { @@ -94,7 +95,7 @@ final class ClusterSingletonProxySettings( def withRole(role: Option[String]): ClusterSingletonProxySettings = copy(role = role) - def withTeam(team: String): ClusterSingletonProxySettings = copy(team = Some(team)) + def withDataCenter(dataCenter: DataCenter): ClusterSingletonProxySettings = copy(dataCenter = Some(dataCenter)) def withSingletonIdentificationInterval(singletonIdentificationInterval: FiniteDuration): ClusterSingletonProxySettings = copy(singletonIdentificationInterval = singletonIdentificationInterval) @@ -103,12 +104,12 @@ final class ClusterSingletonProxySettings( copy(bufferSize = bufferSize) private def copy( - singletonName: String = singletonName, - role: Option[String] = role, - team: Option[String] = team, - singletonIdentificationInterval: FiniteDuration = singletonIdentificationInterval, - bufferSize: Int = bufferSize): ClusterSingletonProxySettings = - new ClusterSingletonProxySettings(singletonName, role, team, singletonIdentificationInterval, bufferSize) + singletonName: String = singletonName, + role: Option[String] = role, + dataCenter: Option[DataCenter] = dataCenter, + singletonIdentificationInterval: FiniteDuration = singletonIdentificationInterval, + bufferSize: Int = bufferSize): ClusterSingletonProxySettings = + new ClusterSingletonProxySettings(singletonName, role, dataCenter, singletonIdentificationInterval, bufferSize) } object ClusterSingletonProxy { @@ -176,13 +177,13 @@ final class ClusterSingletonProxy(singletonManagerPath: String, settings: Cluste identifyTimer = None } - private val targetTeamRole = settings.team match { - case Some(t) ⇒ ClusterSettings.TeamRolePrefix + t - case None ⇒ ClusterSettings.TeamRolePrefix + cluster.settings.Team + private val targetDcRole = settings.dataCenter match { + case Some(t) ⇒ ClusterSettings.DcRolePrefix + t + case None ⇒ ClusterSettings.DcRolePrefix + cluster.settings.DataCenter } def matchingRole(member: Member): Boolean = - member.hasRole(targetTeamRole) && role.forall(member.hasRole) + member.hasRole(targetDcRole) && role.forall(member.hasRole) def handleInitial(state: CurrentClusterState): Unit = { trackChange { diff --git a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/TeamSingletonManagerSpec.scala b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/MultiDcSingletonManagerSpec.scala similarity index 57% rename from akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/TeamSingletonManagerSpec.scala rename to akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/MultiDcSingletonManagerSpec.scala index 4cecea2f3c..0a37bc1749 100644 --- a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/TeamSingletonManagerSpec.scala +++ 
b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/MultiDcSingletonManagerSpec.scala @@ -14,7 +14,7 @@ import akka.testkit.ImplicitSender import akka.remote.testkit.{ MultiNodeConfig, MultiNodeSpec, STMultiNodeSpec } import akka.cluster.ClusterSettings -object TeamSingletonManagerSpec extends MultiNodeConfig { +object MultiDcSingletonManagerSpec extends MultiNodeConfig { val controller = role("controller") val first = role("first") val second = role("second") @@ -28,44 +28,44 @@ object TeamSingletonManagerSpec extends MultiNodeConfig { nodeConfig(controller) { ConfigFactory.parseString(""" - akka.cluster.team = one + akka.cluster.data-center = one akka.cluster.roles = []""") } nodeConfig(first) { ConfigFactory.parseString(""" - akka.cluster.team = one + akka.cluster.data-center = one akka.cluster.roles = [ worker ]""") } nodeConfig(second, third) { ConfigFactory.parseString(""" - akka.cluster.team = two + akka.cluster.data-center = two akka.cluster.roles = [ worker ]""") } } -class TeamSingletonManagerMultiJvmNode1 extends TeamSingletonManagerSpec -class TeamSingletonManagerMultiJvmNode2 extends TeamSingletonManagerSpec -class TeamSingletonManagerMultiJvmNode3 extends TeamSingletonManagerSpec -class TeamSingletonManagerMultiJvmNode4 extends TeamSingletonManagerSpec +class MultiDcSingletonManagerMultiJvmNode1 extends MultiDcSingletonManagerSpec +class MultiDcSingletonManagerMultiJvmNode2 extends MultiDcSingletonManagerSpec +class MultiDcSingletonManagerMultiJvmNode3 extends MultiDcSingletonManagerSpec +class MultiDcSingletonManagerMultiJvmNode4 extends MultiDcSingletonManagerSpec -class TeamSingleton extends Actor with ActorLogging { - import TeamSingleton._ +class MultiDcSingleton extends Actor with ActorLogging { + import MultiDcSingleton._ val cluster = Cluster(context.system) override def receive: Receive = { case Ping ⇒ - sender() ! Pong(cluster.settings.Team, cluster.selfAddress, cluster.selfRoles) + sender() ! Pong(cluster.settings.DataCenter, cluster.selfAddress, cluster.selfRoles) } } -object TeamSingleton { +object MultiDcSingleton { case object Ping - case class Pong(fromTeam: String, fromAddress: Address, roles: Set[String]) + case class Pong(fromDc: String, fromAddress: Address, roles: Set[String]) } -abstract class TeamSingletonManagerSpec extends MultiNodeSpec(TeamSingletonManagerSpec) with STMultiNodeSpec with ImplicitSender { - import TeamSingletonManagerSpec._ +abstract class MultiDcSingletonManagerSpec extends MultiNodeSpec(MultiDcSingletonManagerSpec) with STMultiNodeSpec with ImplicitSender { + import MultiDcSingletonManagerSpec._ override def initialParticipants = roles.size @@ -75,13 +75,13 @@ abstract class TeamSingletonManagerSpec extends MultiNodeSpec(TeamSingletonManag val worker = "worker" - "A SingletonManager in a team" must { - "start a singleton instance for each team" in { + "A SingletonManager in a multi data center cluster" must { + "start a singleton instance for each data center" in { runOn(first, second, third) { system.actorOf( ClusterSingletonManager.props( - Props[TeamSingleton](), + Props[MultiDcSingleton](), PoisonPill, ClusterSingletonManagerSettings(system).withRole(worker)), "singletonManager") @@ -93,33 +93,33 @@ abstract class TeamSingletonManagerSpec extends MultiNodeSpec(TeamSingletonManag enterBarrier("managers-started") - proxy ! TeamSingleton.Ping - val pong = expectMsgType[TeamSingleton.Pong](10.seconds) + proxy ! 
MultiDcSingleton.Ping + val pong = expectMsgType[MultiDcSingleton.Pong](10.seconds) enterBarrier("pongs-received") - pong.fromTeam should equal(Cluster(system).settings.Team) + pong.fromDc should equal(Cluster(system).settings.DataCenter) pong.roles should contain(worker) runOn(controller, first) { - pong.roles should contain(ClusterSettings.TeamRolePrefix + "one") + pong.roles should contain(ClusterSettings.DcRolePrefix + "one") } runOn(second, third) { - pong.roles should contain(ClusterSettings.TeamRolePrefix + "two") + pong.roles should contain(ClusterSettings.DcRolePrefix + "two") } enterBarrier("after-1") } - "be able to use proxy across different team" in { + "be able to use proxy across different data centers" in { runOn(third) { val proxy = system.actorOf(ClusterSingletonProxy.props( "/user/singletonManager", - ClusterSingletonProxySettings(system).withRole(worker).withTeam("one"))) - proxy ! TeamSingleton.Ping - val pong = expectMsgType[TeamSingleton.Pong](10.seconds) - pong.fromTeam should ===("one") + ClusterSingletonProxySettings(system).withRole(worker).withDataCenter("one"))) + proxy ! MultiDcSingleton.Ping + val pong = expectMsgType[MultiDcSingleton.Pong](10.seconds) + pong.fromDc should ===("one") pong.roles should contain(worker) - pong.roles should contain(ClusterSettings.TeamRolePrefix + "one") + pong.roles should contain(ClusterSettings.DcRolePrefix + "one") } enterBarrier("after-1") } diff --git a/akka-cluster/src/main/resources/reference.conf b/akka-cluster/src/main/resources/reference.conf index ad9e3414bd..2bb2bf6766 100644 --- a/akka-cluster/src/main/resources/reference.conf +++ b/akka-cluster/src/main/resources/reference.conf @@ -65,17 +65,18 @@ akka { # move 'WeaklyUp' members to 'Up' status once convergence has been reached. allow-weakly-up-members = on - # Teams are used to make islands of the cluster that are colocated. This can be used - # to make the cluster aware that it is running across multiple availability zones or regions. - # The team is added to the list of roles of the node with the prefix "team-". - team = "default" + # Defines which data center this node belongs to. It is typically used to make islands of the + # cluster that are colocated. This can be used to make the cluster aware that it is running + # across multiple availability zones or regions. It can also be used for other logical + # grouping of nodes. + data-center = "default" # The roles of this member. List of strings, e.g. roles = ["A", "B"]. # The roles are part of the membership information and can be used by # routers or other services to distribute work to certain member types, # e.g. front-end and back-end nodes. 
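For illustration of the setting documented above, a sketch of giving a node a non-default data center programmatically; the zone name and the "backend" role are made-up example values:

object DataCenterConfigSketch {
  import akka.actor.ActorSystem
  import com.typesafe.config.ConfigFactory

  // Example values only; with the default configuration every node ends up
  // in data center "default".
  val config = ConfigFactory.parseString(
    """
    akka.cluster.data-center = "eu-west-1a"
    akka.cluster.roles = [ "backend" ]
    """).withFallback(ConfigFactory.load())

  def start(): ActorSystem = ActorSystem("ClusterSystem", config)
}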
- # Roles are not allowed to start with "team-" as that is reserved for the - # special role assigned from the team a node belongs to (see above) + # Roles are not allowed to start with "dc-" as that is reserved for the + # special role assigned from the data-center a node belongs to (see above) roles = [] # Run the coordinated shutdown from phase 'cluster-shutdown' when the cluster diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index 558f1420f8..3576cc6f0f 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -421,31 +421,31 @@ class Cluster(val system: ExtendedActorSystem) extends Extension { def logInfo(message: String): Unit = if (LogInfo) - if (settings.Team == ClusterSettings.DefaultTeam) + if (settings.DataCenter == ClusterSettings.DefaultDataCenter) log.info("Cluster Node [{}] - {}", selfAddress, message) else - log.info("Cluster Node [{}] team [{}] - {}", selfAddress, settings.Team, message) + log.info("Cluster Node [{}] dc [{}] - {}", selfAddress, settings.DataCenter, message) def logInfo(template: String, arg1: Any): Unit = if (LogInfo) - if (settings.Team == ClusterSettings.DefaultTeam) + if (settings.DataCenter == ClusterSettings.DefaultDataCenter) log.info("Cluster Node [{}] - " + template, selfAddress, arg1) else - log.info("Cluster Node [{}] team [{}] - " + template, selfAddress, settings.Team, arg1) + log.info("Cluster Node [{}] dc [{}] - " + template, selfAddress, settings.DataCenter, arg1) def logInfo(template: String, arg1: Any, arg2: Any): Unit = if (LogInfo) - if (settings.Team == ClusterSettings.DefaultTeam) + if (settings.DataCenter == ClusterSettings.DefaultDataCenter) log.info("Cluster Node [{}] - " + template, selfAddress, arg1, arg2) else - log.info("Cluster Node [{}] team [{}] - " + template, selfAddress, settings.Team, arg1, arg2) + log.info("Cluster Node [{}] dc [{}] - " + template, selfAddress, settings.DataCenter, arg1, arg2) def logInfo(template: String, arg1: Any, arg2: Any, arg3: Any): Unit = if (LogInfo) - if (settings.Team == ClusterSettings.DefaultTeam) + if (settings.DataCenter == ClusterSettings.DefaultDataCenter) log.info("Cluster Node [{}] - " + template, selfAddress, arg1, arg2, arg3) else - log.info("Cluster Node [{}] team [" + settings.Team + "] - " + template, selfAddress, arg1, arg2, arg3) + log.info("Cluster Node [{}] dc [" + settings.DataCenter + "] - " + template, selfAddress, arg1, arg2, arg3) } } diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterDaemon.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterDaemon.scala index b0e1e38130..ae5815c978 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterDaemon.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterDaemon.scala @@ -330,7 +330,7 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef) extends Actor with } var exitingConfirmed = Set.empty[UniqueAddress] - def selfTeam = cluster.settings.Team + def selfDc = cluster.settings.DataCenter /** * Looks up and returns the remote cluster command connection for the specific address. 
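A small sketch of how application code might read the value exposed by settings.DataCenter and the derived role, assuming the "dc-" prefix introduced by this patch; the object and method names are illustrative only:

object WhichDcSketch {
  import akka.actor.ActorSystem
  import akka.cluster.Cluster

  // Logs the configured data center of this node and the derived "dc-" role
  // that the hunks above add to the member's role set.
  def report(system: ActorSystem): Unit = {
    val cluster = Cluster(system)
    val dc = cluster.settings.DataCenter
    val dcRole = cluster.selfRoles.find(_.startsWith("dc-"))
    system.log.info("Data center [{}], dc role [{}]", dc, dcRole)
  }
}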
@@ -681,10 +681,10 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef) extends Actor with // send ExitingConfirmed to two potential leaders val membersExceptSelf = latestGossip.members.filter(_.uniqueAddress != selfUniqueAddress) - latestGossip.leaderOf(selfTeam, membersExceptSelf, selfUniqueAddress) match { + latestGossip.leaderOf(selfDc, membersExceptSelf, selfUniqueAddress) match { case Some(node1) ⇒ clusterCore(node1.address) ! ExitingConfirmed(selfUniqueAddress) - latestGossip.leaderOf(selfTeam, membersExceptSelf.filterNot(_.uniqueAddress == node1), selfUniqueAddress) match { + latestGossip.leaderOf(selfDc, membersExceptSelf.filterNot(_.uniqueAddress == node1), selfUniqueAddress) match { case Some(node2) ⇒ clusterCore(node2.address) ! ExitingConfirmed(selfUniqueAddress) case None ⇒ // no more potential leader @@ -723,7 +723,7 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef) extends Actor with val localMembers = localGossip.members val localOverview = localGossip.overview val localSeen = localOverview.seen - val localReachability = localGossip.teamReachability(selfTeam) + val localReachability = localGossip.dcReachability(selfDc) // check if the node to DOWN is in the `members` set localMembers.find(_.address == address) match { @@ -1004,11 +1004,11 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef) extends Actor with * Runs periodic leader actions, such as member status transitions, assigning partitions etc. */ def leaderActions(): Unit = { - if (latestGossip.isTeamLeader(selfTeam, selfUniqueAddress, selfUniqueAddress)) { - // only run the leader actions if we are the LEADER of the team + if (latestGossip.isDcLeader(selfDc, selfUniqueAddress, selfUniqueAddress)) { + // only run the leader actions if we are the LEADER of the data center val firstNotice = 20 val periodicNotice = 60 - if (latestGossip.convergence(selfTeam, selfUniqueAddress, exitingConfirmed)) { + if (latestGossip.convergence(selfDc, selfUniqueAddress, exitingConfirmed)) { if (leaderActionCounter >= firstNotice) logInfo("Leader can perform its duties again") leaderActionCounter = 0 @@ -1021,9 +1021,9 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef) extends Actor with if (leaderActionCounter == firstNotice || leaderActionCounter % periodicNotice == 0) logInfo( "Leader can currently not perform its duties, reachability status: [{}], member status: [{}]", - latestGossip.teamReachabilityExcludingDownedObservers(selfTeam), + latestGossip.dcReachabilityExcludingDownedObservers(selfDc), latestGossip.members.collect { - case m if m.team == selfTeam ⇒ + case m if m.dataCenter == selfDc ⇒ s"${m.address} ${m.status} seen=${latestGossip.seenByNode(m.uniqueAddress)}" }.mkString(", ")) } @@ -1036,8 +1036,8 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef) extends Actor with if (latestGossip.member(selfUniqueAddress).status == Down) { // When all reachable have seen the state this member will shutdown itself when it has // status Down. The down commands should spread before we shutdown. 
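As a side note to the downing changes below, manual downing still works across data centers (the MultiDcClusterSpec later in this patch downs a dc1 member from a dc2 node); a minimal sketch with a placeholder address:

object CrossDcDownSketch {
  import akka.actor.{ ActorSystem, Address }
  import akka.cluster.Cluster

  // The address is a placeholder; in the multi-jvm spec it comes from the
  // test conductor. Downing a member is allowed even when it lives in
  // another data center.
  def downNodeInOtherDc(system: ActorSystem): Unit = {
    val target = Address("akka.tcp", "ClusterSystem", "host-in-dc1", 2552)
    Cluster(system).down(target)
  }
}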
- val unreachable = latestGossip.teamReachability(selfTeam).allUnreachableOrTerminated - val downed = latestGossip.teamMembers(selfTeam).collect { case m if m.status == Down ⇒ m.uniqueAddress } + val unreachable = latestGossip.dcReachability(selfDc).allUnreachableOrTerminated + val downed = latestGossip.dcMembers(selfDc).collect { case m if m.status == Down ⇒ m.uniqueAddress } if (downed.forall(node ⇒ unreachable(node) || latestGossip.seenByNode(node))) { // the reason for not shutting down immediately is to give the gossip a chance to spread // the downing information to other downed nodes, so that they can shutdown themselves @@ -1072,14 +1072,14 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef) extends Actor with def leaderActionsOnConvergence(): Unit = { val removedUnreachable = for { - node ← latestGossip.teamReachability(selfTeam).allUnreachableOrTerminated + node ← latestGossip.dcReachability(selfDc).allUnreachableOrTerminated m = latestGossip.member(node) - if m.team == selfTeam && Gossip.removeUnreachableWithMemberStatus(m.status) + if m.dataCenter == selfDc && Gossip.removeUnreachableWithMemberStatus(m.status) } yield m val removedExitingConfirmed = exitingConfirmed.filter { n ⇒ val member = latestGossip.member(n) - member.team == selfTeam && member.status == Exiting + member.dataCenter == selfDc && member.status == Exiting } val changedMembers = { @@ -1090,7 +1090,7 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef) extends Actor with var upNumber = 0 { - case m if m.team == selfTeam && isJoiningToUp(m) ⇒ + case m if m.dataCenter == selfDc && isJoiningToUp(m) ⇒ // Move JOINING => UP (once all nodes have seen that this node is JOINING, i.e. we have a convergence) // and minimum number of nodes have joined the cluster if (upNumber == 0) { @@ -1103,7 +1103,7 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef) extends Actor with } m.copyUp(upNumber) - case m if m.team == selfTeam && m.status == Leaving ⇒ + case m if m.dataCenter == selfDc && m.status == Leaving ⇒ // Move LEAVING => EXITING (once we have a convergence on LEAVING) m copy (status = Exiting) } @@ -1158,10 +1158,10 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef) extends Actor with val enoughMembers: Boolean = isMinNrOfMembersFulfilled def isJoiningToWeaklyUp(m: Member): Boolean = - m.team == selfTeam && + m.dataCenter == selfDc && m.status == Joining && enoughMembers && - latestGossip.teamReachabilityExcludingDownedObservers(selfTeam).isReachable(m.uniqueAddress) + latestGossip.dcReachabilityExcludingDownedObservers(selfDc).isReachable(m.uniqueAddress) val changedMembers = localMembers.collect { case m if isJoiningToWeaklyUp(m) ⇒ m.copy(status = WeaklyUp) } @@ -1269,7 +1269,7 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef) extends Actor with clusterCore(node.address) ! 
GossipStatus(selfUniqueAddress, latestGossip.version) def validNodeForGossip(node: UniqueAddress): Boolean = - node != selfUniqueAddress && latestGossip.isReachableExcludingDownedObservers(selfTeam, node) + node != selfUniqueAddress && latestGossip.isReachableExcludingDownedObservers(selfDc, node) def updateLatestGossip(newGossip: Gossip): Unit = { // Updating the vclock version for the changes @@ -1295,7 +1295,7 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef) extends Actor with def publish(newGossip: Gossip): Unit = { if (cluster.settings.Debug.VerboseGossipLogging) - log.debug("Cluster Node [{}] team [{}] - New gossip published [{}]", selfAddress, cluster.settings.Team, newGossip) + log.debug("Cluster Node [{}] dc [{}] - New gossip published [{}]", selfAddress, cluster.settings.DataCenter, newGossip) publisher ! PublishChanges(newGossip) if (PublishStatsInterval == Duration.Zero) publishInternalStats() diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterEvent.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterEvent.scala index 9b50818a6a..c2728d8eb1 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterEvent.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterEvent.scala @@ -7,7 +7,7 @@ import language.postfixOps import scala.collection.immutable import scala.collection.immutable.VectorBuilder import akka.actor.{ Actor, ActorLogging, ActorRef, Address } -import akka.cluster.ClusterSettings.Team +import akka.cluster.ClusterSettings.DataCenter import akka.cluster.ClusterEvent._ import akka.cluster.MemberStatus._ import akka.event.EventStream @@ -58,7 +58,7 @@ object ClusterEvent { /** * Current snapshot state of the cluster. Sent to new subscriber. * - * @param leader leader of the team of this node + * @param leader leader of the data center of this node */ final case class CurrentClusterState( members: immutable.SortedSet[Member] = immutable.SortedSet.empty, @@ -88,17 +88,17 @@ object ClusterEvent { scala.collection.JavaConverters.setAsJavaSetConverter(seenBy).asJava /** - * Java API: get address of current team leader, or null if none + * Java API: get address of current data center leader, or null if none */ def getLeader: Address = leader orNull /** - * get address of current leader, if any, within the team that has the given role + * get address of current leader, if any, within the data center that has the given role */ def roleLeader(role: String): Option[Address] = roleLeaderMap.getOrElse(role, None) /** - * Java API: get address of current leader, if any, within the team that has the given role + * Java API: get address of current leader, if any, within the data center that has the given role * or null if no such node exists */ def getRoleLeader(role: String): Address = roleLeaderMap.get(role).flatten.orNull @@ -115,15 +115,15 @@ object ClusterEvent { scala.collection.JavaConverters.setAsJavaSetConverter(allRoles).asJava /** - * All teams in the cluster + * All data centers in the cluster */ - def allTeams: Set[String] = members.map(_.team)(breakOut) + def allDataCenters: Set[String] = members.map(_.dataCenter)(breakOut) /** - * Java API: All teams in the cluster + * Java API: All data centers in the cluster */ - def getAllTeams: java.util.Set[String] = - scala.collection.JavaConverters.setAsJavaSetConverter(allTeams).asJava + def getAllDataCenters: java.util.Set[String] = + scala.collection.JavaConverters.setAsJavaSetConverter(allDataCenters).asJava } @@ -189,7 +189,7 @@ object ClusterEvent { } /** - * Leader of the cluster team of 
this node changed. Published when the state change + * Leader of the cluster data center of this node changed. Published when the state change * is first seen on a node. */ final case class LeaderChanged(leader: Option[Address]) extends ClusterDomainEvent { @@ -201,7 +201,8 @@ object ClusterEvent { } /** - * First member (leader) of the members within a role set (in the same team as this node, if cluster teams are used) changed. + * First member (leader) of the members within a role set (in the same data center as this node, + * if data centers are used) changed. * Published when the state change is first seen on a node. */ final case class RoleLeaderChanged(role: String, leader: Option[Address]) extends ClusterDomainEvent { @@ -318,9 +319,9 @@ object ClusterEvent { * INTERNAL API */ @InternalApi - private[cluster] def diffLeader(team: Team, oldGossip: Gossip, newGossip: Gossip, selfUniqueAddress: UniqueAddress): immutable.Seq[LeaderChanged] = { - val newLeader = newGossip.teamLeader(team, selfUniqueAddress) - if (newLeader != oldGossip.teamLeader(team, selfUniqueAddress)) List(LeaderChanged(newLeader.map(_.address))) + private[cluster] def diffLeader(dc: DataCenter, oldGossip: Gossip, newGossip: Gossip, selfUniqueAddress: UniqueAddress): immutable.Seq[LeaderChanged] = { + val newLeader = newGossip.dcLeader(dc, selfUniqueAddress) + if (newLeader != oldGossip.dcLeader(dc, selfUniqueAddress)) List(LeaderChanged(newLeader.map(_.address))) else Nil } @@ -328,11 +329,11 @@ object ClusterEvent { * INTERNAL API */ @InternalApi - private[cluster] def diffRolesLeader(team: Team, oldGossip: Gossip, newGossip: Gossip, selfUniqueAddress: UniqueAddress): Set[RoleLeaderChanged] = { + private[cluster] def diffRolesLeader(dc: DataCenter, oldGossip: Gossip, newGossip: Gossip, selfUniqueAddress: UniqueAddress): Set[RoleLeaderChanged] = { for { role ← oldGossip.allRoles union newGossip.allRoles - newLeader = newGossip.roleLeader(team, role, selfUniqueAddress) - if newLeader != oldGossip.roleLeader(team, role, selfUniqueAddress) + newLeader = newGossip.roleLeader(dc, role, selfUniqueAddress) + if newLeader != oldGossip.roleLeader(dc, role, selfUniqueAddress) } yield RoleLeaderChanged(role, newLeader.map(_.address)) } @@ -340,12 +341,12 @@ object ClusterEvent { * INTERNAL API */ @InternalApi - private[cluster] def diffSeen(team: Team, oldGossip: Gossip, newGossip: Gossip, selfUniqueAddress: UniqueAddress): immutable.Seq[SeenChanged] = + private[cluster] def diffSeen(dc: DataCenter, oldGossip: Gossip, newGossip: Gossip, selfUniqueAddress: UniqueAddress): immutable.Seq[SeenChanged] = if (newGossip eq oldGossip) Nil else { - val newConvergence = newGossip.convergence(team, selfUniqueAddress, Set.empty) + val newConvergence = newGossip.convergence(dc, selfUniqueAddress, Set.empty) val newSeenBy = newGossip.seenBy - if (newConvergence != oldGossip.convergence(team, selfUniqueAddress, Set.empty) || newSeenBy != oldGossip.seenBy) + if (newConvergence != oldGossip.convergence(dc, selfUniqueAddress, Set.empty) || newSeenBy != oldGossip.seenBy) List(SeenChanged(newConvergence, newSeenBy.map(_.address))) else Nil } @@ -372,7 +373,7 @@ private[cluster] final class ClusterDomainEventPublisher extends Actor with Acto val cluster = Cluster(context.system) val selfUniqueAddress = cluster.selfUniqueAddress var latestGossip: Gossip = Gossip.empty - def selfTeam = cluster.settings.Team + def selfDc = cluster.settings.DataCenter override def preRestart(reason: Throwable, message: Option[Any]) { // don't postStop when restarted, 
no children to stop @@ -407,11 +408,9 @@ private[cluster] final class ClusterDomainEventPublisher extends Actor with Acto members = latestGossip.members, unreachable = unreachable, seenBy = latestGossip.seenBy.map(_.address), - leader = latestGossip.teamLeader(selfTeam, selfUniqueAddress).map(_.address), + leader = latestGossip.dcLeader(selfDc, selfUniqueAddress).map(_.address), roleLeaderMap = latestGossip.allRoles.map(r ⇒ - r → latestGossip.roleLeader(selfTeam, r, selfUniqueAddress).map(_.address) - )(collection.breakOut) - ) + r → latestGossip.roleLeader(selfDc, r, selfUniqueAddress).map(_.address))(collection.breakOut)) receiver ! state } @@ -446,10 +445,10 @@ private[cluster] final class ClusterDomainEventPublisher extends Actor with Acto diffMemberEvents(oldGossip, newGossip) foreach pub diffUnreachable(oldGossip, newGossip, selfUniqueAddress) foreach pub diffReachable(oldGossip, newGossip, selfUniqueAddress) foreach pub - diffLeader(selfTeam, oldGossip, newGossip, selfUniqueAddress) foreach pub - diffRolesLeader(selfTeam, oldGossip, newGossip, selfUniqueAddress) foreach pub + diffLeader(selfDc, oldGossip, newGossip, selfUniqueAddress) foreach pub + diffRolesLeader(selfDc, oldGossip, newGossip, selfUniqueAddress) foreach pub // publish internal SeenState for testing purposes - diffSeen(selfTeam, oldGossip, newGossip, selfUniqueAddress) foreach pub + diffSeen(selfDc, oldGossip, newGossip, selfUniqueAddress) foreach pub diffReachability(oldGossip, newGossip) foreach pub } diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterReadView.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterReadView.scala index 10f7cb309a..20799cfb11 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterReadView.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterReadView.scala @@ -109,12 +109,12 @@ private[akka] class ClusterReadView(cluster: Cluster) extends Closeable { def status: MemberStatus = self.status /** - * Is this node the current team leader + * Is this node the current data center leader */ def isLeader: Boolean = leader.contains(selfAddress) /** - * Get the address of the current team leader + * Get the address of the current data center leader */ def leader: Option[Address] = state.leader diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala index ddc7b7717d..b7106526ca 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala @@ -18,18 +18,18 @@ import scala.concurrent.duration.FiniteDuration import akka.japi.Util.immutableSeq object ClusterSettings { - type Team = String + type DataCenter = String /** * INTERNAL API. */ @InternalApi - private[akka] val TeamRolePrefix = "team-" + private[akka] val DcRolePrefix = "dc-" /** * INTERNAL API. 
*/ @InternalApi - private[akka] val DefaultTeam: Team = "default" + private[akka] val DefaultDataCenter: DataCenter = "default" } @@ -116,14 +116,13 @@ final class ClusterSettings(val config: Config, val systemName: String) { val AllowWeaklyUpMembers = cc.getBoolean("allow-weakly-up-members") - val Team: Team = cc.getString("team") + val DataCenter: DataCenter = cc.getString("data-center") val Roles: Set[String] = { val configuredRoles = (immutableSeq(cc.getStringList("roles")).toSet) requiring ( - _.forall(!_.startsWith(TeamRolePrefix)), - s"Roles must not start with '${TeamRolePrefix}' as that is reserved for the cluster team setting" - ) + _.forall(!_.startsWith(DcRolePrefix)), + s"Roles must not start with '${DcRolePrefix}' as that is reserved for the cluster data-center setting") - configuredRoles + s"$TeamRolePrefix$Team" + configuredRoles + s"$DcRolePrefix$DataCenter" } val MinNrOfMembers: Int = { cc.getInt("min-nr-of-members") diff --git a/akka-cluster/src/main/scala/akka/cluster/Gossip.scala b/akka-cluster/src/main/scala/akka/cluster/Gossip.scala index 6bc18f38a2..a5e835113c 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Gossip.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Gossip.scala @@ -5,7 +5,7 @@ package akka.cluster import scala.collection.{ SortedSet, immutable } -import ClusterSettings.Team +import ClusterSettings.DataCenter import MemberStatus._ import akka.annotation.InternalApi @@ -169,32 +169,31 @@ private[cluster] final case class Gossip( } /** - * Checks if we have a cluster convergence. If there are any in team node pairs that cannot reach each other + * Checks if we have a cluster convergence. If there are any in data center node pairs that cannot reach each other * then we can't have a convergence until those nodes reach each other again or one of them is downed * * @return true if convergence have been reached and false if not */ - def convergence(team: Team, selfUniqueAddress: UniqueAddress, exitingConfirmed: Set[UniqueAddress]): Boolean = { - // Find cluster members in the team that are unreachable from other members of the team - // excluding observations from members outside of the team, that have status DOWN or is passed in as confirmed exiting. - val unreachableInTeam = teamReachabilityExcludingDownedObservers(team).allUnreachableOrTerminated.collect { + def convergence(dc: DataCenter, selfUniqueAddress: UniqueAddress, exitingConfirmed: Set[UniqueAddress]): Boolean = { + // Find cluster members in the data center that are unreachable from other members of the data center + // excluding observations from members outside of the data center, that have status DOWN or is passed in as confirmed exiting. 
+ val unreachableInDc = dcReachabilityExcludingDownedObservers(dc).allUnreachableOrTerminated.collect { case node if node != selfUniqueAddress && !exitingConfirmed(node) ⇒ member(node) } - // If another member in the team that is UP or LEAVING and has not seen this gossip or is exiting + // If another member in the data center that is UP or LEAVING and has not seen this gossip or is exiting // convergence cannot be reached - def teamMemberHinderingConvergenceExists = + def memberHinderingConvergenceExists = members.exists(member ⇒ - member.team == team && + member.dataCenter == dc && Gossip.convergenceMemberStatus(member.status) && - !(seenByNode(member.uniqueAddress) || exitingConfirmed(member.uniqueAddress)) - ) + !(seenByNode(member.uniqueAddress) || exitingConfirmed(member.uniqueAddress))) - // unreachables outside of the team or with status DOWN or EXITING does not affect convergence + // unreachables outside of the data center or with status DOWN or EXITING does not affect convergence def allUnreachablesCanBeIgnored = - unreachableInTeam.forall(unreachable ⇒ Gossip.convergenceSkipUnreachableWithMemberStatus(unreachable.status)) + unreachableInDc.forall(unreachable ⇒ Gossip.convergenceSkipUnreachableWithMemberStatus(unreachable.status)) - allUnreachablesCanBeIgnored && !teamMemberHinderingConvergenceExists + allUnreachablesCanBeIgnored && !memberHinderingConvergenceExists } lazy val reachabilityExcludingDownedObservers: Reachability = { @@ -203,77 +202,77 @@ private[cluster] final case class Gossip( } /** - * @return Reachability excluding observations from nodes outside of the team, but including observed unreachable - * nodes outside of the team + * @return Reachability excluding observations from nodes outside of the data center, but including observed unreachable + * nodes outside of the data center */ - def teamReachability(team: Team): Reachability = - overview.reachability.removeObservers(members.collect { case m if m.team != team ⇒ m.uniqueAddress }) + def dcReachability(dc: DataCenter): Reachability = + overview.reachability.removeObservers(members.collect { case m if m.dataCenter != dc ⇒ m.uniqueAddress }) /** - * @return reachability for team nodes, with observations from outside the team or from downed nodes filtered out + * @return reachability for data center nodes, with observations from outside the data center or from downed nodes filtered out */ - def teamReachabilityExcludingDownedObservers(team: Team): Reachability = { - val membersToExclude = members.collect { case m if m.status == Down || m.team != team ⇒ m.uniqueAddress } - overview.reachability.removeObservers(membersToExclude).remove(members.collect { case m if m.team != team ⇒ m.uniqueAddress }) + def dcReachabilityExcludingDownedObservers(dc: DataCenter): Reachability = { + val membersToExclude = members.collect { case m if m.status == Down || m.dataCenter != dc ⇒ m.uniqueAddress } + overview.reachability.removeObservers(membersToExclude).remove(members.collect { case m if m.dataCenter != dc ⇒ m.uniqueAddress }) } - def teamMembers(team: Team): SortedSet[Member] = - members.filter(_.team == team) + def dcMembers(dc: DataCenter): SortedSet[Member] = + members.filter(_.dataCenter == dc) - def isTeamLeader(team: Team, node: UniqueAddress, selfUniqueAddress: UniqueAddress): Boolean = - teamLeader(team, selfUniqueAddress).contains(node) + def isDcLeader(dc: DataCenter, node: UniqueAddress, selfUniqueAddress: UniqueAddress): Boolean = + dcLeader(dc, selfUniqueAddress).contains(node) - def teamLeader(team: Team, 
selfUniqueAddress: UniqueAddress): Option[UniqueAddress] = - leaderOf(team, members, selfUniqueAddress) + def dcLeader(dc: DataCenter, selfUniqueAddress: UniqueAddress): Option[UniqueAddress] = + leaderOf(dc, members, selfUniqueAddress) - def roleLeader(team: Team, role: String, selfUniqueAddress: UniqueAddress): Option[UniqueAddress] = - leaderOf(team, members.filter(_.hasRole(role)), selfUniqueAddress) + def roleLeader(dc: DataCenter, role: String, selfUniqueAddress: UniqueAddress): Option[UniqueAddress] = + leaderOf(dc, members.filter(_.hasRole(role)), selfUniqueAddress) - def leaderOf(team: Team, mbrs: immutable.SortedSet[Member], selfUniqueAddress: UniqueAddress): Option[UniqueAddress] = { - val reachability = teamReachability(team) + def leaderOf(dc: DataCenter, mbrs: immutable.SortedSet[Member], selfUniqueAddress: UniqueAddress): Option[UniqueAddress] = { + val reachability = dcReachability(dc) - val reachableTeamMembers = - if (reachability.isAllReachable) mbrs.filter(m ⇒ m.team == team && m.status != Down) + val reachableMembersInDc = + if (reachability.isAllReachable) mbrs.filter(m ⇒ m.dataCenter == dc && m.status != Down) else mbrs.filter(m ⇒ - m.team == team && + m.dataCenter == dc && m.status != Down && (reachability.isReachable(m.uniqueAddress) || m.uniqueAddress == selfUniqueAddress)) - if (reachableTeamMembers.isEmpty) None - else reachableTeamMembers.find(m ⇒ Gossip.leaderMemberStatus(m.status)) - .orElse(Some(reachableTeamMembers.min(Member.leaderStatusOrdering))) + if (reachableMembersInDc.isEmpty) None + else reachableMembersInDc.find(m ⇒ Gossip.leaderMemberStatus(m.status)) + .orElse(Some(reachableMembersInDc.min(Member.leaderStatusOrdering))) .map(_.uniqueAddress) } - def allTeams: Set[Team] = members.map(_.team) + def allDataCenters: Set[DataCenter] = members.map(_.dataCenter) def allRoles: Set[String] = members.flatMap(_.roles) def isSingletonCluster: Boolean = members.size == 1 /** - * @return true if toAddress should be reachable from the fromTeam in general, within a team - * this means only caring about team-local observations, across teams it means caring - * about all observations for the toAddress. + * @return true if toAddress should be reachable from the fromDc in general, within a data center + * this means only caring about data center local observations, across data centers it + * means caring about all observations for the toAddress. 
*/ - def isReachableExcludingDownedObservers(fromTeam: Team, toAddress: UniqueAddress): Boolean = + def isReachableExcludingDownedObservers(fromDc: DataCenter, toAddress: UniqueAddress): Boolean = if (!hasMember(toAddress)) false else { val to = member(toAddress) - // if member is in the same team, we ignore cross-team unreachability - if (fromTeam == to.team) teamReachabilityExcludingDownedObservers(fromTeam).isReachable(toAddress) + // if member is in the same data center, we ignore cross data center unreachability + if (fromDc == to.dataCenter) dcReachabilityExcludingDownedObservers(fromDc).isReachable(toAddress) // if not it is enough that any non-downed node observed it as unreachable else reachabilityExcludingDownedObservers.isReachable(toAddress) } /** * @return true if fromAddress should be able to reach toAddress based on the unreachability data and their - * respective teams + * respective data centers */ def isReachable(fromAddress: UniqueAddress, toAddress: UniqueAddress): Boolean = if (!hasMember(toAddress)) false else { - // as it looks for specific unreachable entires for the node pair we don't have to filter on team + // as it looks for specific unreachable entires for the node pair we don't have to filter on data center overview.reachability.isReachable(fromAddress, toAddress) } diff --git a/akka-cluster/src/main/scala/akka/cluster/Member.scala b/akka-cluster/src/main/scala/akka/cluster/Member.scala index 79b6ac7b77..4fb0fbc73e 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Member.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Member.scala @@ -7,7 +7,7 @@ package akka.cluster import akka.actor.Address import MemberStatus._ import akka.annotation.InternalApi -import akka.cluster.ClusterSettings.Team +import akka.cluster.ClusterSettings.DataCenter import scala.runtime.AbstractFunction2 @@ -24,9 +24,9 @@ class Member private[cluster] ( val status: MemberStatus, val roles: Set[String]) extends Serializable { - lazy val team: String = roles.find(_.startsWith(ClusterSettings.TeamRolePrefix)) - .getOrElse(throw new IllegalStateException("Team undefined, should not be possible")) - .substring(ClusterSettings.TeamRolePrefix.length) + lazy val dataCenter: DataCenter = roles.find(_.startsWith(ClusterSettings.DcRolePrefix)) + .getOrElse(throw new IllegalStateException("DataCenter undefined, should not be possible")) + .substring(ClusterSettings.DcRolePrefix.length) def address: Address = uniqueAddress.address @@ -36,10 +36,10 @@ class Member private[cluster] ( case _ ⇒ false } override def toString = - if (team == ClusterSettings.DefaultTeam) + if (dataCenter == ClusterSettings.DefaultDataCenter) s"Member(address = $address, status = $status)" else - s"Member(address = $address, team = $team, status = $status)" + s"Member(address = $address, dataCenter = $dataCenter, status = $status)" def hasRole(role: String): Boolean = roles.contains(role) @@ -54,8 +54,8 @@ class Member private[cluster] ( * member. It is only correct when comparing two existing members in a * cluster. A member that joined after removal of another member may be * considered older than the removed member. Note that is only makes - * sense to compare with other members inside of one team (upNumber has - * a higher risk of being reused across teams). + * sense to compare with other members inside of one data center (upNumber has + * a higher risk of being reused across data centers). 
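Building on the new Member.dataCenter field above, a sketch of grouping the current membership per data center; the helper name is illustrative:

object MembersByDcSketch {
  import akka.actor.ActorSystem
  import akka.cluster.{ Cluster, Member }

  // Groups the currently known members by the new Member.dataCenter field,
  // e.g. Map("dc1" -> Set(...), "dc2" -> Set(...)).
  def membersByDc(system: ActorSystem): Map[String, Set[Member]] =
    Cluster(system).state.members
      .groupBy(_.dataCenter)
      .map { case (dc, members) ⇒ dc → members.toSet }
}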
*/ def isOlderThan(other: Member): Boolean = if (upNumber == other.upNumber) @@ -97,7 +97,7 @@ object Member { * INTERNAL API */ private[cluster] def removed(node: UniqueAddress): Member = - new Member(node, Int.MaxValue, Removed, Set(ClusterSettings.TeamRolePrefix + "-N/A")) + new Member(node, Int.MaxValue, Removed, Set(ClusterSettings.DcRolePrefix + "-N/A")) /** * `Address` ordering type class, sorts addresses by host and port. diff --git a/akka-cluster/src/main/scala/akka/cluster/protobuf/ClusterMessageSerializer.scala b/akka-cluster/src/main/scala/akka/cluster/protobuf/ClusterMessageSerializer.scala index 25a9e69eef..7af7cc9ced 100644 --- a/akka-cluster/src/main/scala/akka/cluster/protobuf/ClusterMessageSerializer.scala +++ b/akka-cluster/src/main/scala/akka/cluster/protobuf/ClusterMessageSerializer.scala @@ -356,11 +356,11 @@ class ClusterMessageSerializer(val system: ExtendedActorSystem) extends BaseSeri roleIndex ← roleIndexes role = roleMapping(roleIndex) } { - if (role.startsWith(ClusterSettings.TeamRolePrefix)) containsDc = true + if (role.startsWith(ClusterSettings.DcRolePrefix)) containsDc = true roles += role } - if (!containsDc) roles + (ClusterSettings.TeamRolePrefix + "default") + if (!containsDc) roles + (ClusterSettings.DcRolePrefix + "default") else roles } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MBeanSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MBeanSpec.scala index cb43fe4d52..f33f9823b5 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MBeanSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MBeanSpec.scala @@ -120,7 +120,7 @@ abstract class MBeanSpec | { | "address": "${sortedNodes(0)}", | "roles": [ - | "team-default", + | "dc-default", | "testNode" | ], | "status": "Up" @@ -128,7 +128,7 @@ abstract class MBeanSpec | { | "address": "${sortedNodes(1)}", | "roles": [ - | "team-default", + | "dc-default", | "testNode" | ], | "status": "Up" @@ -136,7 +136,7 @@ abstract class MBeanSpec | { | "address": "${sortedNodes(2)}", | "roles": [ - | "team-default", + | "dc-default", | "testNode" | ], | "status": "Up" @@ -144,7 +144,7 @@ abstract class MBeanSpec | { | "address": "${sortedNodes(3)}", | "roles": [ - | "team-default", + | "dc-default", | "testNode" | ], | "status": "Up" diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiTeamClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcClusterSpec.scala similarity index 70% rename from akka-cluster/src/multi-jvm/scala/akka/cluster/MultiTeamClusterSpec.scala rename to akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcClusterSpec.scala index a1355f6ea9..9f49188dd4 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiTeamClusterSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcClusterSpec.scala @@ -10,7 +10,7 @@ import com.typesafe.config.ConfigFactory import scala.concurrent.duration._ -object MultiTeamMultiJvmSpec extends MultiNodeConfig { +object MultiDcMultiJvmSpec extends MultiNodeConfig { val first = role("first") val second = role("second") val third = role("third") @@ -21,32 +21,32 @@ object MultiTeamMultiJvmSpec extends MultiNodeConfig { nodeConfig(first, second)(ConfigFactory.parseString( """ - akka.cluster.team = "dc1" + akka.cluster.data-center = "dc1" akka.loglevel = INFO """)) nodeConfig(third, fourth, fifth)(ConfigFactory.parseString( """ - akka.cluster.team = "dc2" + akka.cluster.data-center = "dc2" akka.loglevel = INFO """)) testTransport(on = true) } -class MultiTeamMultiJvmNode1 extends 
MultiTeamSpec -class MultiTeamMultiJvmNode2 extends MultiTeamSpec -class MultiTeamMultiJvmNode3 extends MultiTeamSpec -class MultiTeamMultiJvmNode4 extends MultiTeamSpec -class MultiTeamMultiJvmNode5 extends MultiTeamSpec +class MultiDcMultiJvmNode1 extends MultiDcSpec +class MultiDcMultiJvmNode2 extends MultiDcSpec +class MultiDcMultiJvmNode3 extends MultiDcSpec +class MultiDcMultiJvmNode4 extends MultiDcSpec +class MultiDcMultiJvmNode5 extends MultiDcSpec -abstract class MultiTeamSpec - extends MultiNodeSpec(MultiTeamMultiJvmSpec) +abstract class MultiDcSpec + extends MultiNodeSpec(MultiDcMultiJvmSpec) with MultiNodeClusterSpec { - import MultiTeamMultiJvmSpec._ + import MultiDcMultiJvmSpec._ - "A cluster with multiple cluster teams" must { + "A cluster with multiple data centers" must { "be able to form" in { runOn(first) { @@ -66,31 +66,31 @@ abstract class MultiTeamSpec enterBarrier("cluster started") } - "have a leader per team" in { + "have a leader per data center" in { runOn(first, second) { - cluster.settings.Team should ===("dc1") + cluster.settings.DataCenter should ===("dc1") clusterView.leader shouldBe defined val dc1 = Set(address(first), address(second)) dc1 should contain(clusterView.leader.get) } runOn(third, fourth) { - cluster.settings.Team should ===("dc2") + cluster.settings.DataCenter should ===("dc2") clusterView.leader shouldBe defined val dc2 = Set(address(third), address(fourth)) dc2 should contain(clusterView.leader.get) } - enterBarrier("leader per team") + enterBarrier("leader per data center") } - "be able to have team member changes while there is inter-team unreachability" in within(20.seconds) { + "be able to have data center member changes while there is inter data center unreachability" in within(20.seconds) { runOn(first) { testConductor.blackhole(first, third, Direction.Both).await } runOn(first, second, third, fourth) { awaitAssert(clusterView.unreachableMembers should not be empty) } - enterBarrier("inter-team unreachability") + enterBarrier("inter-data-center unreachability") runOn(fifth) { cluster.join(third) @@ -108,17 +108,17 @@ abstract class MultiTeamSpec runOn(first, second, third, fourth) { awaitAssert(clusterView.unreachableMembers should not be empty) } - enterBarrier("inter-team unreachability end") + enterBarrier("inter-data-center unreachability end") } - "be able to have team member changes while there is unreachability in another team" in within(20.seconds) { + "be able to have data center member changes while there is unreachability in another data center" in within(20.seconds) { runOn(first) { testConductor.blackhole(first, second, Direction.Both).await } runOn(first, second, third, fourth) { awaitAssert(clusterView.unreachableMembers should not be empty) } - enterBarrier("other-team-internal-unreachable") + enterBarrier("other-data-center-internal-unreachable") runOn(third) { cluster.join(fifth) @@ -130,15 +130,15 @@ abstract class MultiTeamSpec awaitAssert(clusterView.members.collect { case m if m.status == Up ⇒ m.address } should contain(address(fifth))) } - enterBarrier("other-team-internal-unreachable changed") + enterBarrier("other-data-center-internal-unreachable changed") runOn(first) { testConductor.passThrough(first, second, Direction.Both).await } - enterBarrier("other-team-internal-unreachable end") + enterBarrier("other-datac-enter-internal-unreachable end") } - "be able to down a member of another team" in within(20.seconds) { + "be able to down a member of another data-center" in within(20.seconds) { runOn(fifth) { 
cluster.down(address(second)) } @@ -146,7 +146,7 @@ abstract class MultiTeamSpec runOn(first, third, fifth) { awaitAssert(clusterView.members.map(_.address) should not contain (address(second))) } - enterBarrier("cross-team-downed") + enterBarrier("cross-data-center-downed") } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiTeamSplitBrainSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcSplitBrainSpec.scala similarity index 59% rename from akka-cluster/src/multi-jvm/scala/akka/cluster/MultiTeamSplitBrainSpec.scala rename to akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcSplitBrainSpec.scala index 772f2de585..86b75dbe09 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiTeamSplitBrainSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcSplitBrainSpec.scala @@ -9,7 +9,7 @@ import com.typesafe.config.ConfigFactory import scala.concurrent.duration._ -object MultiTeamSplitBrainMultiJvmSpec extends MultiNodeConfig { +object MultiDcSplitBrainMultiJvmSpec extends MultiNodeConfig { val first = role("first") val second = role("second") val third = role("third") @@ -19,35 +19,35 @@ object MultiTeamSplitBrainMultiJvmSpec extends MultiNodeConfig { nodeConfig(first, second)(ConfigFactory.parseString( """ - akka.cluster.team = "dc1" + akka.cluster.data-center = "dc1" akka.loglevel = INFO """)) nodeConfig(third, fourth)(ConfigFactory.parseString( """ - akka.cluster.team = "dc2" + akka.cluster.data-center = "dc2" akka.loglevel = INFO """)) testTransport(on = true) } -class MultiTeamSplitBrainMultiJvmNode1 extends MultiTeamSpec -class MultiTeamSplitBrainMultiJvmNode2 extends MultiTeamSpec -class MultiTeamSplitBrainMultiJvmNode3 extends MultiTeamSpec -class MultiTeamSplitBrainMultiJvmNode4 extends MultiTeamSpec -class MultiTeamSplitBrainMultiJvmNode5 extends MultiTeamSpec +class MultiDcSplitBrainMultiJvmNode1 extends MultiDcSpec +class MultiDcSplitBrainMultiJvmNode2 extends MultiDcSpec +class MultiDcSplitBrainMultiJvmNode3 extends MultiDcSpec +class MultiDcSplitBrainMultiJvmNode4 extends MultiDcSpec +class MultiDcSplitBrainMultiJvmNode5 extends MultiDcSpec -abstract class MultiTeamSplitBrainSpec - extends MultiNodeSpec(MultiTeamSplitBrainMultiJvmSpec) +abstract class MultiDcSplitBrainSpec + extends MultiNodeSpec(MultiDcSplitBrainMultiJvmSpec) with MultiNodeClusterSpec { - import MultiTeamSplitBrainMultiJvmSpec._ + import MultiDcSplitBrainMultiJvmSpec._ val dc1 = List(first, second) val dc2 = List(third, fourth) - def splitTeams(): Unit = { + def splitDataCenters(): Unit = { runOn(first) { for { dc1Node ← dc1 @@ -66,7 +66,7 @@ abstract class MultiTeamSplitBrainSpec } - def unsplitTeams(): Unit = { + def unsplitDataCenters(): Unit = { runOn(first) { for { dc1Node ← dc1 @@ -79,45 +79,45 @@ abstract class MultiTeamSplitBrainSpec awaitAllReachable() } - "A cluster with multiple cluster teams" must { - "be able to form two teams" in { + "A cluster with multiple data centers" must { + "be able to form two data centers" in { awaitClusterUp(first, second, third) } - "be able to have a team member join while there is inter-team split" in within(20.seconds) { - // introduce a split between teams - splitTeams() - enterBarrier("team-split-1") + "be able to have a data center member join while there is inter data center split" in within(20.seconds) { + // introduce a split between data centers + splitDataCenters() + enterBarrier("data-center-split-1") runOn(fourth) { cluster.join(third) } - enterBarrier("inter-team unreachability") + 
enterBarrier("inter-data-center unreachability") // should be able to join and become up since the // split is between dc1 and dc2 runOn(third, fourth) { awaitAssert(clusterView.members.collect { - case m if m.team == "dc2" && m.status == MemberStatus.Up ⇒ m.address + case m if m.dataCenter == "dc2" && m.status == MemberStatus.Up ⇒ m.address }) should ===(Set(address(third), address(fourth))) } enterBarrier("dc2-join-completed") - unsplitTeams() - enterBarrier("team-unsplit-1") + unsplitDataCenters() + enterBarrier("data-center-unsplit-1") runOn(dc1: _*) { awaitAssert(clusterView.members.collect { - case m if m.team == "dc2" && m.status == MemberStatus.Up ⇒ m.address + case m if m.dataCenter == "dc2" && m.status == MemberStatus.Up ⇒ m.address }) should ===(Set(address(third), address(fourth))) } - enterBarrier("inter-team-split-1-done") + enterBarrier("inter-data-center-split-1-done") } - "be able to have team member leave while there is inter-team split" in within(20.seconds) { - splitTeams() - enterBarrier("team-split-2") + "be able to have data center member leave while there is inter data center split" in within(20.seconds) { + splitDataCenters() + enterBarrier("data-center-split-2") runOn(fourth) { cluster.leave(third) @@ -128,13 +128,13 @@ abstract class MultiTeamSplitBrainSpec } enterBarrier("node-4-left") - unsplitTeams() - enterBarrier("team-unsplit-2") + unsplitDataCenters() + enterBarrier("data-center-unsplit-2") runOn(first, second) { awaitAssert(clusterView.members.filter(_.address == address(fourth)) should ===(Set.empty)) } - enterBarrier("inter-team-split-2-done") + enterBarrier("inter-data-center-split-2-done") } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/QuickRestartSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/QuickRestartSpec.scala index 9da57760ca..24322d6b21 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/QuickRestartSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/QuickRestartSpec.scala @@ -91,7 +91,7 @@ abstract class QuickRestartSpec Cluster(system).state.members.size should ===(totalNumberOfNodes) Cluster(system).state.members.map(_.status == MemberStatus.Up) // use the role to test that it is the new incarnation that joined, sneaky - Cluster(system).state.members.flatMap(_.roles) should ===(Set(s"round-$n", ClusterSettings.TeamRolePrefix + "default")) + Cluster(system).state.members.flatMap(_.roles) should ===(Set(s"round-$n", ClusterSettings.DcRolePrefix + "default")) } } enterBarrier("members-up-" + n) diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala index 3792ec722f..5bcec3aa07 100644 --- a/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala @@ -45,8 +45,8 @@ class ClusterConfigSpec extends AkkaSpec { DownRemovalMargin should ===(Duration.Zero) MinNrOfMembers should ===(1) MinNrOfMembersOfRole should ===(Map.empty[String, Int]) - Team should ===("default") - Roles should ===(Set(ClusterSettings.TeamRolePrefix + "default")) + DataCenter should ===("default") + Roles should ===(Set(ClusterSettings.DcRolePrefix + "default")) JmxEnabled should ===(true) UseDispatcher should ===(Dispatchers.DefaultDispatcherId) GossipDifferentViewProbability should ===(0.8 +- 0.0001) @@ -61,13 +61,13 @@ class ClusterConfigSpec extends AkkaSpec { |akka { | cluster { | roles = [ "hamlet" ] - | team = "blue" + | data-center = "blue" | } |} 
""".stripMargin).withFallback(ConfigFactory.load()), system.name) import settings._ - Roles should ===(Set("hamlet", ClusterSettings.TeamRolePrefix + "blue")) - Team should ===("blue") + Roles should ===(Set("hamlet", ClusterSettings.DcRolePrefix + "blue")) + DataCenter should ===("blue") } } } diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterDomainEventPublisherSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterDomainEventPublisherSpec.scala index 1d78f51a85..538546c50d 100644 --- a/akka-cluster/src/test/scala/akka/cluster/ClusterDomainEventPublisherSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/ClusterDomainEventPublisherSpec.scala @@ -133,7 +133,7 @@ class ClusterDomainEventPublisherSpec extends AkkaSpec(ClusterDomainEventPublish publisher ! PublishChanges(Gossip(members = SortedSet(cJoining, dUp))) subscriber.expectMsgAllOf( RoleLeaderChanged("GRP", Some(dUp.address)), - RoleLeaderChanged(ClusterSettings.TeamRolePrefix + ClusterSettings.DefaultTeam, Some(dUp.address)) + RoleLeaderChanged(ClusterSettings.DcRolePrefix + ClusterSettings.DefaultDataCenter, Some(dUp.address)) ) publisher ! PublishChanges(Gossip(members = SortedSet(cUp, dUp))) subscriber.expectMsg(RoleLeaderChanged("GRP", Some(cUp.address))) diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterDomainEventSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterDomainEventSpec.scala index f3c5f54ab6..785b813d44 100644 --- a/akka-cluster/src/test/scala/akka/cluster/ClusterDomainEventSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/ClusterDomainEventSpec.scala @@ -52,7 +52,7 @@ class ClusterDomainEventSpec extends WordSpec with Matchers { diffMemberEvents(g1, g2) should ===(Seq(MemberUp(bUp), MemberJoined(eJoining))) diffUnreachable(g1, g2, selfDummyAddress) should ===(Seq.empty) - diffSeen(ClusterSettings.DefaultTeam, g1, g2, selfDummyAddress) should ===(Seq(SeenChanged(convergence = true, seenBy = s2.map(_.address)))) + diffSeen(ClusterSettings.DefaultDataCenter, g1, g2, selfDummyAddress) should ===(Seq(SeenChanged(convergence = true, seenBy = s2.map(_.address)))) } "be produced for changed status of members" in { @@ -61,7 +61,7 @@ class ClusterDomainEventSpec extends WordSpec with Matchers { diffMemberEvents(g1, g2) should ===(Seq(MemberUp(aUp), MemberLeft(cLeaving), MemberJoined(eJoining))) diffUnreachable(g1, g2, selfDummyAddress) should ===(Seq.empty) - diffSeen(ClusterSettings.DefaultTeam, g1, g2, selfDummyAddress) should ===(Seq(SeenChanged(convergence = true, seenBy = s2.map(_.address)))) + diffSeen(ClusterSettings.DefaultDataCenter, g1, g2, selfDummyAddress) should ===(Seq(SeenChanged(convergence = true, seenBy = s2.map(_.address)))) } "be produced for members in unreachable" in { @@ -76,7 +76,7 @@ class ClusterDomainEventSpec extends WordSpec with Matchers { diffUnreachable(g1, g2, selfDummyAddress) should ===(Seq(UnreachableMember(bDown))) // never include self member in unreachable diffUnreachable(g1, g2, bDown.uniqueAddress) should ===(Seq()) - diffSeen(ClusterSettings.DefaultTeam, g1, g2, selfDummyAddress) should ===(Seq.empty) + diffSeen(ClusterSettings.DefaultDataCenter, g1, g2, selfDummyAddress) should ===(Seq.empty) } "be produced for members becoming reachable after unreachable" in { @@ -104,7 +104,7 @@ class ClusterDomainEventSpec extends WordSpec with Matchers { diffMemberEvents(g1, g2) should ===(Seq(MemberRemoved(dRemoved, Exiting))) diffUnreachable(g1, g2, selfDummyAddress) should ===(Seq.empty) - diffSeen(ClusterSettings.DefaultTeam, g1, g2, 
selfDummyAddress) should ===(Seq(SeenChanged(convergence = true, seenBy = s2.map(_.address)))) + diffSeen(ClusterSettings.DefaultDataCenter, g1, g2, selfDummyAddress) should ===(Seq(SeenChanged(convergence = true, seenBy = s2.map(_.address)))) } "be produced for convergence changes" in { @@ -113,10 +113,10 @@ class ClusterDomainEventSpec extends WordSpec with Matchers { diffMemberEvents(g1, g2) should ===(Seq.empty) diffUnreachable(g1, g2, selfDummyAddress) should ===(Seq.empty) - diffSeen(ClusterSettings.DefaultTeam, g1, g2, selfDummyAddress) should ===(Seq(SeenChanged(convergence = true, seenBy = Set(aUp.address, bUp.address)))) + diffSeen(ClusterSettings.DefaultDataCenter, g1, g2, selfDummyAddress) should ===(Seq(SeenChanged(convergence = true, seenBy = Set(aUp.address, bUp.address)))) diffMemberEvents(g2, g1) should ===(Seq.empty) diffUnreachable(g2, g1, selfDummyAddress) should ===(Seq.empty) - diffSeen(ClusterSettings.DefaultTeam, g2, g1, selfDummyAddress) should ===(Seq(SeenChanged(convergence = true, seenBy = Set(aUp.address, bUp.address, eJoining.address)))) + diffSeen(ClusterSettings.DefaultDataCenter, g2, g1, selfDummyAddress) should ===(Seq(SeenChanged(convergence = true, seenBy = Set(aUp.address, bUp.address, eJoining.address)))) } "be produced for leader changes" in { @@ -125,33 +125,33 @@ class ClusterDomainEventSpec extends WordSpec with Matchers { diffMemberEvents(g1, g2) should ===(Seq(MemberRemoved(aRemoved, Up))) diffUnreachable(g1, g2, selfDummyAddress) should ===(Seq.empty) - diffSeen(ClusterSettings.DefaultTeam, g1, g2, selfDummyAddress) should ===(Seq(SeenChanged(convergence = true, seenBy = s2.map(_.address)))) - diffLeader(ClusterSettings.DefaultTeam, g1, g2, selfDummyAddress) should ===(Seq(LeaderChanged(Some(bUp.address)))) + diffSeen(ClusterSettings.DefaultDataCenter, g1, g2, selfDummyAddress) should ===(Seq(SeenChanged(convergence = true, seenBy = s2.map(_.address)))) + diffLeader(ClusterSettings.DefaultDataCenter, g1, g2, selfDummyAddress) should ===(Seq(LeaderChanged(Some(bUp.address)))) } - "be produced for role leader changes in the same team" in { + "be produced for role leader changes in the same data center" in { val g0 = Gossip.empty val g1 = Gossip(members = SortedSet(aUp, bUp, cUp, dLeaving, eJoining)) val g2 = Gossip(members = SortedSet(bUp, cUp, dExiting, eJoining)) - diffRolesLeader(ClusterSettings.DefaultTeam, g0, g1, selfDummyAddress) should ===( + diffRolesLeader(ClusterSettings.DefaultDataCenter, g0, g1, selfDummyAddress) should ===( Set( // since this role is implicitly added - RoleLeaderChanged(ClusterSettings.TeamRolePrefix + ClusterSettings.DefaultTeam, Some(aUp.address)), + RoleLeaderChanged(ClusterSettings.DcRolePrefix + ClusterSettings.DefaultDataCenter, Some(aUp.address)), RoleLeaderChanged("AA", Some(aUp.address)), RoleLeaderChanged("AB", Some(aUp.address)), RoleLeaderChanged("BB", Some(bUp.address)), RoleLeaderChanged("DD", Some(dLeaving.address)), RoleLeaderChanged("DE", Some(dLeaving.address)), RoleLeaderChanged("EE", Some(eUp.address)))) - diffRolesLeader(ClusterSettings.DefaultTeam, g1, g2, selfDummyAddress) should ===( + diffRolesLeader(ClusterSettings.DefaultDataCenter, g1, g2, selfDummyAddress) should ===( Set( - RoleLeaderChanged(ClusterSettings.TeamRolePrefix + ClusterSettings.DefaultTeam, Some(bUp.address)), + RoleLeaderChanged(ClusterSettings.DcRolePrefix + ClusterSettings.DefaultDataCenter, Some(bUp.address)), RoleLeaderChanged("AA", None), RoleLeaderChanged("AB", Some(bUp.address)), RoleLeaderChanged("DE", 
Some(eJoining.address)))) } - "not be produced for role leader changes in other teams" in { + "not be produced for role leader changes in other data centers" in { val g0 = Gossip.empty val g1 = Gossip(members = SortedSet(aUp, bUp, cUp, dLeaving, eJoining)) val g2 = Gossip(members = SortedSet(bUp, cUp, dExiting, eJoining)) diff --git a/akka-cluster/src/test/scala/akka/cluster/GossipSpec.scala b/akka-cluster/src/test/scala/akka/cluster/GossipSpec.scala index 810555adfc..ab3f9a484e 100644 --- a/akka-cluster/src/test/scala/akka/cluster/GossipSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/GossipSpec.scala @@ -7,7 +7,7 @@ package akka.cluster import org.scalatest.WordSpec import org.scalatest.Matchers import akka.actor.Address -import akka.cluster.ClusterSettings.DefaultTeam +import akka.cluster.ClusterSettings.DefaultDataCenter import scala.collection.immutable.SortedSet @@ -27,55 +27,54 @@ class GossipSpec extends WordSpec with Matchers { val e2 = TestMember(e1.address, Up) val e3 = TestMember(e1.address, Down) - val dc1a1 = TestMember(Address("akka.tcp", "sys", "a", 2552), Up, Set.empty, team = "dc1") - val dc1b1 = TestMember(Address("akka.tcp", "sys", "b", 2552), Up, Set.empty, team = "dc1") - val dc2c1 = TestMember(Address("akka.tcp", "sys", "c", 2552), Up, Set.empty, team = "dc2") - val dc2d1 = TestMember(Address("akka.tcp", "sys", "d", 2552), Up, Set.empty, team = "dc2") - val dc2d2 = TestMember(dc2d1.address, status = Down, roles = Set.empty, team = dc2d1.team) + val dc1a1 = TestMember(Address("akka.tcp", "sys", "a", 2552), Up, Set.empty, dataCenter = "dc1") + val dc1b1 = TestMember(Address("akka.tcp", "sys", "b", 2552), Up, Set.empty, dataCenter = "dc1") + val dc2c1 = TestMember(Address("akka.tcp", "sys", "c", 2552), Up, Set.empty, dataCenter = "dc2") + val dc2d1 = TestMember(Address("akka.tcp", "sys", "d", 2552), Up, Set.empty, dataCenter = "dc2") + val dc2d2 = TestMember(dc2d1.address, status = Down, roles = Set.empty, dataCenter = dc2d1.dataCenter) "A Gossip" must { "have correct test setup" in { List(a1, a2, b1, b2, c1, c2, c3, d1, e1, e2, e3).foreach(m ⇒ - m.team should ===(DefaultTeam) - ) + m.dataCenter should ===(DefaultDataCenter)) } "reach convergence when it's empty" in { - Gossip.empty.convergence(DefaultTeam, a1.uniqueAddress, Set.empty) should ===(true) + Gossip.empty.convergence(DefaultDataCenter, a1.uniqueAddress, Set.empty) should ===(true) } "reach convergence for one node" in { val g1 = Gossip(members = SortedSet(a1)).seen(a1.uniqueAddress) - g1.convergence(DefaultTeam, a1.uniqueAddress, Set.empty) should ===(true) + g1.convergence(DefaultDataCenter, a1.uniqueAddress, Set.empty) should ===(true) } "not reach convergence until all have seen version" in { val g1 = Gossip(members = SortedSet(a1, b1)).seen(a1.uniqueAddress) - g1.convergence(DefaultTeam, a1.uniqueAddress, Set.empty) should ===(false) + g1.convergence(DefaultDataCenter, a1.uniqueAddress, Set.empty) should ===(false) } "reach convergence for two nodes" in { val g1 = Gossip(members = SortedSet(a1, b1)).seen(a1.uniqueAddress).seen(b1.uniqueAddress) - g1.convergence(DefaultTeam, a1.uniqueAddress, Set.empty) should ===(true) + g1.convergence(DefaultDataCenter, a1.uniqueAddress, Set.empty) should ===(true) } "reach convergence, skipping joining" in { // e1 is joining val g1 = Gossip(members = SortedSet(a1, b1, e1)).seen(a1.uniqueAddress).seen(b1.uniqueAddress) - g1.convergence(DefaultTeam, a1.uniqueAddress, Set.empty) should ===(true) + g1.convergence(DefaultDataCenter, a1.uniqueAddress, Set.empty) 
should ===(true) } "reach convergence, skipping down" in { // e3 is down val g1 = Gossip(members = SortedSet(a1, b1, e3)).seen(a1.uniqueAddress).seen(b1.uniqueAddress) - g1.convergence(DefaultTeam, a1.uniqueAddress, Set.empty) should ===(true) + g1.convergence(DefaultDataCenter, a1.uniqueAddress, Set.empty) should ===(true) } "reach convergence, skipping Leaving with exitingConfirmed" in { // c1 is Leaving val g1 = Gossip(members = SortedSet(a1, b1, c1)).seen(a1.uniqueAddress).seen(b1.uniqueAddress) - g1.convergence(DefaultTeam, a1.uniqueAddress, Set(c1.uniqueAddress)) should ===(true) + g1.convergence(DefaultDataCenter, a1.uniqueAddress, Set(c1.uniqueAddress)) should ===(true) } "reach convergence, skipping unreachable Leaving with exitingConfirmed" in { @@ -83,16 +82,16 @@ class GossipSpec extends WordSpec with Matchers { val r1 = Reachability.empty.unreachable(b1.uniqueAddress, c1.uniqueAddress) val g1 = Gossip(members = SortedSet(a1, b1, c1), overview = GossipOverview(reachability = r1)) .seen(a1.uniqueAddress).seen(b1.uniqueAddress) - g1.convergence(DefaultTeam, a1.uniqueAddress, Set(c1.uniqueAddress)) should ===(true) + g1.convergence(DefaultDataCenter, a1.uniqueAddress, Set(c1.uniqueAddress)) should ===(true) } "not reach convergence when unreachable" in { val r1 = Reachability.empty.unreachable(b1.uniqueAddress, a1.uniqueAddress) val g1 = (Gossip(members = SortedSet(a1, b1), overview = GossipOverview(reachability = r1))) .seen(a1.uniqueAddress).seen(b1.uniqueAddress) - g1.convergence(DefaultTeam, b1.uniqueAddress, Set.empty) should ===(false) + g1.convergence(DefaultDataCenter, b1.uniqueAddress, Set.empty) should ===(false) // but from a1's point of view (it knows that itself is not unreachable) - g1.convergence(DefaultTeam, a1.uniqueAddress, Set.empty) should ===(true) + g1.convergence(DefaultDataCenter, a1.uniqueAddress, Set.empty) should ===(true) } "reach convergence when downed node has observed unreachable" in { @@ -100,7 +99,7 @@ class GossipSpec extends WordSpec with Matchers { val r1 = Reachability.empty.unreachable(e3.uniqueAddress, a1.uniqueAddress) val g1 = (Gossip(members = SortedSet(a1, b1, e3), overview = GossipOverview(reachability = r1))) .seen(a1.uniqueAddress).seen(b1.uniqueAddress).seen(e3.uniqueAddress) - g1.convergence(DefaultTeam, b1.uniqueAddress, Set.empty) should ===(true) + g1.convergence(DefaultDataCenter, b1.uniqueAddress, Set.empty) should ===(true) } "merge members by status priority" in { @@ -147,37 +146,37 @@ class GossipSpec extends WordSpec with Matchers { } "have leader as first member based on ordering, except Exiting status" in { - Gossip(members = SortedSet(c2, e2)).teamLeader(DefaultTeam, c2.uniqueAddress) should ===(Some(c2.uniqueAddress)) - Gossip(members = SortedSet(c3, e2)).teamLeader(DefaultTeam, c3.uniqueAddress) should ===(Some(e2.uniqueAddress)) - Gossip(members = SortedSet(c3)).teamLeader(DefaultTeam, c3.uniqueAddress) should ===(Some(c3.uniqueAddress)) + Gossip(members = SortedSet(c2, e2)).dcLeader(DefaultDataCenter, c2.uniqueAddress) should ===(Some(c2.uniqueAddress)) + Gossip(members = SortedSet(c3, e2)).dcLeader(DefaultDataCenter, c3.uniqueAddress) should ===(Some(e2.uniqueAddress)) + Gossip(members = SortedSet(c3)).dcLeader(DefaultDataCenter, c3.uniqueAddress) should ===(Some(c3.uniqueAddress)) } "have leader as first reachable member based on ordering" in { val r1 = Reachability.empty.unreachable(e2.uniqueAddress, c2.uniqueAddress) val g1 = Gossip(members = SortedSet(c2, e2), overview = GossipOverview(reachability = r1)) - 
g1.teamLeader(DefaultTeam, e2.uniqueAddress) should ===(Some(e2.uniqueAddress)) + g1.dcLeader(DefaultDataCenter, e2.uniqueAddress) should ===(Some(e2.uniqueAddress)) // but when c2 is selfUniqueAddress - g1.teamLeader(DefaultTeam, c2.uniqueAddress) should ===(Some(c2.uniqueAddress)) + g1.dcLeader(DefaultDataCenter, c2.uniqueAddress) should ===(Some(c2.uniqueAddress)) } "not have Down member as leader" in { - Gossip(members = SortedSet(e3)).teamLeader(DefaultTeam, e3.uniqueAddress) should ===(None) + Gossip(members = SortedSet(e3)).dcLeader(DefaultDataCenter, e3.uniqueAddress) should ===(None) } - "have a leader per team" in { + "have a leader per data center" in { val g1 = Gossip(members = SortedSet(dc1a1, dc1b1, dc2c1, dc2d1)) // everybodys point of view is dc1a1 being leader of dc1 - g1.teamLeader("dc1", dc1a1.uniqueAddress) should ===(Some(dc1a1.uniqueAddress)) - g1.teamLeader("dc1", dc1b1.uniqueAddress) should ===(Some(dc1a1.uniqueAddress)) - g1.teamLeader("dc1", dc2c1.uniqueAddress) should ===(Some(dc1a1.uniqueAddress)) - g1.teamLeader("dc1", dc2d1.uniqueAddress) should ===(Some(dc1a1.uniqueAddress)) + g1.dcLeader("dc1", dc1a1.uniqueAddress) should ===(Some(dc1a1.uniqueAddress)) + g1.dcLeader("dc1", dc1b1.uniqueAddress) should ===(Some(dc1a1.uniqueAddress)) + g1.dcLeader("dc1", dc2c1.uniqueAddress) should ===(Some(dc1a1.uniqueAddress)) + g1.dcLeader("dc1", dc2d1.uniqueAddress) should ===(Some(dc1a1.uniqueAddress)) // and dc2c1 being leader of dc2 - g1.teamLeader("dc2", dc1a1.uniqueAddress) should ===(Some(dc2c1.uniqueAddress)) - g1.teamLeader("dc2", dc1b1.uniqueAddress) should ===(Some(dc2c1.uniqueAddress)) - g1.teamLeader("dc2", dc2c1.uniqueAddress) should ===(Some(dc2c1.uniqueAddress)) - g1.teamLeader("dc2", dc2d1.uniqueAddress) should ===(Some(dc2c1.uniqueAddress)) + g1.dcLeader("dc2", dc1a1.uniqueAddress) should ===(Some(dc2c1.uniqueAddress)) + g1.dcLeader("dc2", dc1b1.uniqueAddress) should ===(Some(dc2c1.uniqueAddress)) + g1.dcLeader("dc2", dc2c1.uniqueAddress) should ===(Some(dc2c1.uniqueAddress)) + g1.dcLeader("dc2", dc2d1.uniqueAddress) should ===(Some(dc2c1.uniqueAddress)) } "merge seen table correctly" in { @@ -213,20 +212,20 @@ class GossipSpec extends WordSpec with Matchers { g3.youngestMember should ===(e2) } - "reach convergence per team" in { + "reach convergence per data center" in { val g = Gossip(members = SortedSet(dc1a1, dc1b1, dc2c1, dc2d1)) .seen(dc1a1.uniqueAddress) .seen(dc1b1.uniqueAddress) .seen(dc2c1.uniqueAddress) .seen(dc2d1.uniqueAddress) - g.teamLeader("dc1", dc1a1.uniqueAddress) should ===(Some(dc1a1.uniqueAddress)) + g.dcLeader("dc1", dc1a1.uniqueAddress) should ===(Some(dc1a1.uniqueAddress)) g.convergence("dc1", dc1a1.uniqueAddress, Set.empty) should ===(true) - g.teamLeader("dc2", dc2c1.uniqueAddress) should ===(Some(dc2c1.uniqueAddress)) + g.dcLeader("dc2", dc2c1.uniqueAddress) should ===(Some(dc2c1.uniqueAddress)) g.convergence("dc2", dc2c1.uniqueAddress, Set.empty) should ===(true) } - "reach convergence per team even if members of another team has not seen the gossip" in { + "reach convergence per data center even if members of another data center has not seen the gossip" in { val g = Gossip(members = SortedSet(dc1a1, dc1b1, dc2c1, dc2d1)) .seen(dc1a1.uniqueAddress) .seen(dc1b1.uniqueAddress) @@ -234,15 +233,15 @@ class GossipSpec extends WordSpec with Matchers { // dc2d1 has not seen the gossip // so dc1 can reach convergence - g.teamLeader("dc1", dc1a1.uniqueAddress) should ===(Some(dc1a1.uniqueAddress)) + g.dcLeader("dc1", dc1a1.uniqueAddress) 
should ===(Some(dc1a1.uniqueAddress)) g.convergence("dc1", dc1a1.uniqueAddress, Set.empty) should ===(true) // but dc2 cannot - g.teamLeader("dc2", dc2c1.uniqueAddress) should ===(Some(dc2c1.uniqueAddress)) + g.dcLeader("dc2", dc2c1.uniqueAddress) should ===(Some(dc2c1.uniqueAddress)) g.convergence("dc2", dc2c1.uniqueAddress, Set.empty) should ===(false) } - "reach convergence per team even if another team contains unreachable" in { + "reach convergence per data center even if another data center contains unreachable" in { val r1 = Reachability.empty.unreachable(dc2c1.uniqueAddress, dc2d1.uniqueAddress) val g = Gossip(members = SortedSet(dc1a1, dc1b1, dc2c1, dc2d1), overview = GossipOverview(reachability = r1)) @@ -251,16 +250,16 @@ class GossipSpec extends WordSpec with Matchers { .seen(dc2c1.uniqueAddress) .seen(dc2d1.uniqueAddress) - // this team doesn't care about dc2 having reachability problems and can reach convergence - g.teamLeader("dc1", dc1a1.uniqueAddress) should ===(Some(dc1a1.uniqueAddress)) + // this data center doesn't care about dc2 having reachability problems and can reach convergence + g.dcLeader("dc1", dc1a1.uniqueAddress) should ===(Some(dc1a1.uniqueAddress)) g.convergence("dc1", dc1a1.uniqueAddress, Set.empty) should ===(true) - // this team is cannot reach convergence because of unreachability within the team - g.teamLeader("dc2", dc2c1.uniqueAddress) should ===(Some(dc2c1.uniqueAddress)) + // this data center cannot reach convergence because of unreachability within the data center + g.dcLeader("dc2", dc2c1.uniqueAddress) should ===(Some(dc2c1.uniqueAddress)) g.convergence("dc2", dc2c1.uniqueAddress, Set.empty) should ===(false) } - "reach convergence per team even if there is unreachable nodes in another team" in { + "reach convergence per data center even if there are unreachable nodes in another data center" in { val r1 = Reachability.empty .unreachable(dc1a1.uniqueAddress, dc2d1.uniqueAddress) .unreachable(dc2d1.uniqueAddress, dc1a1.uniqueAddress) @@ -271,33 +270,33 @@ class GossipSpec extends WordSpec with Matchers { .seen(dc2c1.uniqueAddress) .seen(dc2d1.uniqueAddress) - // neither team is affected by the inter-team unreachability as far as convergence goes - g.teamLeader("dc1", dc1a1.uniqueAddress) should ===(Some(dc1a1.uniqueAddress)) + // neither data center is affected by the inter data center unreachability as far as convergence goes + g.dcLeader("dc1", dc1a1.uniqueAddress) should ===(Some(dc1a1.uniqueAddress)) g.convergence("dc1", dc1a1.uniqueAddress, Set.empty) should ===(true) - g.teamLeader("dc2", dc2c1.uniqueAddress) should ===(Some(dc2c1.uniqueAddress)) + g.dcLeader("dc2", dc2c1.uniqueAddress) should ===(Some(dc2c1.uniqueAddress)) g.convergence("dc2", dc2c1.uniqueAddress, Set.empty) should ===(true) } - "ignore cross team unreachability when determining inside of team reachability" in { + "ignore cross data center unreachability when determining inside of data center reachability" in { val r1 = Reachability.empty .unreachable(dc1a1.uniqueAddress, dc2c1.uniqueAddress) .unreachable(dc2c1.uniqueAddress, dc1a1.uniqueAddress) val g = Gossip(members = SortedSet(dc1a1, dc1b1, dc2c1, dc2d1), overview = GossipOverview(reachability = r1)) - // inside of the teams we don't care about the cross team unreachability + // inside of the data center we don't care about the cross data center unreachability g.isReachable(dc1a1.uniqueAddress, dc1b1.uniqueAddress) should ===(true) g.isReachable(dc1b1.uniqueAddress, dc1a1.uniqueAddress) should ===(true)
g.isReachable(dc2c1.uniqueAddress, dc2d1.uniqueAddress) should ===(true) g.isReachable(dc2d1.uniqueAddress, dc2c1.uniqueAddress) should ===(true) - g.isReachableExcludingDownedObservers(dc1a1.team, dc1b1.uniqueAddress) should ===(true) - g.isReachableExcludingDownedObservers(dc1b1.team, dc1a1.uniqueAddress) should ===(true) - g.isReachableExcludingDownedObservers(dc2c1.team, dc2d1.uniqueAddress) should ===(true) - g.isReachableExcludingDownedObservers(dc2d1.team, dc2c1.uniqueAddress) should ===(true) + g.isReachableExcludingDownedObservers(dc1a1.dataCenter, dc1b1.uniqueAddress) should ===(true) + g.isReachableExcludingDownedObservers(dc1b1.dataCenter, dc1a1.uniqueAddress) should ===(true) + g.isReachableExcludingDownedObservers(dc2c1.dataCenter, dc2d1.uniqueAddress) should ===(true) + g.isReachableExcludingDownedObservers(dc2d1.dataCenter, dc2c1.uniqueAddress) should ===(true) - // between teams it matters though + // between data centers it matters though g.isReachable(dc1a1.uniqueAddress, dc2c1.uniqueAddress) should ===(false) g.isReachable(dc2c1.uniqueAddress, dc1a1.uniqueAddress) should ===(false) // this isReachable method only says false for specific unreachable entries between the nodes @@ -305,25 +304,25 @@ class GossipSpec extends WordSpec with Matchers { g.isReachable(dc2d1.uniqueAddress, dc1a1.uniqueAddress) should ===(true) // this one looks at all unreachable-entries for the to-address - g.isReachableExcludingDownedObservers(dc1a1.team, dc2c1.uniqueAddress) should ===(false) - g.isReachableExcludingDownedObservers(dc1b1.team, dc2c1.uniqueAddress) should ===(false) - g.isReachableExcludingDownedObservers(dc2c1.team, dc1a1.uniqueAddress) should ===(false) - g.isReachableExcludingDownedObservers(dc2d1.team, dc1a1.uniqueAddress) should ===(false) + g.isReachableExcludingDownedObservers(dc1a1.dataCenter, dc2c1.uniqueAddress) should ===(false) + g.isReachableExcludingDownedObservers(dc1b1.dataCenter, dc2c1.uniqueAddress) should ===(false) + g.isReachableExcludingDownedObservers(dc2c1.dataCenter, dc1a1.uniqueAddress) should ===(false) + g.isReachableExcludingDownedObservers(dc2d1.dataCenter, dc1a1.uniqueAddress) should ===(false) // between the two other nodes there is no unreachability g.isReachable(dc1b1.uniqueAddress, dc2d1.uniqueAddress) should ===(true) g.isReachable(dc2d1.uniqueAddress, dc1b1.uniqueAddress) should ===(true) - g.isReachableExcludingDownedObservers(dc1b1.team, dc2d1.uniqueAddress) should ===(true) - g.isReachableExcludingDownedObservers(dc2d1.team, dc1b1.uniqueAddress) should ===(true) + g.isReachableExcludingDownedObservers(dc1b1.dataCenter, dc2d1.uniqueAddress) should ===(true) + g.isReachableExcludingDownedObservers(dc2d1.dataCenter, dc1b1.uniqueAddress) should ===(true) } - "not returning a downed team leader" in { + "not returning a downed data center leader" in { val g = Gossip(members = SortedSet(dc1a1.copy(Down), dc1b1)) g.leaderOf("dc1", g.members, dc1b1.uniqueAddress) should ===(Some(dc1b1.uniqueAddress)) } - "ignore cross team unreachability when determining team leader" in { + "ignore cross data center unreachability when determining data center leader" in { val r1 = Reachability.empty .unreachable(dc1a1.uniqueAddress, dc2d1.uniqueAddress) .unreachable(dc2d1.uniqueAddress, dc1a1.uniqueAddress) @@ -356,7 +355,7 @@ class GossipSpec extends WordSpec with Matchers { g.members.toList should ===(List(dc1a1, dc2d2)) } - "not reintroduce members from out-of-team gossip when merging" in { + "not reintroduce members from out-of data center gossip when merging" 
in { // dc1 does not know about any unreachability nor that the node has been downed val gdc1 = Gossip(members = SortedSet(dc1a1, dc1b1, dc2c1, dc2d1)) @@ -408,7 +407,7 @@ class GossipSpec extends WordSpec with Matchers { } "update members" in { - val joining = TestMember(Address("akka.tcp", "sys", "d", 2552), Joining, Set.empty, team = "dc2") + val joining = TestMember(Address("akka.tcp", "sys", "d", 2552), Joining, Set.empty, dataCenter = "dc2") val g = Gossip(members = SortedSet(dc1a1, joining)) g.member(joining.uniqueAddress).status should ===(Joining) diff --git a/akka-cluster/src/test/scala/akka/cluster/TestMember.scala b/akka-cluster/src/test/scala/akka/cluster/TestMember.scala index 028a727f33..58b33a395f 100644 --- a/akka-cluster/src/test/scala/akka/cluster/TestMember.scala +++ b/akka-cluster/src/test/scala/akka/cluster/TestMember.scala @@ -9,6 +9,6 @@ object TestMember { def apply(address: Address, status: MemberStatus): Member = apply(address, status, Set.empty) - def apply(address: Address, status: MemberStatus, roles: Set[String], team: ClusterSettings.Team = ClusterSettings.DefaultTeam): Member = - new Member(UniqueAddress(address, 0L), Int.MaxValue, status, roles + (ClusterSettings.TeamRolePrefix + team)) + def apply(address: Address, status: MemberStatus, roles: Set[String], dataCenter: ClusterSettings.DataCenter = ClusterSettings.DefaultDataCenter): Member = + new Member(UniqueAddress(address, 0L), Int.MaxValue, status, roles + (ClusterSettings.DcRolePrefix + dataCenter)) } diff --git a/akka-cluster/src/test/scala/akka/cluster/protobuf/ClusterMessageSerializerSpec.scala b/akka-cluster/src/test/scala/akka/cluster/protobuf/ClusterMessageSerializerSpec.scala index fd4c323309..8c7a174751 100644 --- a/akka-cluster/src/test/scala/akka/cluster/protobuf/ClusterMessageSerializerSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/protobuf/ClusterMessageSerializerSpec.scala @@ -80,25 +80,22 @@ class ClusterMessageSerializerSpec extends AkkaSpec( checkSerialization(InternalClusterAction.Welcome(uniqueAddress, g2)) } - "add a default team role if none is present" in { + "add a default data center role if none is present" in { val env = roundtrip(GossipEnvelope(a1.uniqueAddress, d1.uniqueAddress, Gossip(SortedSet(a1, d1)))) - env.gossip.members.head.roles should be(Set(ClusterSettings.TeamRolePrefix + "default")) - env.gossip.members.tail.head.roles should be(Set("r1", ClusterSettings.TeamRolePrefix + "foo")) + env.gossip.members.head.roles should be(Set(ClusterSettings.DcRolePrefix + "default")) + env.gossip.members.tail.head.roles should be(Set("r1", ClusterSettings.DcRolePrefix + "foo")) } } "Cluster router pool" must { "be serializable" in { checkSerialization(ClusterRouterPool( RoundRobinPool( - nrOfInstances = 4 - ), + nrOfInstances = 4), ClusterRouterPoolSettings( totalInstances = 2, maxInstancesPerNode = 5, allowLocalRoutees = true, - useRole = Some("Richard, Duke of Gloucester") - ) - )) + useRole = Some("Richard, Duke of Gloucester")))) } } diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/Replicator.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/Replicator.scala index 151ef0a5ed..ca79c9be89 100644 --- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/Replicator.scala +++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/Replicator.scala @@ -1134,7 +1134,7 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog override def preStart(): Unit = { if (hasDurableKeys) durableStore ! 
LoadAll - // not using LeaderChanged/RoleLeaderChanged because here we need one node independent of team + // not using LeaderChanged/RoleLeaderChanged because here we need one node independent of data center cluster.subscribe(self, initialStateMode = InitialStateAsEvents, classOf[MemberEvent], classOf[ReachabilityEvent]) } diff --git a/project/MiMa.scala b/project/MiMa.scala index 5c55c43c01..73bf9c5897 100644 --- a/project/MiMa.scala +++ b/project/MiMa.scala @@ -1239,7 +1239,7 @@ object MiMa extends AutoPlugin { // older versions will be missing the method. We accept that incompatibility for now. ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.remote.transport.AssociationHandle.disassociate"), - // #23228 single leader per cluster team + // #23228 single leader per cluster data center ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.Gossip.apply"), ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.Gossip.copy"), ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.Gossip.this"), From 867cc97bddf4ff5352a1c7ad4ede22e52c35fe6d Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Tue, 4 Jul 2017 21:58:03 +0200 Subject: [PATCH 11/34] Refactoring of Gossip class, #23290 * move methods that depend on selfUniqueAddress and selfDc to a separate MembershipState class, which also holds the latest gossip * this removes the need to pass in the parameters from everywhere and makes it easier to cache some results * makes it clear that those parameters are always selfUniqueAddress and selfDc, instead of some arbitrary node/dc --- .../main/scala/akka/cluster/AutoDown.scala | 2 +- .../scala/akka/cluster/ClusterDaemon.scala | 96 +++++++------- .../scala/akka/cluster/ClusterEvent.scala | 108 +++++++++------- .../src/main/scala/akka/cluster/Gossip.scala | 93 +------------ .../src/main/scala/akka/cluster/Member.scala | 2 +- .../scala/akka/cluster/MembershipState.scala | 122 ++++++++++++++++++ .../ClusterDomainEventPublisherSpec.scala | 50 ++++--- .../akka/cluster/ClusterDomainEventSpec.scala | 76 ++++++----- .../test/scala/akka/cluster/GossipSpec.scala | 118 ++++++++--------- project/MiMa.scala | 40 +++--- 10 files changed, 386 insertions(+), 321 deletions(-) create mode 100644 akka-cluster/src/main/scala/akka/cluster/MembershipState.scala diff --git a/akka-cluster/src/main/scala/akka/cluster/AutoDown.scala b/akka-cluster/src/main/scala/akka/cluster/AutoDown.scala index 2685920277..187ef31a52 100644 --- a/akka-cluster/src/main/scala/akka/cluster/AutoDown.scala +++ b/akka-cluster/src/main/scala/akka/cluster/AutoDown.scala @@ -101,7 +101,7 @@ private[cluster] abstract class AutoDownBase(autoDownUnreachableAfter: FiniteDur import context.dispatcher - val skipMemberStatus = Gossip.convergenceSkipUnreachableWithMemberStatus + val skipMemberStatus = MembershipState.convergenceSkipUnreachableWithMemberStatus var scheduledUnreachable: Map[UniqueAddress, Cancellable] = Map.empty var pendingUnreachable: Set[UniqueAddress] = Set.empty diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterDaemon.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterDaemon.scala index ae5815c978..5f8d539235 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterDaemon.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterDaemon.scala @@ -13,6 +13,7 @@ import akka.actor._ import akka.actor.SupervisorStrategy.Stop import akka.cluster.MemberStatus._ import akka.cluster.ClusterEvent._ +import akka.cluster.ClusterSettings.DataCenter import akka.dispatch.{
RequiresMessageQueue, UnboundedMessageQueueSemantics } import scala.collection.breakOut @@ -157,7 +158,7 @@ private[cluster] object InternalClusterAction { final case class SendCurrentClusterState(receiver: ActorRef) extends SubscriptionMessage sealed trait PublishMessage - final case class PublishChanges(newGossip: Gossip) extends PublishMessage + final case class PublishChanges(state: MembershipState) extends PublishMessage final case class PublishEvent(event: ClusterDomainEvent) extends PublishMessage final case object ExitingCompleted @@ -277,6 +278,7 @@ private[cluster] object ClusterCoreDaemon { val NumberOfGossipsBeforeShutdownWhenLeaderExits = 5 val MaxGossipsBeforeShuttingDownMyself = 5 + } /** @@ -287,6 +289,7 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef) extends Actor with with RequiresMessageQueue[UnboundedMessageQueueSemantics] { import InternalClusterAction._ import ClusterCoreDaemon._ + import MembershipState._ val cluster = Cluster(context.system) import cluster.{ selfAddress, selfRoles, scheduler, failureDetector } @@ -299,7 +302,8 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef) extends Actor with // note that self is not initially member, // and the Gossip is not versioned for this 'Node' yet - var latestGossip: Gossip = Gossip.empty + var membershipState = MembershipState(Gossip.empty, cluster.selfUniqueAddress, cluster.settings.DataCenter) + def latestGossip: Gossip = membershipState.latestGossip val statsEnabled = PublishStatsInterval.isFinite var gossipStats = GossipStats() @@ -478,7 +482,7 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef) extends Actor with def initJoin(): Unit = { val selfStatus = latestGossip.member(selfUniqueAddress).status - if (Gossip.removeUnreachableWithMemberStatus.contains(selfStatus)) { + if (removeUnreachableWithMemberStatus.contains(selfStatus)) { // prevents a Down and Exiting node from being used for joining logInfo("Sending InitJoinNack message from node [{}] to [{}]", selfAddress, sender()) sender() ! InitJoinNack(selfAddress) @@ -570,7 +574,7 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef) extends Actor with log.warning( "Member with wrong ActorSystem name tried to join, but was ignored, expected [{}] but was [{}]", selfAddress.system, joiningNode.address.system) - else if (Gossip.removeUnreachableWithMemberStatus.contains(selfStatus)) + else if (removeUnreachableWithMemberStatus.contains(selfStatus)) logInfo("Trying to join [{}] to [{}] member, ignoring. Use a member that is Up instead.", joiningNode, selfStatus) else { val localMembers = latestGossip.members @@ -616,7 +620,7 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef) extends Actor with } else sender() ! 
Welcome(selfUniqueAddress, latestGossip) - publish(latestGossip) + publishMembershipState() } } } @@ -629,10 +633,10 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef) extends Actor with if (joinWith != from.address) logInfo("Ignoring welcome from [{}] when trying to join with [{}]", from.address, joinWith) else { - latestGossip = gossip seen selfUniqueAddress + membershipState = membershipState.copy(latestGossip = gossip).seen() logInfo("Welcome from [{}]", from.address) assertLatestGossip() - publish(latestGossip) + publishMembershipState() if (from != selfUniqueAddress) gossipTo(from, sender()) becomeInitialized() @@ -653,7 +657,7 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef) extends Actor with updateLatestGossip(newGossip) logInfo("Marked address [{}] as [{}]", address, Leaving) - publish(latestGossip) + publishMembershipState() // immediate gossip to speed up the leaving process gossip() } @@ -664,9 +668,9 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef) extends Actor with // ExitingCompleted sent via CoordinatedShutdown to continue the leaving process. exitingTasksInProgress = false // mark as seen - latestGossip = latestGossip seen selfUniqueAddress + membershipState = membershipState.seen() assertLatestGossip() - publish(latestGossip) + publishMembershipState() // Let others know (best effort) before shutdown. Otherwise they will not see // convergence of the Exiting state until they have detected this node as @@ -681,10 +685,10 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef) extends Actor with // send ExitingConfirmed to two potential leaders val membersExceptSelf = latestGossip.members.filter(_.uniqueAddress != selfUniqueAddress) - latestGossip.leaderOf(selfDc, membersExceptSelf, selfUniqueAddress) match { + membershipState.leaderOf(membersExceptSelf) match { case Some(node1) ⇒ clusterCore(node1.address) ! ExitingConfirmed(selfUniqueAddress) - latestGossip.leaderOf(selfDc, membersExceptSelf.filterNot(_.uniqueAddress == node1), selfUniqueAddress) match { + membershipState.leaderOf(membersExceptSelf.filterNot(_.uniqueAddress == node1)) match { case Some(node2) ⇒ clusterCore(node2.address) ! ExitingConfirmed(selfUniqueAddress) case None ⇒ // no more potential leader @@ -723,7 +727,7 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef) extends Actor with val localMembers = localGossip.members val localOverview = localGossip.overview val localSeen = localOverview.seen - val localReachability = localGossip.dcReachability(selfDc) + val localReachability = membershipState.dcReachability // check if the node to DOWN is in the `members` set localMembers.find(_.address == address) match { @@ -735,7 +739,7 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef) extends Actor with val newGossip = localGossip.markAsDown(m) updateLatestGossip(newGossip) - publish(latestGossip) + publishMembershipState() case Some(_) ⇒ // already down case None ⇒ logInfo("Ignoring down of unknown node [{}]", address) @@ -753,7 +757,7 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef) extends Actor with log.warning( "Cluster Node [{}] - Marking node as TERMINATED [{}], due to quarantine. 
Node roles [{}]", selfAddress, node.address, selfRoles.mkString(",")) - publish(latestGossip) + publishMembershipState() downing(node.address) } } @@ -829,14 +833,14 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef) extends Actor with // Perform the same pruning (clear of VectorClock) as the leader did when removing a member. // Removal of member itself is handled in merge (pickHighestPriority) val prunedLocalGossip = localGossip.members.foldLeft(localGossip) { (g, m) ⇒ - if (Gossip.removeUnreachableWithMemberStatus(m.status) && !remoteGossip.members.contains(m)) { + if (removeUnreachableWithMemberStatus(m.status) && !remoteGossip.members.contains(m)) { log.debug("Cluster Node [{}] - Pruned conflicting local gossip: {}", selfAddress, m) g.prune(VectorClock.Node(vclockName(m.uniqueAddress))) } else g } val prunedRemoteGossip = remoteGossip.members.foldLeft(remoteGossip) { (g, m) ⇒ - if (Gossip.removeUnreachableWithMemberStatus(m.status) && !localGossip.members.contains(m)) { + if (removeUnreachableWithMemberStatus(m.status) && !localGossip.members.contains(m)) { log.debug("Cluster Node [{}] - Pruned conflicting remote gossip: {}", selfAddress, m) g.prune(VectorClock.Node(vclockName(m.uniqueAddress))) } else @@ -849,9 +853,9 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef) extends Actor with // Don't mark gossip state as seen while exiting is in progress, e.g. // shutting down singleton actors. This delays removal of the member until // the exiting tasks have been completed. - latestGossip = + membershipState = membershipState.copy(latestGossip = if (exitingTasksInProgress) winningGossip - else winningGossip seen selfUniqueAddress + else winningGossip seen selfUniqueAddress) assertLatestGossip() // for all new joining nodes we remove them from the failure detector @@ -877,7 +881,7 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef) extends Actor with } } - publish(latestGossip) + publishMembershipState() val selfStatus = latestGossip.member(selfUniqueAddress).status if (selfStatus == Exiting && !exitingTasksInProgress) { @@ -1004,11 +1008,11 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef) extends Actor with * Runs periodic leader actions, such as member status transitions, assigning partitions etc. 
*/ def leaderActions(): Unit = { - if (latestGossip.isDcLeader(selfDc, selfUniqueAddress, selfUniqueAddress)) { + if (membershipState.isLeader(selfUniqueAddress)) { // only run the leader actions if we are the LEADER of the data center val firstNotice = 20 val periodicNotice = 60 - if (latestGossip.convergence(selfDc, selfUniqueAddress, exitingConfirmed)) { + if (membershipState.convergence(exitingConfirmed)) { if (leaderActionCounter >= firstNotice) logInfo("Leader can perform its duties again") leaderActionCounter = 0 @@ -1021,7 +1025,7 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef) extends Actor with if (leaderActionCounter == firstNotice || leaderActionCounter % periodicNotice == 0) logInfo( "Leader can currently not perform its duties, reachability status: [{}], member status: [{}]", - latestGossip.dcReachabilityExcludingDownedObservers(selfDc), + membershipState.dcReachabilityExcludingDownedObservers, latestGossip.members.collect { case m if m.dataCenter == selfDc ⇒ s"${m.address} ${m.status} seen=${latestGossip.seenByNode(m.uniqueAddress)}" @@ -1036,8 +1040,8 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef) extends Actor with if (latestGossip.member(selfUniqueAddress).status == Down) { // When all reachable have seen the state this member will shutdown itself when it has // status Down. The down commands should spread before we shutdown. - val unreachable = latestGossip.dcReachability(selfDc).allUnreachableOrTerminated - val downed = latestGossip.dcMembers(selfDc).collect { case m if m.status == Down ⇒ m.uniqueAddress } + val unreachable = membershipState.dcReachability.allUnreachableOrTerminated + val downed = membershipState.dcMembers.collect { case m if m.status == Down ⇒ m.uniqueAddress } if (downed.forall(node ⇒ unreachable(node) || latestGossip.seenByNode(node))) { // the reason for not shutting down immediately is to give the gossip a chance to spread // the downing information to other downed nodes, so that they can shutdown themselves @@ -1072,9 +1076,9 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef) extends Actor with def leaderActionsOnConvergence(): Unit = { val removedUnreachable = for { - node ← latestGossip.dcReachability(selfDc).allUnreachableOrTerminated + node ← membershipState.dcReachability.allUnreachableOrTerminated m = latestGossip.member(node) - if m.dataCenter == selfDc && Gossip.removeUnreachableWithMemberStatus(m.status) + if m.dataCenter == selfDc && removeUnreachableWithMemberStatus(m.status) } yield m val removedExitingConfirmed = exitingConfirmed.filter { n ⇒ @@ -1148,7 +1152,7 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef) extends Actor with val pruned = updatedGossip.pruneTombstones(System.currentTimeMillis() - PruneGossipTombstonesAfter.toMillis) if (pruned ne latestGossip) { updateLatestGossip(pruned) - publish(pruned) + publishMembershipState() } } @@ -1161,7 +1165,7 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef) extends Actor with m.dataCenter == selfDc && m.status == Joining && enoughMembers && - latestGossip.dcReachabilityExcludingDownedObservers(selfDc).isReachable(m.uniqueAddress) + membershipState.dcReachabilityExcludingDownedObservers.isReachable(m.uniqueAddress) val changedMembers = localMembers.collect { case m if isJoiningToWeaklyUp(m) ⇒ m.copy(status = WeaklyUp) } @@ -1177,7 +1181,7 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef) extends Actor with logInfo("Leader is moving node [{}] to [{}]", m.address, m.status) } - 
publish(latestGossip) + publishMembershipState() } } @@ -1230,7 +1234,7 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef) extends Actor with if (newlyDetectedReachableMembers.nonEmpty) logInfo("Marking node(s) as REACHABLE [{}]. Node roles [{}]", newlyDetectedReachableMembers.mkString(", "), selfRoles.mkString(",")) - publish(latestGossip) + publishMembershipState() } } } @@ -1269,23 +1273,25 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef) extends Actor with clusterCore(node.address) ! GossipStatus(selfUniqueAddress, latestGossip.version) def validNodeForGossip(node: UniqueAddress): Boolean = - node != selfUniqueAddress && latestGossip.isReachableExcludingDownedObservers(selfDc, node) + node != selfUniqueAddress && membershipState.isReachableExcludingDownedObservers(node) - def updateLatestGossip(newGossip: Gossip): Unit = { + def updateLatestGossip(gossip: Gossip): Unit = { // Updating the vclock version for the changes - val versionedGossip = newGossip :+ vclockNode + val versionedGossip = gossip :+ vclockNode // Don't mark gossip state as seen while exiting is in progress, e.g. // shutting down singleton actors. This delays removal of the member until // the exiting tasks have been completed. - if (exitingTasksInProgress) - latestGossip = versionedGossip.clearSeen() - else { - // Nobody else has seen this gossip but us - val seenVersionedGossip = versionedGossip onlySeen (selfUniqueAddress) - // Update the state with the new gossip - latestGossip = seenVersionedGossip - } + val newGossip = + if (exitingTasksInProgress) + versionedGossip.clearSeen() + else { + // Nobody else has seen this gossip but us + val seenVersionedGossip = versionedGossip onlySeen (selfUniqueAddress) + // Update the state with the new gossip + seenVersionedGossip + } + membershipState = membershipState.copy(newGossip) assertLatestGossip() } @@ -1293,11 +1299,11 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef) extends Actor with if (Cluster.isAssertInvariantsEnabled && latestGossip.version.versions.size > latestGossip.members.size) throw new IllegalStateException(s"Too many vector clock entries in gossip state ${latestGossip}") - def publish(newGossip: Gossip): Unit = { + def publishMembershipState(): Unit = { if (cluster.settings.Debug.VerboseGossipLogging) - log.debug("Cluster Node [{}] dc [{}] - New gossip published [{}]", selfAddress, cluster.settings.DataCenter, newGossip) + log.debug("Cluster Node [{}] dc [{}] - New gossip published [{}]", selfAddress, cluster.settings.DataCenter, membershipState.latestGossip) - publisher ! PublishChanges(newGossip) + publisher ! 
PublishChanges(membershipState) if (PublishStatsInterval == Duration.Zero) publishInternalStats() } diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterEvent.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterEvent.scala index c2728d8eb1..b0b72633a1 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterEvent.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterEvent.scala @@ -265,12 +265,14 @@ object ClusterEvent { /** * INTERNAL API */ - private[cluster] def diffUnreachable(oldGossip: Gossip, newGossip: Gossip, selfUniqueAddress: UniqueAddress): immutable.Seq[UnreachableMember] = - if (newGossip eq oldGossip) Nil + private[cluster] def diffUnreachable(oldState: MembershipState, newState: MembershipState): immutable.Seq[UnreachableMember] = + if (newState eq oldState) Nil else { + val oldGossip = oldState.latestGossip + val newGossip = newState.latestGossip val oldUnreachableNodes = oldGossip.overview.reachability.allUnreachableOrTerminated (newGossip.overview.reachability.allUnreachableOrTerminated.collect { - case node if !oldUnreachableNodes.contains(node) && node != selfUniqueAddress ⇒ + case node if !oldUnreachableNodes.contains(node) && node != newState.selfUniqueAddress ⇒ UnreachableMember(newGossip.member(node)) })(collection.breakOut) } @@ -278,11 +280,13 @@ object ClusterEvent { /** * INTERNAL API */ - private[cluster] def diffReachable(oldGossip: Gossip, newGossip: Gossip, selfUniqueAddress: UniqueAddress): immutable.Seq[ReachableMember] = - if (newGossip eq oldGossip) Nil + private[cluster] def diffReachable(oldState: MembershipState, newState: MembershipState): immutable.Seq[ReachableMember] = + if (newState eq oldState) Nil else { - (oldGossip.overview.reachability.allUnreachable.collect { - case node if newGossip.hasMember(node) && newGossip.overview.reachability.isReachable(node) && node != selfUniqueAddress ⇒ + val oldGossip = oldState.latestGossip + val newGossip = newState.latestGossip + (oldState.overview.reachability.allUnreachable.collect { + case node if newGossip.hasMember(node) && newGossip.overview.reachability.isReachable(node) && node != newState.selfUniqueAddress ⇒ ReachableMember(newGossip.member(node)) })(collection.breakOut) @@ -291,9 +295,11 @@ object ClusterEvent { /** * INTERNAL API. 
*/ - private[cluster] def diffMemberEvents(oldGossip: Gossip, newGossip: Gossip): immutable.Seq[MemberEvent] = - if (newGossip eq oldGossip) Nil + private[cluster] def diffMemberEvents(oldState: MembershipState, newState: MembershipState): immutable.Seq[MemberEvent] = + if (newState eq oldState) Nil else { + val oldGossip = oldState.latestGossip + val newGossip = newState.latestGossip val newMembers = newGossip.members diff oldGossip.members val membersGroupedByAddress = List(newGossip.members, oldGossip.members).flatten.groupBy(_.uniqueAddress) val changedMembers = membersGroupedByAddress collect { @@ -319,9 +325,9 @@ object ClusterEvent { * INTERNAL API */ @InternalApi - private[cluster] def diffLeader(dc: DataCenter, oldGossip: Gossip, newGossip: Gossip, selfUniqueAddress: UniqueAddress): immutable.Seq[LeaderChanged] = { - val newLeader = newGossip.dcLeader(dc, selfUniqueAddress) - if (newLeader != oldGossip.dcLeader(dc, selfUniqueAddress)) List(LeaderChanged(newLeader.map(_.address))) + private[cluster] def diffLeader(oldState: MembershipState, newState: MembershipState): immutable.Seq[LeaderChanged] = { + val newLeader = newState.leader + if (newLeader != oldState.leader) List(LeaderChanged(newLeader.map(_.address))) else Nil } @@ -329,11 +335,11 @@ object ClusterEvent { * INTERNAL API */ @InternalApi - private[cluster] def diffRolesLeader(dc: DataCenter, oldGossip: Gossip, newGossip: Gossip, selfUniqueAddress: UniqueAddress): Set[RoleLeaderChanged] = { + private[cluster] def diffRolesLeader(oldState: MembershipState, newState: MembershipState): Set[RoleLeaderChanged] = { for { - role ← oldGossip.allRoles union newGossip.allRoles - newLeader = newGossip.roleLeader(dc, role, selfUniqueAddress) - if newLeader != oldGossip.roleLeader(dc, role, selfUniqueAddress) + role ← oldState.latestGossip.allRoles union newState.latestGossip.allRoles + newLeader = newState.roleLeader(role) + if newLeader != oldState.roleLeader(role) } yield RoleLeaderChanged(role, newLeader.map(_.address)) } @@ -341,12 +347,12 @@ object ClusterEvent { * INTERNAL API */ @InternalApi - private[cluster] def diffSeen(dc: DataCenter, oldGossip: Gossip, newGossip: Gossip, selfUniqueAddress: UniqueAddress): immutable.Seq[SeenChanged] = - if (newGossip eq oldGossip) Nil + private[cluster] def diffSeen(oldState: MembershipState, newState: MembershipState): immutable.Seq[SeenChanged] = + if (oldState eq newState) Nil else { - val newConvergence = newGossip.convergence(dc, selfUniqueAddress, Set.empty) - val newSeenBy = newGossip.seenBy - if (newConvergence != oldGossip.convergence(dc, selfUniqueAddress, Set.empty) || newSeenBy != oldGossip.seenBy) + val newConvergence = newState.convergence(Set.empty) + val newSeenBy = newState.latestGossip.seenBy + if (newConvergence != oldState.convergence(Set.empty) || newSeenBy != oldState.latestGossip.seenBy) List(SeenChanged(newConvergence, newSeenBy.map(_.address))) else Nil } @@ -355,9 +361,9 @@ object ClusterEvent { * INTERNAL API */ @InternalApi - private[cluster] def diffReachability(oldGossip: Gossip, newGossip: Gossip): immutable.Seq[ReachabilityChanged] = - if (newGossip.overview.reachability eq oldGossip.overview.reachability) Nil - else List(ReachabilityChanged(newGossip.overview.reachability)) + private[cluster] def diffReachability(oldState: MembershipState, newState: MembershipState): immutable.Seq[ReachabilityChanged] = + if (newState.overview.reachability eq oldState.overview.reachability) Nil + else List(ReachabilityChanged(newState.overview.reachability)) } @@ -372,7 
+378,8 @@ private[cluster] final class ClusterDomainEventPublisher extends Actor with Acto val cluster = Cluster(context.system) val selfUniqueAddress = cluster.selfUniqueAddress - var latestGossip: Gossip = Gossip.empty + val emptyMembershipState = MembershipState(Gossip.empty, cluster.selfUniqueAddress, cluster.settings.DataCenter) + var membershipState: MembershipState = emptyMembershipState def selfDc = cluster.settings.DataCenter override def preRestart(reason: Throwable, message: Option[Any]) { @@ -382,11 +389,11 @@ private[cluster] final class ClusterDomainEventPublisher extends Actor with Acto override def postStop(): Unit = { // publish the final removed state before shutting down publish(ClusterShuttingDown) - publishChanges(Gossip.empty) + publishChanges(emptyMembershipState) } def receive = { - case PublishChanges(newGossip) ⇒ publishChanges(newGossip) + case PublishChanges(newState) ⇒ publishChanges(newState) case currentStats: CurrentInternalStats ⇒ publishInternalStats(currentStats) case SendCurrentClusterState(receiver) ⇒ sendCurrentClusterState(receiver) case Subscribe(subscriber, initMode, to) ⇒ subscribe(subscriber, initMode, to) @@ -401,16 +408,17 @@ private[cluster] final class ClusterDomainEventPublisher extends Actor with Acto * to mimic what you would have seen if you were listening to the events. */ def sendCurrentClusterState(receiver: ActorRef): Unit = { - val unreachable: Set[Member] = latestGossip.overview.reachability.allUnreachableOrTerminated.collect { - case node if node != selfUniqueAddress ⇒ latestGossip.member(node) - } + val unreachable: Set[Member] = + membershipState.latestGossip.overview.reachability.allUnreachableOrTerminated.collect { + case node if node != selfUniqueAddress ⇒ membershipState.latestGossip.member(node) + } val state = CurrentClusterState( - members = latestGossip.members, + members = membershipState.latestGossip.members, unreachable = unreachable, - seenBy = latestGossip.seenBy.map(_.address), - leader = latestGossip.dcLeader(selfDc, selfUniqueAddress).map(_.address), - roleLeaderMap = latestGossip.allRoles.map(r ⇒ - r → latestGossip.roleLeader(selfDc, r, selfUniqueAddress).map(_.address))(collection.breakOut)) + seenBy = membershipState.latestGossip.seenBy.map(_.address), + leader = membershipState.leader.map(_.address), + roleLeaderMap = membershipState.latestGossip.allRoles.map(r ⇒ + r → membershipState.roleLeader(r).map(_.address))(collection.breakOut)) receiver ! state } @@ -421,7 +429,7 @@ private[cluster] final class ClusterDomainEventPublisher extends Actor with Acto if (to.exists(_.isAssignableFrom(event.getClass))) subscriber ! 
event } - publishDiff(Gossip.empty, latestGossip, pub) + publishDiff(emptyMembershipState, membershipState, pub) case InitialStateAsSnapshot ⇒ sendCurrentClusterState(subscriber) } @@ -434,22 +442,22 @@ private[cluster] final class ClusterDomainEventPublisher extends Actor with Acto case Some(c) ⇒ eventStream.unsubscribe(subscriber, c) } - def publishChanges(newGossip: Gossip): Unit = { - val oldGossip = latestGossip - // keep the latestGossip to be sent to new subscribers - latestGossip = newGossip - publishDiff(oldGossip, newGossip, publish) + def publishChanges(newState: MembershipState): Unit = { + val oldState = membershipState + // keep the latest state to be sent to new subscribers + membershipState = newState + publishDiff(oldState, newState, publish) } - def publishDiff(oldGossip: Gossip, newGossip: Gossip, pub: AnyRef ⇒ Unit): Unit = { - diffMemberEvents(oldGossip, newGossip) foreach pub - diffUnreachable(oldGossip, newGossip, selfUniqueAddress) foreach pub - diffReachable(oldGossip, newGossip, selfUniqueAddress) foreach pub - diffLeader(selfDc, oldGossip, newGossip, selfUniqueAddress) foreach pub - diffRolesLeader(selfDc, oldGossip, newGossip, selfUniqueAddress) foreach pub + def publishDiff(oldState: MembershipState, newState: MembershipState, pub: AnyRef ⇒ Unit): Unit = { + diffMemberEvents(oldState, newState) foreach pub + diffUnreachable(oldState, newState) foreach pub + diffReachable(oldState, newState) foreach pub + diffLeader(oldState, newState) foreach pub + diffRolesLeader(oldState, newState) foreach pub // publish internal SeenState for testing purposes - diffSeen(selfDc, oldGossip, newGossip, selfUniqueAddress) foreach pub - diffReachability(oldGossip, newGossip) foreach pub + diffSeen(oldState, newState) foreach pub + diffReachability(oldState, newState) foreach pub } def publishInternalStats(currentStats: CurrentInternalStats): Unit = publish(currentStats) @@ -457,6 +465,6 @@ private[cluster] final class ClusterDomainEventPublisher extends Actor with Acto def publish(event: AnyRef): Unit = eventStream publish event def clearState(): Unit = { - latestGossip = Gossip.empty + membershipState = emptyMembershipState } } diff --git a/akka-cluster/src/main/scala/akka/cluster/Gossip.scala b/akka-cluster/src/main/scala/akka/cluster/Gossip.scala index a5e835113c..dcf3982355 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Gossip.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Gossip.scala @@ -22,11 +22,6 @@ private[cluster] object Gossip { def apply(members: immutable.SortedSet[Member]) = if (members.isEmpty) empty else empty.copy(members = members) - private val leaderMemberStatus = Set[MemberStatus](Up, Leaving) - private val convergenceMemberStatus = Set[MemberStatus](Up, Leaving) - val convergenceSkipUnreachableWithMemberStatus = Set[MemberStatus](Down, Exiting) - val removeUnreachableWithMemberStatus = Set[MemberStatus](Down, Exiting) - } /** @@ -75,7 +70,7 @@ private[cluster] final case class Gossip( private def assertInvariants(): Unit = { if (members.exists(_.status == Removed)) - throw new IllegalArgumentException(s"Live members must have status [${Removed}], " + + throw new IllegalArgumentException(s"Live members must not have status [${Removed}], " + s"got [${members.filter(_.status == Removed)}]") val inReachabilityButNotMember = overview.reachability.allObservers diff members.map(_.uniqueAddress) @@ -168,103 +163,17 @@ private[cluster] final case class Gossip( Gossip(mergedMembers, GossipOverview(mergedSeen, mergedReachability), mergedVClock, 
mergedTombstones) } - /** - * Checks if we have a cluster convergence. If there are any in data center node pairs that cannot reach each other - * then we can't have a convergence until those nodes reach each other again or one of them is downed - * - * @return true if convergence have been reached and false if not - */ - def convergence(dc: DataCenter, selfUniqueAddress: UniqueAddress, exitingConfirmed: Set[UniqueAddress]): Boolean = { - // Find cluster members in the data center that are unreachable from other members of the data center - // excluding observations from members outside of the data center, that have status DOWN or is passed in as confirmed exiting. - val unreachableInDc = dcReachabilityExcludingDownedObservers(dc).allUnreachableOrTerminated.collect { - case node if node != selfUniqueAddress && !exitingConfirmed(node) ⇒ member(node) - } - - // If another member in the data center that is UP or LEAVING and has not seen this gossip or is exiting - // convergence cannot be reached - def memberHinderingConvergenceExists = - members.exists(member ⇒ - member.dataCenter == dc && - Gossip.convergenceMemberStatus(member.status) && - !(seenByNode(member.uniqueAddress) || exitingConfirmed(member.uniqueAddress))) - - // unreachables outside of the data center or with status DOWN or EXITING does not affect convergence - def allUnreachablesCanBeIgnored = - unreachableInDc.forall(unreachable ⇒ Gossip.convergenceSkipUnreachableWithMemberStatus(unreachable.status)) - - allUnreachablesCanBeIgnored && !memberHinderingConvergenceExists - } - lazy val reachabilityExcludingDownedObservers: Reachability = { val downed = members.collect { case m if m.status == Down ⇒ m } overview.reachability.removeObservers(downed.map(_.uniqueAddress)) } - /** - * @return Reachability excluding observations from nodes outside of the data center, but including observed unreachable - * nodes outside of the data center - */ - def dcReachability(dc: DataCenter): Reachability = - overview.reachability.removeObservers(members.collect { case m if m.dataCenter != dc ⇒ m.uniqueAddress }) - - /** - * @return reachability for data center nodes, with observations from outside the data center or from downed nodes filtered out - */ - def dcReachabilityExcludingDownedObservers(dc: DataCenter): Reachability = { - val membersToExclude = members.collect { case m if m.status == Down || m.dataCenter != dc ⇒ m.uniqueAddress } - overview.reachability.removeObservers(membersToExclude).remove(members.collect { case m if m.dataCenter != dc ⇒ m.uniqueAddress }) - } - - def dcMembers(dc: DataCenter): SortedSet[Member] = - members.filter(_.dataCenter == dc) - - def isDcLeader(dc: DataCenter, node: UniqueAddress, selfUniqueAddress: UniqueAddress): Boolean = - dcLeader(dc, selfUniqueAddress).contains(node) - - def dcLeader(dc: DataCenter, selfUniqueAddress: UniqueAddress): Option[UniqueAddress] = - leaderOf(dc, members, selfUniqueAddress) - - def roleLeader(dc: DataCenter, role: String, selfUniqueAddress: UniqueAddress): Option[UniqueAddress] = - leaderOf(dc, members.filter(_.hasRole(role)), selfUniqueAddress) - - def leaderOf(dc: DataCenter, mbrs: immutable.SortedSet[Member], selfUniqueAddress: UniqueAddress): Option[UniqueAddress] = { - val reachability = dcReachability(dc) - - val reachableMembersInDc = - if (reachability.isAllReachable) mbrs.filter(m ⇒ m.dataCenter == dc && m.status != Down) - else mbrs.filter(m ⇒ - m.dataCenter == dc && - m.status != Down && - (reachability.isReachable(m.uniqueAddress) || m.uniqueAddress == 
selfUniqueAddress)) - if (reachableMembersInDc.isEmpty) None - else reachableMembersInDc.find(m ⇒ Gossip.leaderMemberStatus(m.status)) - .orElse(Some(reachableMembersInDc.min(Member.leaderStatusOrdering))) - .map(_.uniqueAddress) - } - def allDataCenters: Set[DataCenter] = members.map(_.dataCenter) def allRoles: Set[String] = members.flatMap(_.roles) def isSingletonCluster: Boolean = members.size == 1 - /** - * @return true if toAddress should be reachable from the fromDc in general, within a data center - * this means only caring about data center local observations, across data centers it - * means caring about all observations for the toAddress. - */ - def isReachableExcludingDownedObservers(fromDc: DataCenter, toAddress: UniqueAddress): Boolean = - if (!hasMember(toAddress)) false - else { - val to = member(toAddress) - - // if member is in the same data center, we ignore cross data center unreachability - if (fromDc == to.dataCenter) dcReachabilityExcludingDownedObservers(fromDc).isReachable(toAddress) - // if not it is enough that any non-downed node observed it as unreachable - else reachabilityExcludingDownedObservers.isReachable(toAddress) - } - /** * @return true if fromAddress should be able to reach toAddress based on the unreachability data and their * respective data centers diff --git a/akka-cluster/src/main/scala/akka/cluster/Member.scala b/akka-cluster/src/main/scala/akka/cluster/Member.scala index 4fb0fbc73e..c258e0a871 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Member.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Member.scala @@ -163,7 +163,7 @@ object Member { if (members.size == 2) acc + members.reduceLeft(highestPriorityOf) else { val m = members.head - if (tombstones.contains(m.uniqueAddress) || Gossip.removeUnreachableWithMemberStatus(m.status)) acc // removed + if (tombstones.contains(m.uniqueAddress) || MembershipState.removeUnreachableWithMemberStatus(m.status)) acc // removed else acc + m } } diff --git a/akka-cluster/src/main/scala/akka/cluster/MembershipState.scala b/akka-cluster/src/main/scala/akka/cluster/MembershipState.scala new file mode 100644 index 0000000000..e2375a4d5b --- /dev/null +++ b/akka-cluster/src/main/scala/akka/cluster/MembershipState.scala @@ -0,0 +1,122 @@ +/** + * Copyright (C) 2017 Lightbend Inc. + */ +package akka.cluster + +import scala.collection.immutable +import scala.collection.SortedSet +import akka.cluster.ClusterSettings.DataCenter +import akka.cluster.MemberStatus._ +import akka.annotation.InternalApi + +/** + * INTERNAL API + */ +@InternalApi private[akka] object MembershipState { + import MemberStatus._ + private val leaderMemberStatus = Set[MemberStatus](Up, Leaving) + private val convergenceMemberStatus = Set[MemberStatus](Up, Leaving) + val convergenceSkipUnreachableWithMemberStatus = Set[MemberStatus](Down, Exiting) + val removeUnreachableWithMemberStatus = Set[MemberStatus](Down, Exiting) +} + +/** + * INTERNAL API + */ +@InternalApi private[akka] final case class MembershipState(latestGossip: Gossip, selfUniqueAddress: UniqueAddress, selfDc: DataCenter) { + import MembershipState._ + + def members: immutable.SortedSet[Member] = latestGossip.members + + def overview: GossipOverview = latestGossip.overview + + def seen(): MembershipState = copy(latestGossip = latestGossip.seen(selfUniqueAddress)) + + /** + * Checks if we have a cluster convergence. 
If there are any node pairs within the data center that cannot reach each other + * then we can't have a convergence until those nodes reach each other again or one of them is downed + * + * @return true if convergence has been reached and false if not + */ + def convergence(exitingConfirmed: Set[UniqueAddress]): Boolean = { + + // If another member in the data center is UP or LEAVING and has not seen this gossip, or is exiting, + // convergence cannot be reached + def memberHinderingConvergenceExists = + members.exists(member ⇒ + member.dataCenter == selfDc && + convergenceMemberStatus(member.status) && + !(latestGossip.seenByNode(member.uniqueAddress) || exitingConfirmed(member.uniqueAddress))) + + // Find cluster members in the data center that are unreachable from other members of the data center + // excluding observations from members outside of the data center, that have status DOWN or are passed in as confirmed exiting. + val unreachableInDc = dcReachabilityExcludingDownedObservers.allUnreachableOrTerminated.collect { + case node if node != selfUniqueAddress && !exitingConfirmed(node) ⇒ latestGossip.member(node) + } + // unreachables outside of the data center or with status DOWN or EXITING do not affect convergence + val allUnreachablesCanBeIgnored = + unreachableInDc.forall(unreachable ⇒ convergenceSkipUnreachableWithMemberStatus(unreachable.status)) + + allUnreachablesCanBeIgnored && !memberHinderingConvergenceExists + } + + /** + * @return Reachability excluding observations from nodes outside of the data center, but including observed unreachable + * nodes outside of the data center + */ + lazy val dcReachability: Reachability = + overview.reachability.removeObservers( + members.collect { case m if m.dataCenter != selfDc ⇒ m.uniqueAddress }) + + /** + * @return reachability for data center nodes, with observations from outside the data center or from downed nodes filtered out + */ + lazy val dcReachabilityExcludingDownedObservers: Reachability = { + val membersToExclude = members.collect { case m if m.status == Down || m.dataCenter != selfDc ⇒ m.uniqueAddress } + overview.reachability.removeObservers(membersToExclude).remove(members.collect { case m if m.dataCenter != selfDc ⇒ m.uniqueAddress }) + } + + /** + * @return true if toAddress should be reachable from this node's data center in general, within a data center + * this means only caring about data center local observations, across data centers it + * means caring about all observations for the toAddress.
+ */ + def isReachableExcludingDownedObservers(toAddress: UniqueAddress): Boolean = + if (!latestGossip.hasMember(toAddress)) false + else { + val to = latestGossip.member(toAddress) + + // if member is in the same data center, we ignore cross data center unreachability + if (selfDc == to.dataCenter) dcReachabilityExcludingDownedObservers.isReachable(toAddress) + // if not it is enough that any non-downed node observed it as unreachable + else latestGossip.reachabilityExcludingDownedObservers.isReachable(toAddress) + } + + def dcMembers: SortedSet[Member] = + members.filter(_.dataCenter == selfDc) + + def isLeader(node: UniqueAddress): Boolean = + leader.contains(node) + + def leader: Option[UniqueAddress] = + leaderOf(members) + + def roleLeader(role: String): Option[UniqueAddress] = + leaderOf(members.filter(_.hasRole(role))) + + def leaderOf(mbrs: immutable.SortedSet[Member]): Option[UniqueAddress] = { + val reachability = dcReachability + + val reachableMembersInDc = + if (reachability.isAllReachable) mbrs.filter(m ⇒ m.dataCenter == selfDc && m.status != Down) + else mbrs.filter(m ⇒ + m.dataCenter == selfDc && + m.status != Down && + (reachability.isReachable(m.uniqueAddress) || m.uniqueAddress == selfUniqueAddress)) + if (reachableMembersInDc.isEmpty) None + else reachableMembersInDc.find(m ⇒ leaderMemberStatus(m.status)) + .orElse(Some(reachableMembersInDc.min(Member.leaderStatusOrdering))) + .map(_.uniqueAddress) + } + +} diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterDomainEventPublisherSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterDomainEventPublisherSpec.scala index 538546c50d..68471ff5b8 100644 --- a/akka-cluster/src/test/scala/akka/cluster/ClusterDomainEventPublisherSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/ClusterDomainEventPublisherSpec.scala @@ -19,6 +19,7 @@ import akka.testkit.ImplicitSender import akka.actor.ActorRef import akka.remote.RARP import akka.testkit.TestProbe +import akka.cluster.ClusterSettings.DefaultDataCenter object ClusterDomainEventPublisherSpec { val config = """ @@ -36,6 +37,7 @@ class ClusterDomainEventPublisherSpec extends AkkaSpec(ClusterDomainEventPublish else "akka.tcp" var publisher: ActorRef = _ + val aUp = TestMember(Address(protocol, "sys", "a", 2552), Up) val aLeaving = aUp.copy(status = Leaving) val aExiting = aLeaving.copy(status = Exiting) @@ -48,16 +50,27 @@ class ClusterDomainEventPublisherSpec extends AkkaSpec(ClusterDomainEventPublish val a51Up = TestMember(Address(protocol, "sys", "a", 2551), Up) val dUp = TestMember(Address(protocol, "sys", "d", 2552), Up, Set("GRP")) + val emptyMembershipState = MembershipState(Gossip.empty, aUp.uniqueAddress, DefaultDataCenter) + val g0 = Gossip(members = SortedSet(aUp)).seen(aUp.uniqueAddress) + val state0 = MembershipState(g0, aUp.uniqueAddress, DefaultDataCenter) val g1 = Gossip(members = SortedSet(aUp, cJoining)).seen(aUp.uniqueAddress).seen(cJoining.uniqueAddress) + val state1 = MembershipState(g1, aUp.uniqueAddress, DefaultDataCenter) val g2 = Gossip(members = SortedSet(aUp, bExiting, cUp)).seen(aUp.uniqueAddress) + val state2 = MembershipState(g2, aUp.uniqueAddress, DefaultDataCenter) val g3 = g2.seen(bExiting.uniqueAddress).seen(cUp.uniqueAddress) + val state3 = MembershipState(g3, aUp.uniqueAddress, DefaultDataCenter) val g4 = Gossip(members = SortedSet(a51Up, aUp, bExiting, cUp)).seen(aUp.uniqueAddress) + val state4 = MembershipState(g4, aUp.uniqueAddress, DefaultDataCenter) val g5 = Gossip(members = SortedSet(a51Up, aUp, bExiting, 
cUp)).seen(aUp.uniqueAddress).seen(bExiting.uniqueAddress).seen(cUp.uniqueAddress).seen(a51Up.uniqueAddress) + val state5 = MembershipState(g5, aUp.uniqueAddress, DefaultDataCenter) val g6 = Gossip(members = SortedSet(aLeaving, bExiting, cUp)).seen(aUp.uniqueAddress) + val state6 = MembershipState(g6, aUp.uniqueAddress, DefaultDataCenter) val g7 = Gossip(members = SortedSet(aExiting, bExiting, cUp)).seen(aUp.uniqueAddress) + val state7 = MembershipState(g7, aUp.uniqueAddress, DefaultDataCenter) val g8 = Gossip(members = SortedSet(aUp, bExiting, cUp, dUp), overview = GossipOverview(reachability = Reachability.empty.unreachable(aUp.uniqueAddress, dUp.uniqueAddress))).seen(aUp.uniqueAddress) + val state8 = MembershipState(g8, aUp.uniqueAddress, DefaultDataCenter) // created in beforeEach var memberSubscriber: TestProbe = _ @@ -69,7 +82,7 @@ class ClusterDomainEventPublisherSpec extends AkkaSpec(ClusterDomainEventPublish system.eventStream.subscribe(memberSubscriber.ref, ClusterShuttingDown.getClass) publisher = system.actorOf(Props[ClusterDomainEventPublisher]) - publisher ! PublishChanges(g0) + publisher ! PublishChanges(state0) memberSubscriber.expectMsg(MemberUp(aUp)) memberSubscriber.expectMsg(LeaderChanged(Some(aUp.address))) } @@ -77,19 +90,19 @@ class ClusterDomainEventPublisherSpec extends AkkaSpec(ClusterDomainEventPublish "ClusterDomainEventPublisher" must { "publish MemberJoined" in { - publisher ! PublishChanges(g1) + publisher ! PublishChanges(state1) memberSubscriber.expectMsg(MemberJoined(cJoining)) } "publish MemberUp" in { - publisher ! PublishChanges(g2) - publisher ! PublishChanges(g3) + publisher ! PublishChanges(state2) + publisher ! PublishChanges(state3) memberSubscriber.expectMsg(MemberExited(bExiting)) memberSubscriber.expectMsg(MemberUp(cUp)) } "publish leader changed" in { - publisher ! PublishChanges(g4) + publisher ! PublishChanges(state4) memberSubscriber.expectMsg(MemberUp(a51Up)) memberSubscriber.expectMsg(MemberExited(bExiting)) memberSubscriber.expectMsg(MemberUp(cUp)) @@ -98,17 +111,17 @@ class ClusterDomainEventPublisherSpec extends AkkaSpec(ClusterDomainEventPublish } "publish leader changed when old leader leaves and is removed" in { - publisher ! PublishChanges(g3) + publisher ! PublishChanges(state3) memberSubscriber.expectMsg(MemberExited(bExiting)) memberSubscriber.expectMsg(MemberUp(cUp)) - publisher ! PublishChanges(g6) + publisher ! PublishChanges(state6) memberSubscriber.expectMsg(MemberLeft(aLeaving)) - publisher ! PublishChanges(g7) + publisher ! PublishChanges(state7) memberSubscriber.expectMsg(MemberExited(aExiting)) memberSubscriber.expectMsg(LeaderChanged(Some(cUp.address))) memberSubscriber.expectNoMsg(500 millis) // at the removed member a an empty gossip is the last thing - publisher ! PublishChanges(Gossip.empty) + publisher ! PublishChanges(emptyMembershipState) memberSubscriber.expectMsg(MemberRemoved(aRemoved, Exiting)) memberSubscriber.expectMsg(MemberRemoved(bRemoved, Exiting)) memberSubscriber.expectMsg(MemberRemoved(cRemoved, Up)) @@ -116,13 +129,13 @@ class ClusterDomainEventPublisherSpec extends AkkaSpec(ClusterDomainEventPublish } "not publish leader changed when same leader" in { - publisher ! PublishChanges(g4) + publisher ! PublishChanges(state4) memberSubscriber.expectMsg(MemberUp(a51Up)) memberSubscriber.expectMsg(MemberExited(bExiting)) memberSubscriber.expectMsg(MemberUp(cUp)) memberSubscriber.expectMsg(LeaderChanged(Some(a51Up.address))) - publisher ! PublishChanges(g5) + publisher ! 
PublishChanges(state5) memberSubscriber.expectNoMsg(500 millis) } @@ -130,12 +143,11 @@ class ClusterDomainEventPublisherSpec extends AkkaSpec(ClusterDomainEventPublish val subscriber = TestProbe() publisher ! Subscribe(subscriber.ref, InitialStateAsSnapshot, Set(classOf[RoleLeaderChanged])) subscriber.expectMsgType[CurrentClusterState] - publisher ! PublishChanges(Gossip(members = SortedSet(cJoining, dUp))) + publisher ! PublishChanges(MembershipState(Gossip(members = SortedSet(cJoining, dUp)), dUp.uniqueAddress, DefaultDataCenter)) subscriber.expectMsgAllOf( RoleLeaderChanged("GRP", Some(dUp.address)), - RoleLeaderChanged(ClusterSettings.DcRolePrefix + ClusterSettings.DefaultDataCenter, Some(dUp.address)) - ) - publisher ! PublishChanges(Gossip(members = SortedSet(cUp, dUp))) + RoleLeaderChanged(ClusterSettings.DcRolePrefix + ClusterSettings.DefaultDataCenter, Some(dUp.address))) + publisher ! PublishChanges(MembershipState(Gossip(members = SortedSet(cUp, dUp)), dUp.uniqueAddress, DefaultDataCenter)) subscriber.expectMsg(RoleLeaderChanged("GRP", Some(cUp.address))) } @@ -150,7 +162,7 @@ class ClusterDomainEventPublisherSpec extends AkkaSpec(ClusterDomainEventPublish "send events corresponding to current state when subscribe" in { val subscriber = TestProbe() - publisher ! PublishChanges(g8) + publisher ! PublishChanges(state8) publisher ! Subscribe(subscriber.ref, InitialStateAsEvents, Set(classOf[MemberEvent], classOf[ReachabilityEvent])) subscriber.receiveN(4).toSet should be(Set(MemberUp(aUp), MemberUp(cUp), MemberUp(dUp), MemberExited(bExiting))) subscriber.expectMsg(UnreachableMember(dUp)) @@ -162,7 +174,7 @@ class ClusterDomainEventPublisherSpec extends AkkaSpec(ClusterDomainEventPublish publisher ! Subscribe(subscriber.ref, InitialStateAsSnapshot, Set(classOf[MemberEvent])) subscriber.expectMsgType[CurrentClusterState] publisher ! Unsubscribe(subscriber.ref, Some(classOf[MemberEvent])) - publisher ! PublishChanges(g3) + publisher ! PublishChanges(state3) subscriber.expectNoMsg(500 millis) // but memberSubscriber is still subscriber memberSubscriber.expectMsg(MemberExited(bExiting)) @@ -173,10 +185,10 @@ class ClusterDomainEventPublisherSpec extends AkkaSpec(ClusterDomainEventPublish val subscriber = TestProbe() publisher ! Subscribe(subscriber.ref, InitialStateAsSnapshot, Set(classOf[SeenChanged])) subscriber.expectMsgType[CurrentClusterState] - publisher ! PublishChanges(g2) + publisher ! PublishChanges(state2) subscriber.expectMsgType[SeenChanged] subscriber.expectNoMsg(500 millis) - publisher ! PublishChanges(g3) + publisher ! 
PublishChanges(state3) subscriber.expectMsgType[SeenChanged] subscriber.expectNoMsg(500 millis) } diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterDomainEventSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterDomainEventSpec.scala index 785b813d44..420a39d18a 100644 --- a/akka-cluster/src/test/scala/akka/cluster/ClusterDomainEventSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/ClusterDomainEventSpec.scala @@ -38,30 +38,33 @@ class ClusterDomainEventSpec extends WordSpec with Matchers { private[cluster] def converge(gossip: Gossip): (Gossip, Set[UniqueAddress]) = ((gossip, Set.empty[UniqueAddress]) /: gossip.members) { case ((gs, as), m) ⇒ (gs.seen(m.uniqueAddress), as + m.uniqueAddress) } + private def state(g: Gossip): MembershipState = + MembershipState(g, selfDummyAddress, ClusterSettings.DefaultDataCenter) + "Domain events" must { "be empty for the same gossip" in { val g1 = Gossip(members = SortedSet(aUp)) - diffUnreachable(g1, g1, selfDummyAddress) should ===(Seq.empty) + diffUnreachable(state(g1), state(g1)) should ===(Seq.empty) } "be produced for new members" in { val (g1, _) = converge(Gossip(members = SortedSet(aUp))) val (g2, s2) = converge(Gossip(members = SortedSet(aUp, bUp, eJoining))) - diffMemberEvents(g1, g2) should ===(Seq(MemberUp(bUp), MemberJoined(eJoining))) - diffUnreachable(g1, g2, selfDummyAddress) should ===(Seq.empty) - diffSeen(ClusterSettings.DefaultDataCenter, g1, g2, selfDummyAddress) should ===(Seq(SeenChanged(convergence = true, seenBy = s2.map(_.address)))) + diffMemberEvents(state(g1), state(g2)) should ===(Seq(MemberUp(bUp), MemberJoined(eJoining))) + diffUnreachable(state(g1), state(g2)) should ===(Seq.empty) + diffSeen(state(g1), state(g2)) should ===(Seq(SeenChanged(convergence = true, seenBy = s2.map(_.address)))) } "be produced for changed status of members" in { val (g1, _) = converge(Gossip(members = SortedSet(aJoining, bUp, cUp))) val (g2, s2) = converge(Gossip(members = SortedSet(aUp, bUp, cLeaving, eJoining))) - diffMemberEvents(g1, g2) should ===(Seq(MemberUp(aUp), MemberLeft(cLeaving), MemberJoined(eJoining))) - diffUnreachable(g1, g2, selfDummyAddress) should ===(Seq.empty) - diffSeen(ClusterSettings.DefaultDataCenter, g1, g2, selfDummyAddress) should ===(Seq(SeenChanged(convergence = true, seenBy = s2.map(_.address)))) + diffMemberEvents(state(g1), state(g2)) should ===(Seq(MemberUp(aUp), MemberLeft(cLeaving), MemberJoined(eJoining))) + diffUnreachable(state(g1), state(g2)) should ===(Seq.empty) + diffSeen(state(g1), state(g2)) should ===(Seq(SeenChanged(convergence = true, seenBy = s2.map(_.address)))) } "be produced for members in unreachable" in { @@ -73,10 +76,13 @@ class ClusterDomainEventSpec extends WordSpec with Matchers { unreachable(aUp.uniqueAddress, bDown.uniqueAddress) val g2 = Gossip(members = SortedSet(aUp, cUp, bDown, eDown), overview = GossipOverview(reachability = reachability2)) - diffUnreachable(g1, g2, selfDummyAddress) should ===(Seq(UnreachableMember(bDown))) + diffUnreachable(state(g1), state(g2)) should ===(Seq(UnreachableMember(bDown))) // never include self member in unreachable - diffUnreachable(g1, g2, bDown.uniqueAddress) should ===(Seq()) - diffSeen(ClusterSettings.DefaultDataCenter, g1, g2, selfDummyAddress) should ===(Seq.empty) + + diffUnreachable( + MembershipState(g1, bDown.uniqueAddress, ClusterSettings.DefaultDataCenter), + MembershipState(g2, bDown.uniqueAddress, ClusterSettings.DefaultDataCenter)) should ===(Seq()) + diffSeen(state(g1), state(g2)) should ===(Seq.empty) 
} "be produced for members becoming reachable after unreachable" in { @@ -90,50 +96,54 @@ class ClusterDomainEventSpec extends WordSpec with Matchers { reachable(aUp.uniqueAddress, bUp.uniqueAddress) val g2 = Gossip(members = SortedSet(aUp, cUp, bUp, eUp), overview = GossipOverview(reachability = reachability2)) - diffUnreachable(g1, g2, selfDummyAddress) should ===(Seq(UnreachableMember(cUp))) + diffUnreachable(state(g1), state(g2)) should ===(Seq(UnreachableMember(cUp))) // never include self member in unreachable - diffUnreachable(g1, g2, cUp.uniqueAddress) should ===(Seq()) - diffReachable(g1, g2, selfDummyAddress) should ===(Seq(ReachableMember(bUp))) + diffUnreachable( + MembershipState(g1, cUp.uniqueAddress, ClusterSettings.DefaultDataCenter), + MembershipState(g2, cUp.uniqueAddress, ClusterSettings.DefaultDataCenter)) should ===(Seq()) + diffReachable(state(g1), state(g2)) should ===(Seq(ReachableMember(bUp))) // never include self member in reachable - diffReachable(g1, g2, bUp.uniqueAddress) should ===(Seq()) + diffReachable( + MembershipState(g1, bUp.uniqueAddress, ClusterSettings.DefaultDataCenter), + MembershipState(g2, bUp.uniqueAddress, ClusterSettings.DefaultDataCenter)) should ===(Seq()) } "be produced for removed members" in { val (g1, _) = converge(Gossip(members = SortedSet(aUp, dExiting))) val (g2, s2) = converge(Gossip(members = SortedSet(aUp))) - diffMemberEvents(g1, g2) should ===(Seq(MemberRemoved(dRemoved, Exiting))) - diffUnreachable(g1, g2, selfDummyAddress) should ===(Seq.empty) - diffSeen(ClusterSettings.DefaultDataCenter, g1, g2, selfDummyAddress) should ===(Seq(SeenChanged(convergence = true, seenBy = s2.map(_.address)))) + diffMemberEvents(state(g1), state(g2)) should ===(Seq(MemberRemoved(dRemoved, Exiting))) + diffUnreachable(state(g1), state(g2)) should ===(Seq.empty) + diffSeen(state(g1), state(g2)) should ===(Seq(SeenChanged(convergence = true, seenBy = s2.map(_.address)))) } "be produced for convergence changes" in { val g1 = Gossip(members = SortedSet(aUp, bUp, eJoining)).seen(aUp.uniqueAddress).seen(bUp.uniqueAddress).seen(eJoining.uniqueAddress) val g2 = Gossip(members = SortedSet(aUp, bUp, eJoining)).seen(aUp.uniqueAddress).seen(bUp.uniqueAddress) - diffMemberEvents(g1, g2) should ===(Seq.empty) - diffUnreachable(g1, g2, selfDummyAddress) should ===(Seq.empty) - diffSeen(ClusterSettings.DefaultDataCenter, g1, g2, selfDummyAddress) should ===(Seq(SeenChanged(convergence = true, seenBy = Set(aUp.address, bUp.address)))) - diffMemberEvents(g2, g1) should ===(Seq.empty) - diffUnreachable(g2, g1, selfDummyAddress) should ===(Seq.empty) - diffSeen(ClusterSettings.DefaultDataCenter, g2, g1, selfDummyAddress) should ===(Seq(SeenChanged(convergence = true, seenBy = Set(aUp.address, bUp.address, eJoining.address)))) + diffMemberEvents(state(g1), state(g2)) should ===(Seq.empty) + diffUnreachable(state(g1), state(g2)) should ===(Seq.empty) + diffSeen(state(g1), state(g2)) should ===(Seq(SeenChanged(convergence = true, seenBy = Set(aUp.address, bUp.address)))) + diffMemberEvents(state(g2), state(g1)) should ===(Seq.empty) + diffUnreachable(state(g2), state(g1)) should ===(Seq.empty) + diffSeen(state(g2), state(g1)) should ===(Seq(SeenChanged(convergence = true, seenBy = Set(aUp.address, bUp.address, eJoining.address)))) } "be produced for leader changes" in { val (g1, _) = converge(Gossip(members = SortedSet(aUp, bUp, eJoining))) val (g2, s2) = converge(Gossip(members = SortedSet(bUp, eJoining))) - diffMemberEvents(g1, g2) should 
===(Seq(MemberRemoved(aRemoved, Up))) - diffUnreachable(g1, g2, selfDummyAddress) should ===(Seq.empty) - diffSeen(ClusterSettings.DefaultDataCenter, g1, g2, selfDummyAddress) should ===(Seq(SeenChanged(convergence = true, seenBy = s2.map(_.address)))) - diffLeader(ClusterSettings.DefaultDataCenter, g1, g2, selfDummyAddress) should ===(Seq(LeaderChanged(Some(bUp.address)))) + diffMemberEvents(state(g1), state(g2)) should ===(Seq(MemberRemoved(aRemoved, Up))) + diffUnreachable(state(g1), state(g2)) should ===(Seq.empty) + diffSeen(state(g1), state(g2)) should ===(Seq(SeenChanged(convergence = true, seenBy = s2.map(_.address)))) + diffLeader(state(g1), state(g2)) should ===(Seq(LeaderChanged(Some(bUp.address)))) } "be produced for role leader changes in the same data center" in { val g0 = Gossip.empty val g1 = Gossip(members = SortedSet(aUp, bUp, cUp, dLeaving, eJoining)) val g2 = Gossip(members = SortedSet(bUp, cUp, dExiting, eJoining)) - diffRolesLeader(ClusterSettings.DefaultDataCenter, g0, g1, selfDummyAddress) should ===( + diffRolesLeader(state(g0), state(g1)) should ===( Set( // since this role is implicitly added RoleLeaderChanged(ClusterSettings.DcRolePrefix + ClusterSettings.DefaultDataCenter, Some(aUp.address)), @@ -143,7 +153,7 @@ class ClusterDomainEventSpec extends WordSpec with Matchers { RoleLeaderChanged("DD", Some(dLeaving.address)), RoleLeaderChanged("DE", Some(dLeaving.address)), RoleLeaderChanged("EE", Some(eUp.address)))) - diffRolesLeader(ClusterSettings.DefaultDataCenter, g1, g2, selfDummyAddress) should ===( + diffRolesLeader(state(g1), state(g2)) should ===( Set( RoleLeaderChanged(ClusterSettings.DcRolePrefix + ClusterSettings.DefaultDataCenter, Some(bUp.address)), RoleLeaderChanged("AA", None), @@ -153,10 +163,14 @@ class ClusterDomainEventSpec extends WordSpec with Matchers { "not be produced for role leader changes in other data centers" in { val g0 = Gossip.empty + val s0 = state(g0).copy(selfDc = "dc2") val g1 = Gossip(members = SortedSet(aUp, bUp, cUp, dLeaving, eJoining)) + val s1 = state(g1).copy(selfDc = "dc2") val g2 = Gossip(members = SortedSet(bUp, cUp, dExiting, eJoining)) - diffRolesLeader("dc2", g0, g1, selfDummyAddress) should ===(Set.empty) - diffRolesLeader("dc2", g1, g2, selfDummyAddress) should ===(Set.empty) + val s2 = state(g2).copy(selfDc = "dc2") + + diffRolesLeader(s0, s1) should ===(Set.empty) + diffRolesLeader(s1, s2) should ===(Set.empty) } } } diff --git a/akka-cluster/src/test/scala/akka/cluster/GossipSpec.scala b/akka-cluster/src/test/scala/akka/cluster/GossipSpec.scala index ab3f9a484e..25e17d91c6 100644 --- a/akka-cluster/src/test/scala/akka/cluster/GossipSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/GossipSpec.scala @@ -7,6 +7,7 @@ package akka.cluster import org.scalatest.WordSpec import org.scalatest.Matchers import akka.actor.Address +import akka.cluster.ClusterSettings.DataCenter import akka.cluster.ClusterSettings.DefaultDataCenter import scala.collection.immutable.SortedSet @@ -33,6 +34,9 @@ class GossipSpec extends WordSpec with Matchers { val dc2d1 = TestMember(Address("akka.tcp", "sys", "d", 2552), Up, Set.empty, dataCenter = "dc2") val dc2d2 = TestMember(dc2d1.address, status = Down, roles = Set.empty, dataCenter = dc2d1.dataCenter) + private def state(g: Gossip, selfMember: Member = a1): MembershipState = + MembershipState(g, selfMember.uniqueAddress, selfMember.dataCenter) + "A Gossip" must { "have correct test setup" in { @@ -41,40 +45,40 @@ class GossipSpec extends WordSpec with Matchers { } "reach 
convergence when it's empty" in { - Gossip.empty.convergence(DefaultDataCenter, a1.uniqueAddress, Set.empty) should ===(true) + state(Gossip.empty).convergence(Set.empty) should ===(true) } "reach convergence for one node" in { val g1 = Gossip(members = SortedSet(a1)).seen(a1.uniqueAddress) - g1.convergence(DefaultDataCenter, a1.uniqueAddress, Set.empty) should ===(true) + state(g1).convergence(Set.empty) should ===(true) } "not reach convergence until all have seen version" in { val g1 = Gossip(members = SortedSet(a1, b1)).seen(a1.uniqueAddress) - g1.convergence(DefaultDataCenter, a1.uniqueAddress, Set.empty) should ===(false) + state(g1).convergence(Set.empty) should ===(false) } "reach convergence for two nodes" in { val g1 = Gossip(members = SortedSet(a1, b1)).seen(a1.uniqueAddress).seen(b1.uniqueAddress) - g1.convergence(DefaultDataCenter, a1.uniqueAddress, Set.empty) should ===(true) + state(g1).convergence(Set.empty) should ===(true) } "reach convergence, skipping joining" in { // e1 is joining val g1 = Gossip(members = SortedSet(a1, b1, e1)).seen(a1.uniqueAddress).seen(b1.uniqueAddress) - g1.convergence(DefaultDataCenter, a1.uniqueAddress, Set.empty) should ===(true) + state(g1).convergence(Set.empty) should ===(true) } "reach convergence, skipping down" in { // e3 is down val g1 = Gossip(members = SortedSet(a1, b1, e3)).seen(a1.uniqueAddress).seen(b1.uniqueAddress) - g1.convergence(DefaultDataCenter, a1.uniqueAddress, Set.empty) should ===(true) + state(g1).convergence(Set.empty) should ===(true) } "reach convergence, skipping Leaving with exitingConfirmed" in { // c1 is Leaving val g1 = Gossip(members = SortedSet(a1, b1, c1)).seen(a1.uniqueAddress).seen(b1.uniqueAddress) - g1.convergence(DefaultDataCenter, a1.uniqueAddress, Set(c1.uniqueAddress)) should ===(true) + state(g1).convergence(Set(c1.uniqueAddress)) should ===(true) } "reach convergence, skipping unreachable Leaving with exitingConfirmed" in { @@ -82,16 +86,16 @@ class GossipSpec extends WordSpec with Matchers { val r1 = Reachability.empty.unreachable(b1.uniqueAddress, c1.uniqueAddress) val g1 = Gossip(members = SortedSet(a1, b1, c1), overview = GossipOverview(reachability = r1)) .seen(a1.uniqueAddress).seen(b1.uniqueAddress) - g1.convergence(DefaultDataCenter, a1.uniqueAddress, Set(c1.uniqueAddress)) should ===(true) + state(g1).convergence(Set(c1.uniqueAddress)) should ===(true) } "not reach convergence when unreachable" in { val r1 = Reachability.empty.unreachable(b1.uniqueAddress, a1.uniqueAddress) val g1 = (Gossip(members = SortedSet(a1, b1), overview = GossipOverview(reachability = r1))) .seen(a1.uniqueAddress).seen(b1.uniqueAddress) - g1.convergence(DefaultDataCenter, b1.uniqueAddress, Set.empty) should ===(false) + state(g1, b1).convergence(Set.empty) should ===(false) // but from a1's point of view (it knows that itself is not unreachable) - g1.convergence(DefaultDataCenter, a1.uniqueAddress, Set.empty) should ===(true) + state(g1).convergence(Set.empty) should ===(true) } "reach convergence when downed node has observed unreachable" in { @@ -99,7 +103,7 @@ class GossipSpec extends WordSpec with Matchers { val r1 = Reachability.empty.unreachable(e3.uniqueAddress, a1.uniqueAddress) val g1 = (Gossip(members = SortedSet(a1, b1, e3), overview = GossipOverview(reachability = r1))) .seen(a1.uniqueAddress).seen(b1.uniqueAddress).seen(e3.uniqueAddress) - g1.convergence(DefaultDataCenter, b1.uniqueAddress, Set.empty) should ===(true) + state(g1, b1).convergence(Set.empty) should ===(true) } "merge members by status 
priority" in { @@ -146,37 +150,33 @@ class GossipSpec extends WordSpec with Matchers { } "have leader as first member based on ordering, except Exiting status" in { - Gossip(members = SortedSet(c2, e2)).dcLeader(DefaultDataCenter, c2.uniqueAddress) should ===(Some(c2.uniqueAddress)) - Gossip(members = SortedSet(c3, e2)).dcLeader(DefaultDataCenter, c3.uniqueAddress) should ===(Some(e2.uniqueAddress)) - Gossip(members = SortedSet(c3)).dcLeader(DefaultDataCenter, c3.uniqueAddress) should ===(Some(c3.uniqueAddress)) + state(Gossip(members = SortedSet(c2, e2)), c2).leader should ===(Some(c2.uniqueAddress)) + state(Gossip(members = SortedSet(c3, e2)), c3).leader should ===(Some(e2.uniqueAddress)) + state(Gossip(members = SortedSet(c3)), c3).leader should ===(Some(c3.uniqueAddress)) } "have leader as first reachable member based on ordering" in { val r1 = Reachability.empty.unreachable(e2.uniqueAddress, c2.uniqueAddress) val g1 = Gossip(members = SortedSet(c2, e2), overview = GossipOverview(reachability = r1)) - g1.dcLeader(DefaultDataCenter, e2.uniqueAddress) should ===(Some(e2.uniqueAddress)) + state(g1, e2).leader should ===(Some(e2.uniqueAddress)) // but when c2 is selfUniqueAddress - g1.dcLeader(DefaultDataCenter, c2.uniqueAddress) should ===(Some(c2.uniqueAddress)) + state(g1, c2).leader should ===(Some(c2.uniqueAddress)) } "not have Down member as leader" in { - Gossip(members = SortedSet(e3)).dcLeader(DefaultDataCenter, e3.uniqueAddress) should ===(None) + state(Gossip(members = SortedSet(e3)), e3).leader should ===(None) } "have a leader per data center" in { val g1 = Gossip(members = SortedSet(dc1a1, dc1b1, dc2c1, dc2d1)) - // everybodys point of view is dc1a1 being leader of dc1 - g1.dcLeader("dc1", dc1a1.uniqueAddress) should ===(Some(dc1a1.uniqueAddress)) - g1.dcLeader("dc1", dc1b1.uniqueAddress) should ===(Some(dc1a1.uniqueAddress)) - g1.dcLeader("dc1", dc2c1.uniqueAddress) should ===(Some(dc1a1.uniqueAddress)) - g1.dcLeader("dc1", dc2d1.uniqueAddress) should ===(Some(dc1a1.uniqueAddress)) + // dc1a1 being leader of dc1 + state(g1, dc1a1).leader should ===(Some(dc1a1.uniqueAddress)) + state(g1, dc1b1).leader should ===(Some(dc1a1.uniqueAddress)) // and dc2c1 being leader of dc2 - g1.dcLeader("dc2", dc1a1.uniqueAddress) should ===(Some(dc2c1.uniqueAddress)) - g1.dcLeader("dc2", dc1b1.uniqueAddress) should ===(Some(dc2c1.uniqueAddress)) - g1.dcLeader("dc2", dc2c1.uniqueAddress) should ===(Some(dc2c1.uniqueAddress)) - g1.dcLeader("dc2", dc2d1.uniqueAddress) should ===(Some(dc2c1.uniqueAddress)) + state(g1, dc2c1).leader should ===(Some(dc2c1.uniqueAddress)) + state(g1, dc2d1).leader should ===(Some(dc2c1.uniqueAddress)) } "merge seen table correctly" in { @@ -218,11 +218,11 @@ class GossipSpec extends WordSpec with Matchers { .seen(dc1b1.uniqueAddress) .seen(dc2c1.uniqueAddress) .seen(dc2d1.uniqueAddress) - g.dcLeader("dc1", dc1a1.uniqueAddress) should ===(Some(dc1a1.uniqueAddress)) - g.convergence("dc1", dc1a1.uniqueAddress, Set.empty) should ===(true) + state(g, dc1a1).leader should ===(Some(dc1a1.uniqueAddress)) + state(g, dc1a1).convergence(Set.empty) should ===(true) - g.dcLeader("dc2", dc2c1.uniqueAddress) should ===(Some(dc2c1.uniqueAddress)) - g.convergence("dc2", dc2c1.uniqueAddress, Set.empty) should ===(true) + state(g, dc2c1).leader should ===(Some(dc2c1.uniqueAddress)) + state(g, dc2c1).convergence(Set.empty) should ===(true) } "reach convergence per data center even if members of another data center has not seen the gossip" in { @@ -233,12 +233,12 @@ class GossipSpec 
extends WordSpec with Matchers { // dc2d1 has not seen the gossip // so dc1 can reach convergence - g.dcLeader("dc1", dc1a1.uniqueAddress) should ===(Some(dc1a1.uniqueAddress)) - g.convergence("dc1", dc1a1.uniqueAddress, Set.empty) should ===(true) + state(g, dc1a1).leader should ===(Some(dc1a1.uniqueAddress)) + state(g, dc1a1).convergence(Set.empty) should ===(true) // but dc2 cannot - g.dcLeader("dc2", dc2c1.uniqueAddress) should ===(Some(dc2c1.uniqueAddress)) - g.convergence("dc2", dc2c1.uniqueAddress, Set.empty) should ===(false) + state(g, dc2c1).leader should ===(Some(dc2c1.uniqueAddress)) + state(g, dc2c1).convergence(Set.empty) should ===(false) } "reach convergence per data center even if another data center contains unreachable" in { @@ -251,12 +251,12 @@ class GossipSpec extends WordSpec with Matchers { .seen(dc2d1.uniqueAddress) // this data center doesn't care about dc2 having reachability problems and can reach convergence - g.dcLeader("dc1", dc1a1.uniqueAddress) should ===(Some(dc1a1.uniqueAddress)) - g.convergence("dc1", dc1a1.uniqueAddress, Set.empty) should ===(true) + state(g, dc1a1).leader should ===(Some(dc1a1.uniqueAddress)) + state(g, dc1a1).convergence(Set.empty) should ===(true) // this data center is cannot reach convergence because of unreachability within the data center - g.dcLeader("dc2", dc2c1.uniqueAddress) should ===(Some(dc2c1.uniqueAddress)) - g.convergence("dc2", dc2c1.uniqueAddress, Set.empty) should ===(false) + state(g, dc2c1).leader should ===(Some(dc2c1.uniqueAddress)) + state(g, dc2c1).convergence(Set.empty) should ===(false) } "reach convergence per data center even if there is unreachable nodes in another data center" in { @@ -271,11 +271,11 @@ class GossipSpec extends WordSpec with Matchers { .seen(dc2d1.uniqueAddress) // neither data center is affected by the inter data center unreachability as far as convergence goes - g.dcLeader("dc1", dc1a1.uniqueAddress) should ===(Some(dc1a1.uniqueAddress)) - g.convergence("dc1", dc1a1.uniqueAddress, Set.empty) should ===(true) + state(g, dc1a1).leader should ===(Some(dc1a1.uniqueAddress)) + state(g, dc1a1).convergence(Set.empty) should ===(true) - g.dcLeader("dc2", dc2c1.uniqueAddress) should ===(Some(dc2c1.uniqueAddress)) - g.convergence("dc2", dc2c1.uniqueAddress, Set.empty) should ===(true) + state(g, dc2c1).leader should ===(Some(dc2c1.uniqueAddress)) + state(g, dc2c1).convergence(Set.empty) should ===(true) } "ignore cross data center unreachability when determining inside of data center reachability" in { @@ -291,10 +291,10 @@ class GossipSpec extends WordSpec with Matchers { g.isReachable(dc2c1.uniqueAddress, dc2d1.uniqueAddress) should ===(true) g.isReachable(dc2d1.uniqueAddress, dc2c1.uniqueAddress) should ===(true) - g.isReachableExcludingDownedObservers(dc1a1.dataCenter, dc1b1.uniqueAddress) should ===(true) - g.isReachableExcludingDownedObservers(dc1b1.dataCenter, dc1a1.uniqueAddress) should ===(true) - g.isReachableExcludingDownedObservers(dc2c1.dataCenter, dc2d1.uniqueAddress) should ===(true) - g.isReachableExcludingDownedObservers(dc2d1.dataCenter, dc2c1.uniqueAddress) should ===(true) + state(g, dc1a1).isReachableExcludingDownedObservers(dc1b1.uniqueAddress) should ===(true) + state(g, dc1b1).isReachableExcludingDownedObservers(dc1a1.uniqueAddress) should ===(true) + state(g, dc2c1).isReachableExcludingDownedObservers(dc2d1.uniqueAddress) should ===(true) + state(g, dc2d1).isReachableExcludingDownedObservers(dc2c1.uniqueAddress) should ===(true) // between data centers it matters though 
g.isReachable(dc1a1.uniqueAddress, dc2c1.uniqueAddress) should ===(false) @@ -304,22 +304,22 @@ class GossipSpec extends WordSpec with Matchers { g.isReachable(dc2d1.uniqueAddress, dc1a1.uniqueAddress) should ===(true) // this one looks at all unreachable-entries for the to-address - g.isReachableExcludingDownedObservers(dc1a1.dataCenter, dc2c1.uniqueAddress) should ===(false) - g.isReachableExcludingDownedObservers(dc1b1.dataCenter, dc2c1.uniqueAddress) should ===(false) - g.isReachableExcludingDownedObservers(dc2c1.dataCenter, dc1a1.uniqueAddress) should ===(false) - g.isReachableExcludingDownedObservers(dc2d1.dataCenter, dc1a1.uniqueAddress) should ===(false) + state(g, dc1a1).isReachableExcludingDownedObservers(dc2c1.uniqueAddress) should ===(false) + state(g, dc1b1).isReachableExcludingDownedObservers(dc2c1.uniqueAddress) should ===(false) + state(g, dc2c1).isReachableExcludingDownedObservers(dc1a1.uniqueAddress) should ===(false) + state(g, dc2d1).isReachableExcludingDownedObservers(dc1a1.uniqueAddress) should ===(false) // between the two other nodes there is no unreachability g.isReachable(dc1b1.uniqueAddress, dc2d1.uniqueAddress) should ===(true) g.isReachable(dc2d1.uniqueAddress, dc1b1.uniqueAddress) should ===(true) - g.isReachableExcludingDownedObservers(dc1b1.dataCenter, dc2d1.uniqueAddress) should ===(true) - g.isReachableExcludingDownedObservers(dc2d1.dataCenter, dc1b1.uniqueAddress) should ===(true) + state(g, dc1b1).isReachableExcludingDownedObservers(dc2d1.uniqueAddress) should ===(true) + state(g, dc2d1).isReachableExcludingDownedObservers(dc1b1.uniqueAddress) should ===(true) } "not returning a downed data center leader" in { val g = Gossip(members = SortedSet(dc1a1.copy(Down), dc1b1)) - g.leaderOf("dc1", g.members, dc1b1.uniqueAddress) should ===(Some(dc1b1.uniqueAddress)) + state(g, dc1b1).leaderOf(g.members) should ===(Some(dc1b1.uniqueAddress)) } "ignore cross data center unreachability when determining data center leader" in { @@ -329,15 +329,11 @@ class GossipSpec extends WordSpec with Matchers { val g = Gossip(members = SortedSet(dc1a1, dc1b1, dc2c1, dc2d1), overview = GossipOverview(reachability = r1)) - g.leaderOf("dc1", g.members, dc1a1.uniqueAddress) should ===(Some(dc1a1.uniqueAddress)) - g.leaderOf("dc1", g.members, dc1b1.uniqueAddress) should ===(Some(dc1a1.uniqueAddress)) - g.leaderOf("dc1", g.members, dc2c1.uniqueAddress) should ===(Some(dc1a1.uniqueAddress)) - g.leaderOf("dc1", g.members, dc2d1.uniqueAddress) should ===(Some(dc1a1.uniqueAddress)) + state(g, dc1a1).leaderOf(g.members) should ===(Some(dc1a1.uniqueAddress)) + state(g, dc1b1).leaderOf(g.members) should ===(Some(dc1a1.uniqueAddress)) - g.leaderOf("dc2", g.members, dc1a1.uniqueAddress) should ===(Some(dc2c1.uniqueAddress)) - g.leaderOf("dc2", g.members, dc1b1.uniqueAddress) should ===(Some(dc2c1.uniqueAddress)) - g.leaderOf("dc2", g.members, dc2c1.uniqueAddress) should ===(Some(dc2c1.uniqueAddress)) - g.leaderOf("dc2", g.members, dc2d1.uniqueAddress) should ===(Some(dc2c1.uniqueAddress)) + state(g, dc2c1).leaderOf(g.members) should ===(Some(dc2c1.uniqueAddress)) + state(g, dc2d1).leaderOf(g.members) should ===(Some(dc2c1.uniqueAddress)) } // TODO test coverage for when leaderOf returns None - I have not been able to figure it out diff --git a/project/MiMa.scala b/project/MiMa.scala index 73bf9c5897..92e229c589 100644 --- a/project/MiMa.scala +++ b/project/MiMa.scala @@ -1221,14 +1221,6 @@ object MiMa extends AutoPlugin { // #22881 Make sure connections are aborted correctly on Windows 
ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.io.ChannelRegistration.cancel"), - // #23231 multi-DC Sharding - ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.cluster.ddata.Replicator.leader"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ddata.Replicator.receiveLeaderChanged"), - ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.cluster.ddata.Replicator.leader_="), - FilterAnyProblemStartingWith("akka.cluster.sharding.ClusterShardingGuardian"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.sharding.ShardRegion.proxyProps"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.sharding.ShardRegion.this"), - // #23144 recoverWithRetries cleanup ProblemFilters.exclude[DirectMissingMethodProblem]("akka.stream.impl.fusing.RecoverWith.InfiniteRetries"), @@ -1237,23 +1229,29 @@ object MiMa extends AutoPlugin { // #23023 added a new overload with implementation to trait, so old transport implementations compiled against // older versions will be missing the method. We accept that incompatibility for now. - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.remote.transport.AssociationHandle.disassociate"), - + ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.remote.transport.AssociationHandle.disassociate") + ), + "2.5.3" -> Seq( + // #23231 multi-DC Sharding + ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.cluster.ddata.Replicator.leader"), + ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ddata.Replicator.receiveLeaderChanged"), + ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.cluster.ddata.Replicator.leader_="), + FilterAnyProblemStartingWith("akka.cluster.sharding.ClusterShardingGuardian"), + ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.sharding.ShardRegion.proxyProps"), + ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.sharding.ShardRegion.this"), + // #23228 single leader per cluster data center - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.Gossip.apply"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.Gossip.copy"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.Gossip.this"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.Gossip.convergence"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.Gossip.isLeader"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.Gossip.leader"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.Gossip.leaderOf"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.Gossip.roleLeader"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ClusterCoreDaemon.NumberOfGossipsBeforeShutdownWhenLeaderExits"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ClusterCoreDaemon.vclockName"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ClusterCoreDaemon.MaxGossipsBeforeShuttingDownMyself"), + FilterAnyProblemStartingWith("akka.cluster.Gossip"), + FilterAnyProblemStartingWith("akka.cluster.ClusterCoreDaemon"), + FilterAnyProblemStartingWith("akka.cluster.ClusterDomainEventPublisher"), + FilterAnyProblemStartingWith("akka.cluster.InternalClusterAction"), + ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ClusterEvent.diffReachable"), ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ClusterEvent.diffLeader"), 
ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ClusterEvent.diffRolesLeader"), ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ClusterEvent.diffSeen"), + ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.cluster.ClusterEvent.diffReachability"), + ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ClusterEvent.diffUnreachable"), + ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.cluster.ClusterEvent.diffMemberEvents"), ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.cluster.protobuf.msg.ClusterMessages#GossipOrBuilder.getTombstonesCount"), ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.cluster.protobuf.msg.ClusterMessages#GossipOrBuilder.getTombstones"), ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.cluster.protobuf.msg.ClusterMessages#GossipOrBuilder.getTombstonesList"), From ab3efff3bd5268e5a730008222405b895dd115e9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johan=20Andr=C3=A9n?= Date: Wed, 5 Jul 2017 11:08:55 +0200 Subject: [PATCH 12/34] MultiDcSplitBrainSpec fixed #23288 --- .../akka/cluster/MultiDcSplitBrainSpec.scala | 46 ++++++++++--------- .../akka/cluster/MultiNodeClusterSpec.scala | 4 +- 2 files changed, 28 insertions(+), 22 deletions(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcSplitBrainSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcSplitBrainSpec.scala index 86b75dbe09..4b0700beb8 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcSplitBrainSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcSplitBrainSpec.scala @@ -3,6 +3,7 @@ */ package akka.cluster +import akka.remote.testconductor.RoleName import akka.remote.testkit.{ MultiNodeConfig, MultiNodeSpec } import akka.remote.transport.ThrottlerTransportAdapter.Direction import com.typesafe.config.ConfigFactory @@ -15,28 +16,29 @@ object MultiDcSplitBrainMultiJvmSpec extends MultiNodeConfig { val third = role("third") val fourth = role("fourth") - commonConfig(MultiNodeClusterSpec.clusterConfig) + commonConfig(ConfigFactory.parseString( + """ + akka.loglevel = INFO + akka.cluster.run-coordinated-shutdown-when-down = off + """).withFallback(MultiNodeClusterSpec.clusterConfig)) nodeConfig(first, second)(ConfigFactory.parseString( """ akka.cluster.data-center = "dc1" - akka.loglevel = INFO """)) nodeConfig(third, fourth)(ConfigFactory.parseString( """ akka.cluster.data-center = "dc2" - akka.loglevel = INFO """)) testTransport(on = true) } -class MultiDcSplitBrainMultiJvmNode1 extends MultiDcSpec -class MultiDcSplitBrainMultiJvmNode2 extends MultiDcSpec -class MultiDcSplitBrainMultiJvmNode3 extends MultiDcSpec -class MultiDcSplitBrainMultiJvmNode4 extends MultiDcSpec -class MultiDcSplitBrainMultiJvmNode5 extends MultiDcSpec +class MultiDcSplitBrainMultiJvmNode1 extends MultiDcSplitBrainSpec +class MultiDcSplitBrainMultiJvmNode2 extends MultiDcSplitBrainSpec +class MultiDcSplitBrainMultiJvmNode3 extends MultiDcSplitBrainSpec +class MultiDcSplitBrainMultiJvmNode4 extends MultiDcSplitBrainSpec abstract class MultiDcSplitBrainSpec extends MultiNodeSpec(MultiDcSplitBrainMultiJvmSpec) @@ -47,7 +49,7 @@ abstract class MultiDcSplitBrainSpec val dc1 = List(first, second) val dc2 = List(third, fourth) - def splitDataCenters(): Unit = { + def splitDataCenters(dc1: Seq[RoleName], dc2: Seq[RoleName]): Unit = { runOn(first) { for { dc1Node ← dc1 @@ -58,15 +60,15 @@ abstract class MultiDcSplitBrainSpec } runOn(dc1: _*) { - 
awaitAssert(clusterView.unreachableMembers.map(_.address) should ===(dc2.map(address))) + awaitAssert(clusterView.unreachableMembers.map(_.address) should contain allElementsOf (dc2.map(address))) } runOn(dc2: _*) { - awaitAssert(clusterView.unreachableMembers.map(_.address) should ===(dc1.map(address))) + awaitAssert(clusterView.unreachableMembers.map(_.address) should contain allElementsOf (dc1.map(address))) } } - def unsplitDataCenters(): Unit = { + def unsplitDataCenters(dc1: Seq[RoleName], dc2: Seq[RoleName]): Unit = { runOn(first) { for { dc1Node ← dc1 @@ -76,7 +78,9 @@ abstract class MultiDcSplitBrainSpec } } - awaitAllReachable() + runOn(dc1 ++ dc2: _*) { + awaitAssert(clusterView.unreachableMembers.map(_.address) should be(empty)) + } } "A cluster with multiple data centers" must { @@ -86,7 +90,7 @@ abstract class MultiDcSplitBrainSpec "be able to have a data center member join while there is inter data center split" in within(20.seconds) { // introduce a split between data centers - splitDataCenters() + splitDataCenters(dc1 = List(first, second), dc2 = List(third)) enterBarrier("data-center-split-1") runOn(fourth) { @@ -99,36 +103,36 @@ abstract class MultiDcSplitBrainSpec runOn(third, fourth) { awaitAssert(clusterView.members.collect { case m if m.dataCenter == "dc2" && m.status == MemberStatus.Up ⇒ m.address - }) should ===(Set(address(third), address(fourth))) + } should ===(Set(address(third), address(fourth)))) } enterBarrier("dc2-join-completed") - unsplitDataCenters() + unsplitDataCenters(dc1 = List(first, second), dc2 = List(third)) enterBarrier("data-center-unsplit-1") runOn(dc1: _*) { awaitAssert(clusterView.members.collect { case m if m.dataCenter == "dc2" && m.status == MemberStatus.Up ⇒ m.address - }) should ===(Set(address(third), address(fourth))) + } should ===(Set(address(third), address(fourth)))) } enterBarrier("inter-data-center-split-1-done") } "be able to have data center member leave while there is inter data center split" in within(20.seconds) { - splitDataCenters() + splitDataCenters(dc1, dc2) enterBarrier("data-center-split-2") runOn(fourth) { - cluster.leave(third) + cluster.leave(fourth) } - runOn(third, fourth) { + runOn(third) { awaitAssert(clusterView.members.filter(_.address == address(fourth)) should ===(Set.empty)) } enterBarrier("node-4-left") - unsplitDataCenters() + unsplitDataCenters(dc1, List(third)) enterBarrier("data-center-unsplit-2") runOn(first, second) { diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala index b249676e18..cb7f0e666b 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala @@ -305,7 +305,9 @@ trait MultiNodeClusterSpec extends Suite with STMultiNodeSpec with WatchedByCoro awaitAssert(clusterView.members.size should ===(numberOfMembers)) awaitAssert(clusterView.members.map(_.status) should ===(Set(MemberStatus.Up))) // clusterView.leader is updated by LeaderChanged, await that to be updated also - val expectedLeader = clusterView.members.headOption.map(_.address) + val expectedLeader = clusterView.members.collectFirst { + case m if m.dataCenter == cluster.settings.DataCenter ⇒ m.address + } awaitAssert(clusterView.leader should ===(expectedLeader)) } } From 0b1ce7223da19dac94b6f6676f5e4663dd2b208d Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Wed, 5 Jul 2017 14:52:42 +0200 Subject: [PATCH 13/34] fix 
usage of the leader sorted set in Replicator * since the ordering can change based on the member's status, it's not possible to use ordinary - for removal * similar issue at a few places where ageOrdering was used --- .../scala/akka/cluster/sharding/ShardRegion.scala | 14 ++++++++++++-- .../cluster/singleton/ClusterSingletonProxy.scala | 8 +++++--- .../main/scala/akka/cluster/ddata/Replicator.scala | 8 +++++--- 3 files changed, 22 insertions(+), 8 deletions(-) diff --git a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardRegion.scala b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardRegion.scala index 8de68d4cd4..bc3a42d1e8 100644 --- a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardRegion.scala +++ b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardRegion.scala @@ -479,13 +479,23 @@ private[akka] class ShardRegion( def receiveClusterEvent(evt: ClusterDomainEvent): Unit = evt match { case MemberUp(m) ⇒ if (matchingRole(m)) - changeMembers(membersByAge - m + m) // replace + changeMembers { + // replace, it's possible that the upNumber is changed + membersByAge = membersByAge.filterNot(_.uniqueAddress == m.uniqueAddress) + membersByAge += m + membersByAge + } case MemberRemoved(m, _) ⇒ if (m.uniqueAddress == cluster.selfUniqueAddress) context.stop(self) else if (matchingRole(m)) - changeMembers(membersByAge - m) + changeMembers { + // filter, it's possible that the upNumber is changed + membersByAge = membersByAge.filterNot(_.uniqueAddress == m.uniqueAddress) + membersByAge += m + membersByAge + } case _: MemberEvent ⇒ // these are expected, no need to warn about them diff --git a/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonProxy.scala b/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonProxy.scala index 0c017d31dd..57dd041218 100644 --- a/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonProxy.scala +++ b/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonProxy.scala @@ -222,7 +222,8 @@ final class ClusterSingletonProxy(singletonManagerPath: String, settings: Cluste def add(m: Member): Unit = { if (matchingRole(m)) trackChange { () ⇒ - membersByAge -= m // replace + // replace, it's possible that the upNumber is changed + membersByAge = membersByAge.filterNot(_.uniqueAddress == m.uniqueAddress) membersByAge += m } } @@ -233,8 +234,9 @@ final class ClusterSingletonProxy(singletonManagerPath: String, settings: Cluste */ def remove(m: Member): Unit = { if (matchingRole(m)) - trackChange { - () ⇒ membersByAge -= m + trackChange { () ⇒ + // filter, it's possible that the upNumber is changed + membersByAge = membersByAge.filterNot(_.uniqueAddress == m.uniqueAddress) + } } diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/Replicator.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/Replicator.scala index ca79c9be89..0c2291c936 100644 --- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/Replicator.scala +++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/Replicator.scala @@ -1740,7 +1740,8 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog if (m.address == selfAddress) context stop self else if (matchingRole(m)) { - leader -= m + // filter, it's possible that the ordering is changed since it is based on MemberStatus + leader = leader.filterNot(_.uniqueAddress == m.uniqueAddress) nodes -= m.address weaklyUpNodes -= m.address log.debug("adding removed node [{}]
from MemberRemoved", m.uniqueAddress) @@ -1752,8 +1753,9 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog def receiveOtherMemberEvent(m: Member): Unit = if (matchingRole(m)) { - // update changed status - leader = (leader - m) + m + // replace, it's possible that the ordering is changed since it is based on MemberStatus + leader = leader.filterNot(_.uniqueAddress == m.uniqueAddress) + leader += m } def receiveUnreachable(m: Member): Unit = From a7ed5ce6b03f4617e286a54c5f55c327434a9212 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Thu, 6 Jul 2017 11:02:49 +0200 Subject: [PATCH 14/34] fix copy-pasta mistake in ShardRegion --- .../src/main/scala/akka/cluster/sharding/ShardRegion.scala | 1 - 1 file changed, 1 deletion(-) diff --git a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardRegion.scala b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardRegion.scala index bc3a42d1e8..380f6d8c56 100644 --- a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardRegion.scala +++ b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardRegion.scala @@ -493,7 +493,6 @@ private[akka] class ShardRegion( changeMembers { // filter, it's possible that the upNumber is changed membersByAge = membersByAge.filterNot(_.uniqueAddress == m.uniqueAddress) - membersByAge += m membersByAge } From b568975accc562413f621bcf956ca0b59c74dc11 Mon Sep 17 00:00:00 2001 From: Konrad `ktoso` Malawski Date: Fri, 7 Jul 2017 13:17:41 +0200 Subject: [PATCH 15/34] =clu #23229 multi-dc heartbeating, only N nodes perform monitoring --- .../src/main/resources/reference.conf | 35 ++ .../src/main/scala/akka/cluster/Cluster.scala | 17 +- .../scala/akka/cluster/ClusterDaemon.scala | 12 +- .../scala/akka/cluster/ClusterHeartbeat.scala | 54 ++- .../scala/akka/cluster/ClusterSettings.scala | 18 +- .../cluster/CrossDcClusterHeartbeat.scala | 318 ++++++++++++++++++ .../src/main/scala/akka/cluster/Member.scala | 2 +- .../cluster/MultiDcSunnyWeatherSpec.scala | 145 ++++++++ 8 files changed, 574 insertions(+), 27 deletions(-) create mode 100644 akka-cluster/src/main/scala/akka/cluster/CrossDcClusterHeartbeat.scala create mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcSunnyWeatherSpec.scala diff --git a/akka-cluster/src/main/resources/reference.conf b/akka-cluster/src/main/resources/reference.conf index 2bb2bf6766..dc2847b258 100644 --- a/akka-cluster/src/main/resources/reference.conf +++ b/akka-cluster/src/main/resources/reference.conf @@ -205,6 +205,41 @@ akka { } + # Configures multi-dc specific heartbeating and other mechanisms, + # many of them have a direct counterpart in "one datacenter mode", + # in which case these settings would not be used at all - they only apply + # if your cluster nodes are configured with at least 2 different `akka.cluster.data-center` values. + multi-data-center { + + failure-detector { + # FQCN of the failure detector implementation. + # It must implement akka.remote.FailureDetector and have + # a public constructor with a com.typesafe.config.Config and + # akka.actor.EventStream parameter. + implementation-class = "akka.remote.DeadlineFailureDetector" + + # Number of potentially lost/delayed heartbeats that will be + # accepted before considering it to be an anomaly. + # This margin is important to be able to survive sudden, occasional, + # pauses in heartbeat arrivals, due to for example garbage collect or + # network drop.
+ acceptable-heartbeat-pause = 10 s + + # How often keep-alive heartbeat messages should be sent to each connection. + heartbeat-interval = 3 s + + # After the heartbeat request has been sent the first failure detection + # will start after this period, even though no heartbeat message has + # been received. + expected-response-after = 1 s + + # Maximum number of oldest members in a data center that will monitor other (oldest nodes in other) data centers. + # This is done to lessen the cross data center communication, as only those top-n-oldest nodes + # need to maintain connections to the other data-centers. + nr-of-monitoring-members = 5 + } + } + # If the tick-duration of the default scheduler is longer than the # tick-duration configured here a dedicated scheduler will be used for # periodic tasks of the cluster, otherwise the default scheduler is used. diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index 3576cc6f0f..d4b7ddcb4d 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -10,6 +10,7 @@ import java.util.concurrent.atomic.AtomicBoolean import akka.ConfigurationException import akka.actor._ +import akka.cluster.ClusterSettings.DataCenter import akka.dispatch.MonitorableThreadFactory import akka.event.{ Logging, LoggingAdapter } import akka.japi.Util @@ -77,6 +78,9 @@ class Cluster(val system: ExtendedActorSystem) extends Extension { */ def selfAddress: Address = selfUniqueAddress.address + /** Data center to which this node belongs to (defaults to "default" if not configured explicitly) */ + def selfDataCenter: DataCenter = settings.DataCenter + /** * roles that this member has */ @@ -96,10 +100,17 @@ class Cluster(val system: ExtendedActorSystem) extends Extension { logInfo("Starting up...") val failureDetector: FailureDetectorRegistry[Address] = { - def createFailureDetector(): FailureDetector = + val createFailureDetector = () ⇒ FailureDetectorLoader.load(settings.FailureDetectorImplementationClass, settings.FailureDetectorConfig, system) - new DefaultFailureDetectorRegistry(() ⇒ createFailureDetector()) + new DefaultFailureDetectorRegistry(createFailureDetector) + } + + val crossDcFailureDetector: FailureDetectorRegistry[Address] = { + val createFailureDetector = () ⇒ + FailureDetectorLoader.load(settings.CrossDcFailureDetectorSettings.ImplementationClass, settings.CrossDcFailureDetectorSettings.config, system) + + new DefaultFailureDetectorRegistry(createFailureDetector) } // needs to be lazy to allow downing provider impls to access Cluster (if not we get deadlock) @@ -411,7 +422,7 @@ class Cluster(val system: ExtendedActorSystem) extends Extension { private def closeScheduler(): Unit = scheduler match { case x: Closeable ⇒ x.close() - case _ ⇒ + case _ ⇒ // ignore, this is fine } /** diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterDaemon.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterDaemon.scala index 5f8d539235..80391ec612 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterDaemon.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterDaemon.scala @@ -296,6 +296,8 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef) extends Actor with import cluster.settings._ import cluster.InfoLogger._ + val selfDc = cluster.selfDataCenter + protected def selfUniqueAddress = cluster.selfUniqueAddress val vclockNode = VectorClock.Node(vclockName(selfUniqueAddress)) @@ -334,8 +336,6 @@ 
private[cluster] class ClusterCoreDaemon(publisher: ActorRef) extends Actor with } var exitingConfirmed = Set.empty[UniqueAddress] - def selfDc = cluster.settings.DataCenter - /** * Looks up and returns the remote cluster command connection for the specific address. */ @@ -431,8 +431,12 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef) extends Actor with def becomeInitialized(): Unit = { // start heartbeatSender here, and not in constructor to make sure that // heartbeating doesn't start before Welcome is received - context.actorOf(Props[ClusterHeartbeatSender]. - withDispatcher(UseDispatcher), name = "heartbeatSender") + val internalHeartbeatSenderProps = Props(new ClusterHeartbeatSender()).withDispatcher(UseDispatcher) + context.actorOf(internalHeartbeatSenderProps, name = "heartbeatSender") + + val externalHeartbeatProps = Props(new CrossDcHeartbeatSender()).withDispatcher(UseDispatcher) + context.actorOf(externalHeartbeatProps, name = "crossDcHeartbeatSender") + // make sure that join process is stopped stopSeedNodeProcess() context.become(initialized) diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterHeartbeat.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterHeartbeat.scala index bc3ac1bffc..a6a454a7f5 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterHeartbeat.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterHeartbeat.scala @@ -5,17 +5,18 @@ package akka.cluster import scala.annotation.tailrec import scala.collection.immutable -import akka.actor.{ ActorLogging, ActorSelection, Address, Actor, RootActorPath } +import akka.actor.{ Actor, ActorLogging, ActorPath, ActorRef, ActorSelection, Address, DeadLetterSuppression, RootActorPath } import akka.cluster.ClusterEvent._ import akka.remote.FailureDetectorRegistry import akka.remote.HeartbeatMessage -import akka.actor.DeadLetterSuppression +import akka.annotation.InternalApi /** * INTERNAL API. * * Receives Heartbeat messages and replies. */ +@InternalApi private[cluster] final class ClusterHeartbeatReceiver extends Actor with ActorLogging { import ClusterHeartbeatSender._ @@ -29,6 +30,15 @@ private[cluster] final class ClusterHeartbeatReceiver extends Actor with ActorLo } +/** INTERNAL API: Utilities to obtain ClusterHeartbeatReceiver paths */ +@InternalApi +private[cluster] object ClusterHeartbeatReceiver { + + def name: String = "heartbeatReceiver" + def path(address: Address): ActorPath = + RootActorPath(address) / "system" / "cluster" / name +} + /** * INTERNAL API */ @@ -65,12 +75,14 @@ private[cluster] final class ClusterHeartbeatSender extends Actor with ActorLogg import cluster.settings._ import context.dispatcher - // the failureDetector is only updated by this actor, but read from other places - val failureDetector = Cluster(context.system).failureDetector + val filterInternalClusterMembers: Member ⇒ Boolean = + _.dataCenter == cluster.selfDataCenter val selfHeartbeat = Heartbeat(selfAddress) - var state = ClusterHeartbeatSenderState( + val failureDetector = cluster.failureDetector + + var state: ClusterHeartbeatSenderState = ClusterHeartbeatSenderState( ring = HeartbeatNodeRing(selfUniqueAddress, Set(selfUniqueAddress), Set.empty, MonitoredByNrOfMembers), oldReceiversNowUnreachable = Set.empty[UniqueAddress], failureDetector) @@ -94,7 +106,7 @@ private[cluster] final class ClusterHeartbeatSender extends Actor with ActorLogg * Looks up and returns the remote cluster heartbeat connection for the specific address. 
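As a minimal sketch of the path helper introduced above (the protocol, system name and address are made up for the example), the heartbeat receiver of any cluster node is always found at the same system path:

    import akka.actor.{ Address, RootActorPath }

    // Hypothetical remote node address; only used to show the resulting path.
    val node = Address("akka.tcp", "ClusterSystem", "10.0.0.7", 2552)

    // Same construction as ClusterHeartbeatReceiver.path(address):
    val receiverPath = RootActorPath(node) / "system" / "cluster" / "heartbeatReceiver"
    // => akka.tcp://ClusterSystem@10.0.0.7:2552/system/cluster/heartbeatReceiver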
*/ def heartbeatReceiver(address: Address): ActorSelection = - context.actorSelection(RootActorPath(address) / "system" / "cluster" / "heartbeatReceiver") + context.actorSelection(ClusterHeartbeatReceiver.path(address)) def receive = initializing @@ -116,22 +128,28 @@ private[cluster] final class ClusterHeartbeatSender extends Actor with ActorLogg } def init(snapshot: CurrentClusterState): Unit = { - val nodes: Set[UniqueAddress] = snapshot.members.map(_.uniqueAddress) - val unreachable: Set[UniqueAddress] = snapshot.unreachable.map(_.uniqueAddress) + val nodes = snapshot.members.collect { case m if filterInternalClusterMembers(m) ⇒ m.uniqueAddress } + val unreachable = snapshot.unreachable.collect { case m if filterInternalClusterMembers(m) ⇒ m.uniqueAddress } state = state.init(nodes, unreachable) } def addMember(m: Member): Unit = - if (m.uniqueAddress != selfUniqueAddress && !state.contains(m.uniqueAddress)) + if (m.uniqueAddress != selfUniqueAddress && // is not self + !state.contains(m.uniqueAddress) && // not already added + filterInternalClusterMembers(m) // should be watching members from this DC (internal / external) + ) { state = state.addMember(m.uniqueAddress) + } def removeMember(m: Member): Unit = - if (m.uniqueAddress == cluster.selfUniqueAddress) { - // This cluster node will be shutdown, but stop this actor immediately - // to avoid further updates - context stop self - } else { - state = state.removeMember(m.uniqueAddress) + if (filterInternalClusterMembers(m)) { // we only ever deal with internal cluster members here + if (m.uniqueAddress == cluster.selfUniqueAddress) { + // This cluster node will be shutdown, but stop this actor immediately + // to avoid further updates + context stop self + } else { + state = state.removeMember(m.uniqueAddress) + } } def unreachableMember(m: Member): Unit = @@ -142,7 +160,7 @@ private[cluster] final class ClusterHeartbeatSender extends Actor with ActorLogg def heartbeat(): Unit = { state.activeReceivers foreach { to ⇒ - if (cluster.failureDetector.isMonitoring(to.address)) { + if (failureDetector.isMonitoring(to.address)) { if (verboseHeartbeat) log.debug("Cluster Node [{}] - Heartbeat to [{}]", selfAddress, to.address) } else { if (verboseHeartbeat) log.debug("Cluster Node [{}] - First Heartbeat to [{}]", selfAddress, to.address) @@ -152,7 +170,6 @@ private[cluster] final class ClusterHeartbeatSender extends Actor with ActorLogg } heartbeatReceiver(to.address) ! selfHeartbeat } - } def heartbeatRsp(from: UniqueAddress): Unit = { @@ -173,6 +190,7 @@ private[cluster] final class ClusterHeartbeatSender extends Actor with ActorLogg * State of [[ClusterHeartbeatSender]]. Encapsulated to facilitate unit testing. * It is immutable, but it updates the failureDetector. */ +@InternalApi private[cluster] final case class ClusterHeartbeatSenderState( ring: HeartbeatNodeRing, oldReceiversNowUnreachable: Set[UniqueAddress], @@ -262,7 +280,7 @@ private[cluster] final case class HeartbeatNodeRing( /** * Receivers for `selfAddress`. Cached for subsequent access. 
*/ - lazy val myReceivers: immutable.Set[UniqueAddress] = receivers(selfAddress) + lazy val myReceivers: Set[UniqueAddress] = receivers(selfAddress) private val useAllAsReceivers = monitoredByNrOfMembers >= (nodeRing.size - 1) diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala index b7106526ca..df37649d2b 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala @@ -10,7 +10,7 @@ import com.typesafe.config.ConfigObject import scala.concurrent.duration.Duration import akka.actor.Address import akka.actor.AddressFromURIString -import akka.annotation.InternalApi +import akka.annotation.{ DoNotInherit, InternalApi } import akka.dispatch.Dispatchers import akka.util.Helpers.{ ConfigOps, Requiring, toRootLowerCase } @@ -117,6 +117,21 @@ final class ClusterSettings(val config: Config, val systemName: String) { val AllowWeaklyUpMembers = cc.getBoolean("allow-weakly-up-members") val DataCenter: DataCenter = cc.getString("data-center") + + final class CrossDcFailureDetectorSettings(val config: Config) { + val ImplementationClass: String = config.getString("implementation-class") + val HeartbeatInterval: FiniteDuration = { + config.getMillisDuration("heartbeat-interval") + } requiring (_ > Duration.Zero, "failure-detector.heartbeat-interval must be > 0") + val HeartbeatExpectedResponseAfter: FiniteDuration = { + config.getMillisDuration("expected-response-after") + } requiring (_ > Duration.Zero, "failure-detector.expected-response-after > 0") + val NrOfMonitoringActors: Int = { + config.getInt("nr-of-monitoring-members") + } requiring (_ > 0, "failure-detector.nr-of-monitoring-members must be > 0") + } + val CrossDcFailureDetectorSettings = new CrossDcFailureDetectorSettings(cc.getConfig("multi-data-center.failure-detector")) + val Roles: Set[String] = { val configuredRoles = (immutableSeq(cc.getStringList("roles")).toSet) requiring ( _.forall(!_.startsWith(DcRolePrefix)), @@ -124,6 +139,7 @@ final class ClusterSettings(val config: Config, val systemName: String) { configuredRoles + s"$DcRolePrefix$DataCenter" } + val MinNrOfMembers: Int = { cc.getInt("min-nr-of-members") } requiring (_ > 0, "min-nr-of-members must be > 0") diff --git a/akka-cluster/src/main/scala/akka/cluster/CrossDcClusterHeartbeat.scala b/akka-cluster/src/main/scala/akka/cluster/CrossDcClusterHeartbeat.scala new file mode 100644 index 0000000000..b022051bb6 --- /dev/null +++ b/akka-cluster/src/main/scala/akka/cluster/CrossDcClusterHeartbeat.scala @@ -0,0 +1,318 @@ +/* + * Copyright (C) 2017 Lightbend Inc. + */ + +package akka.cluster + +import akka.actor.{ Actor, ActorLogging, ActorSelection, Address, NoSerializationVerificationNeeded, RootActorPath } +import akka.annotation.InternalApi +import akka.cluster.ClusterEvent._ +import akka.cluster.ClusterSettings.DataCenter +import akka.event.Logging +import akka.remote.FailureDetectorRegistry +import akka.util.ConstantFun + +import scala.collection.{ SortedSet, immutable, breakOut } + +/** + * INTERNAL API + * + * This actor is will be started on all nodes participating in a cluster, + * however unlike the within-dc heartbeat sender ([[ClusterHeartbeatSender]]), + * it will only actively work on `n` "oldest" nodes of a given data center. + * + * It will monitor it's oldest counterparts in other data centers. 
+ * For example, a DC configured to have (up to) 4 monitoring actors, + * will have 4 such active at any point in time, and those will monitor + * the (at most) 4 oldest nodes of each data center. + * + * This monitoring mode is both simple and predictable, and also uses the assumption that + * "nodes which stay around for a long time, become old", and those rarely change. In a way, + * they are the "core" of a cluster, while other nodes may be very dynamically changing worked + * nodes which aggresively come and go as the traffic in the service changes. + */ +@InternalApi +private[cluster] final class CrossDcHeartbeatSender extends Actor with ActorLogging { + import CrossDcHeartbeatSender._ + + val cluster = Cluster(context.system) + val verboseHeartbeat = cluster.settings.Debug.VerboseHeartbeatLogging + import cluster.settings._ + import cluster.{ scheduler, selfAddress, selfDataCenter, selfUniqueAddress } + import context.dispatcher + + // For inspecting if in active state; allows avoiding "becoming active" when already active + var activelyMonitoring = false + + val isExternalClusterMember: Member ⇒ Boolean = + member ⇒ member.dataCenter != cluster.selfDataCenter + + val crossDcSettings: cluster.settings.CrossDcFailureDetectorSettings = cluster.settings.CrossDcFailureDetectorSettings + val crossDcFailureDetector = cluster.crossDcFailureDetector + + val selfHeartbeat = ClusterHeartbeatSender.Heartbeat(selfAddress) + + var dataCentersState: CrossDcHeartbeatingState = CrossDcHeartbeatingState.init( + crossDcFailureDetector, + crossDcSettings.NrOfMonitoringActors, + SortedSet.empty + ) + + // start periodic heartbeat to other nodes in cluster + val heartbeatTask = scheduler.schedule( + PeriodicTasksInitialDelay max HeartbeatInterval, + HeartbeatInterval, self, ClusterHeartbeatSender.HeartbeatTick) + + override def preStart(): Unit = { + cluster.subscribe(self, classOf[MemberEvent]) + if (verboseHeartbeat) log.debug("Initialized cross-dc heartbeat sender as DORMANT in DC: [{}]", selfDataCenter) + } + + override def postStop(): Unit = { + dataCentersState.activeReceivers.foreach(a ⇒ crossDcFailureDetector.remove(a.address)) + heartbeatTask.cancel() + cluster.unsubscribe(self) + } + + /** + * Looks up and returns the remote cluster heartbeat connection for the specific address. + */ + def heartbeatReceiver(address: Address): ActorSelection = + context.actorSelection(ClusterHeartbeatReceiver.path(address)) + + def receive: Actor.Receive = + dormant orElse introspecting + + /** + * In this state no cross-datacenter heartbeats are sent by this actor. + * This may be because one of those reasons: + * - no nodes in other DCs were detected yet + * - nodes in other DCs are present, but this node is not tht n-th oldest in this DC (see + * `number-of-cross-datacenter-monitoring-actors`), so it does not have to monitor that other data centers + * + * In this state it will however listen to cluster events to eventually take over monitoring other DCs + * in case it becomes "old enough". + */ + def dormant: Actor.Receive = { + case s: CurrentClusterState ⇒ init(s) + case MemberRemoved(m, _) ⇒ removeMember(m) + case evt: MemberEvent ⇒ addMember(evt.member) + case ClusterHeartbeatSender.HeartbeatTick ⇒ // ignore... 
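The "n oldest nodes per data center" rule described in the class comment above can be illustrated with a small standalone sketch; Node is a simplified stand-in for the real Member class, and ordering by up-number is an assumption made only for this example:

    // Simplified illustration of selecting the n oldest members per data center
    // (CrossDcHeartbeatingState keeps one SortedSet per DC using Member.ageOrdering).
    final case class Node(dataCenter: String, upNumber: Int)

    def oldestPerDc(nodes: Vector[Node], n: Int): Map[String, Vector[Node]] =
      nodes
        .groupBy(_.dataCenter)
        .map { case (dc, members) => dc -> members.sortBy(_.upNumber).take(n) }

    // oldestPerDc(Vector(Node("alpha", 1), Node("alpha", 2), Node("alpha", 3), Node("beta", 1)), n = 2)
    // => Map("alpha" -> Vector(Node("alpha", 1), Node("alpha", 2)), "beta" -> Vector(Node("beta", 1)))
    // Only those oldest nodes become active cross-DC monitors; all others stay dormant.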
+ } + + def active: Actor.Receive = { + case ClusterHeartbeatSender.HeartbeatTick ⇒ heartbeat() + case ClusterHeartbeatSender.HeartbeatRsp(from) ⇒ heartbeatRsp(from) + case MemberRemoved(m, _) ⇒ removeMember(m) + case evt: MemberEvent ⇒ addMember(evt.member) + case ClusterHeartbeatSender.ExpectedFirstHeartbeat(from) ⇒ triggerFirstHeartbeat(from) + } + + def introspecting: Actor.Receive = { + case ReportStatus() ⇒ + sender() ! { + if (activelyMonitoring) CrossDcHeartbeatSender.MonitoringActive(dataCentersState) + else CrossDcHeartbeatSender.MonitoringDormant() + } + } + + def init(snapshot: CurrentClusterState): Unit = { + // val unreachable = snapshot.unreachable.collect({ case m if isExternalClusterMember(m) => m.uniqueAddress }) + // nr of monitored nodes is the same as the number of monitoring nodes (`n` oldest in one DC watch `n` oldest in other) + val nodes = snapshot.members + val nrOfMonitoredNodes = crossDcSettings.NrOfMonitoringActors + dataCentersState = CrossDcHeartbeatingState.init(crossDcFailureDetector, nrOfMonitoredNodes, nodes) + } + + def addMember(m: Member): Unit = + if (m.status != MemberStatus.Joining && m.status != MemberStatus.WeaklyUp) { + // since we only monitor nodes in Up or later states, due to the n-th oldest requirement + dataCentersState = dataCentersState.addMember(m) + if (verboseHeartbeat && m.dataCenter != selfDataCenter) + log.debug("Register member {} for cross DC heartbeat (will only heartbeat if oldest)", m) + + becomeActiveIfResponsibleForHeartbeat() + } + + def removeMember(m: Member): Unit = + if (m.uniqueAddress == cluster.selfUniqueAddress) { + // This cluster node will be shutdown, but stop this actor immediately to avoid further updates + context stop self + } else { + dataCentersState = dataCentersState.removeMember(m) + becomeActiveIfResponsibleForHeartbeat() + } + + def heartbeat(): Unit = { + dataCentersState.activeReceivers foreach { to ⇒ + if (crossDcFailureDetector.isMonitoring(to.address)) { + if (verboseHeartbeat) log.debug("Cluster Node [{}][{}] - (Cross) Heartbeat to [{}]", selfDataCenter, selfAddress, to.address) + } else { + if (verboseHeartbeat) log.debug("Cluster Node [{}][{}] - First (Cross) Heartbeat to [{}]", selfDataCenter, selfAddress, to.address) + // schedule the expected first heartbeat for later, which will give the + // other side a chance to reply, and also trigger some resends if needed + scheduler.scheduleOnce(HeartbeatExpectedResponseAfter, self, ClusterHeartbeatSender.ExpectedFirstHeartbeat(to)) + } + heartbeatReceiver(to.address) ! 
selfHeartbeat + } + } + + def heartbeatRsp(from: UniqueAddress): Unit = { + if (verboseHeartbeat) log.debug("Cluster Node [{}][{}] - (Cross) Heartbeat response from [{}]", selfDataCenter, selfAddress, from.address) + dataCentersState = dataCentersState.heartbeatRsp(from) + } + + def triggerFirstHeartbeat(from: UniqueAddress): Unit = + if (dataCentersState.activeReceivers.contains(from) && !crossDcFailureDetector.isMonitoring(from.address)) { + if (verboseHeartbeat) log.debug("Cluster Node [{}][{}] - Trigger extra expected (cross) heartbeat from [{}]", selfAddress, from.address) + crossDcFailureDetector.heartbeat(from.address) + } + + private def selfIsResponsibleForCrossDcHeartbeat(): Boolean = { + val activeDcs: Int = dataCentersState.dataCenters.size + if (activeDcs > 1) dataCentersState.shouldActivelyMonitorNodes(selfDataCenter, selfUniqueAddress) + else false + } + + /** Idempotent, become active if this node is n-th oldest and should monitor other nodes */ + private def becomeActiveIfResponsibleForHeartbeat(): Unit = { + if (!activelyMonitoring && selfIsResponsibleForCrossDcHeartbeat()) { + if (verboseHeartbeat) log.debug("Becoming ACTIVE (for DC: {}), monitoring other DCs oldest nodes", selfDataCenter) + activelyMonitoring = true + + context.become(active orElse introspecting) + } else if (!activelyMonitoring) + if (verboseHeartbeat) log.info("Remaining DORMANT; others in {} handle heartbeating other DCs", selfDataCenter) + } + +} + +/** INTERNAL API */ +@InternalApi +private[akka] object CrossDcHeartbeatSender { + + // -- messages intended only for local messaging during testing -- + sealed trait InspectionCommand extends NoSerializationVerificationNeeded + final case class ReportStatus() + + sealed trait StatusReport extends NoSerializationVerificationNeeded + sealed trait MonitoringStateReport extends StatusReport + final case class MonitoringActive(state: CrossDcHeartbeatingState) extends MonitoringStateReport + final case class MonitoringDormant() extends MonitoringStateReport + // -- end of messages intended only for local messaging during testing -- +} + +/** INTERNAL API */ +@InternalApi +private[cluster] final case class CrossDcHeartbeatingState( + failureDetector: FailureDetectorRegistry[Address], + nrOfMonitoredNodesPerDc: Int, + state: Map[ClusterSettings.DataCenter, SortedSet[Member]]) { + import CrossDcHeartbeatingState._ + + /** + * Decides if `self` node should become active and monitor other nodes with heartbeats. + * Only the `nrOfMonitoredNodesPerDc`-oldest nodes in each DC fulfil this role. + */ + def shouldActivelyMonitorNodes(selfDc: ClusterSettings.DataCenter, selfAddress: UniqueAddress): Boolean = { + /** Since we need ordering of oldests guaranteed, we must only look at Up (or Leaving, Exiting...) 
nodes */ + def atLeastInUpState(m: Member): Boolean = + m.status != MemberStatus.WeaklyUp && m.status != MemberStatus.Joining + + val selfDcNeighbours: SortedSet[Member] = state.getOrElse(selfDc, emptyMembersSortedSet) + val selfDcOldOnes = selfDcNeighbours.filter(atLeastInUpState).take(nrOfMonitoredNodesPerDc) + + // if this node is part of the "n oldest nodes" it should indeed monitor other nodes: + val shouldMonitorActively = selfDcOldOnes.exists(_.uniqueAddress == selfAddress) + shouldMonitorActively + } + + def addMember(m: Member): CrossDcHeartbeatingState = { + val dc = m.dataCenter + + // we need to remove the member first, to avoid having "duplicates" + // this is because the removal and uniqueness we need is only by uniqueAddress + // which is not used by the `ageOrdering` + val oldMembersWithoutM = state.getOrElse(dc, emptyMembersSortedSet) + .filterNot(_.uniqueAddress == m.uniqueAddress) + + val updatedMembers = oldMembersWithoutM + m + val updatedState = this.copy(state = state.updated(dc, updatedMembers)) + + // guarding against the case of two members having the same upNumber, in which case the activeReceivers + // which are based on the ageOrdering could actually have changed by adding a node. In practice this + // should happen rarely, since upNumbers are assigned sequentially, and we only ever compare nodes + // in the same DC. If it happens though, we need to remove the previously monitored node from the failure + // detector, to prevent both a resource leak and that node actually appearing as unreachable in the gossip (!) + val stoppedMonitoringReceivers = updatedState.activeReceiversIn(dc) diff this.activeReceiversIn(dc) + stoppedMonitoringReceivers.foreach(m ⇒ failureDetector.remove(m.address)) // at most one element difference + + updatedState + } + + def removeMember(m: Member): CrossDcHeartbeatingState = { + val dc = m.dataCenter + state.get(dc) match { + case Some(dcMembers) ⇒ + val updatedMembers = dcMembers.filterNot(_.uniqueAddress == m.uniqueAddress) + + failureDetector.remove(m.address) + copy(state = state.updated(dc, updatedMembers)) + case None ⇒ + this // no change needed, was certainly not present (not even its DC was) + } + } + + val activeReceivers: Set[UniqueAddress] = + dataCenters.flatMap(k ⇒ state(k).take(nrOfMonitoredNodesPerDc).map(_.uniqueAddress)(breakOut)) + + private def activeReceiversIn(dc: DataCenter): Set[UniqueAddress] = + state.getOrElse(dc, emptyMembersSortedSet).take(nrOfMonitoredNodesPerDc).map(_.uniqueAddress)(breakOut) + + def allMembers: Iterable[Member] = + state.values.flatMap(ConstantFun.scalaIdentityFunction) + + def heartbeatRsp(from: UniqueAddress): CrossDcHeartbeatingState = { + if (activeReceivers.contains(from)) { + failureDetector heartbeat from.address + } + this + } + + def dataCenters: Set[DataCenter] = + state.keys.toSet + +} + +/** INTERNAL API */ +@InternalApi +private[cluster] object CrossDcHeartbeatingState { + + /** Sorted by age */ + private def emptyMembersSortedSet: SortedSet[Member] = SortedSet.empty[Member](Member.ageOrdering) + + def init( + crossDcFailureDetector: FailureDetectorRegistry[Address], + nrOfMonitoredNodesPerDc: Int, + members: SortedSet[Member]): CrossDcHeartbeatingState = { + CrossDcHeartbeatingState( + crossDcFailureDetector, + nrOfMonitoredNodesPerDc, + state = { + // TODO unduplicate this with other places where we do this + val groupedByDc = members.groupBy(_.dataCenter) + + if (members.ordering == Member.ageOrdering) { + // we already have the right ordering + groupedByDc + } else { + 
// we need to enforce the ageOrdering for the SortedSet in each DC + groupedByDc.map { + case (dc, ms) ⇒ + dc → (SortedSet.empty[Member](Member.ageOrdering) union ms) + } + } + }) + } + +} diff --git a/akka-cluster/src/main/scala/akka/cluster/Member.scala b/akka-cluster/src/main/scala/akka/cluster/Member.scala index c258e0a871..e784067c74 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Member.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Member.scala @@ -55,7 +55,7 @@ class Member private[cluster] ( * cluster. A member that joined after removal of another member may be * considered older than the removed member. Note that is only makes * sense to compare with other members inside of one data center (upNumber has - * a higher risk of being reused across data centers). + * a higher risk of being reused across data centers). // TODO should we enforce this to compare only within DCs? */ def isOlderThan(other: Member): Boolean = if (upNumber == other.upNumber) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcSunnyWeatherSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcSunnyWeatherSpec.scala new file mode 100644 index 0000000000..ff4eaf9156 --- /dev/null +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcSunnyWeatherSpec.scala @@ -0,0 +1,145 @@ +/** + * Copyright (C) 2009-2017 Lightbend Inc. + */ +package akka.cluster + +import akka.annotation.InternalApi +import akka.remote.testconductor.RoleName +import akka.remote.testkit.{ MultiNodeConfig, MultiNodeSpec } +import akka.testkit._ +import com.typesafe.config.ConfigFactory + +import scala.collection.immutable +import scala.collection.immutable.SortedSet +import scala.concurrent.duration._ + +object MultiDcSunnyWeatherMultiJvmSpec extends MultiNodeConfig { + val first = role("first") + val second = role("second") + val third = role("third") + val fourth = role("fourth") + val fifth = role("fifth") + + nodeConfig(first, second, third)(ConfigFactory.parseString( + """ + akka { + cluster.data-center = alpha + } + """)) + + nodeConfig(fourth, fifth)(ConfigFactory.parseString( + """ + akka { + cluster.data-center = beta + } + """)) + + commonConfig(ConfigFactory.parseString( + """ + akka { + actor.provider = cluster + + loggers = ["akka.testkit.TestEventListener"] + loglevel = INFO + + remote.log-remote-lifecycle-events = off + + cluster { + + debug.verbose-heartbeat-logging = off + + failure-detector { + monitored-by-nr-of-members = 2 + } + + multi-data-center { + failure-detector { + nr-of-monitoring-members = 2 + } + } + } + } + """)) + +} + +class MultiDcSunnyWeatherMultiJvmNode1 extends MultiDcSunnyWeatherSpec +class MultiDcSunnyWeatherMultiJvmNode2 extends MultiDcSunnyWeatherSpec +class MultiDcSunnyWeatherMultiJvmNode3 extends MultiDcSunnyWeatherSpec +class MultiDcSunnyWeatherMultiJvmNode4 extends MultiDcSunnyWeatherSpec +class MultiDcSunnyWeatherMultiJvmNode5 extends MultiDcSunnyWeatherSpec + +abstract class MultiDcSunnyWeatherSpec extends MultiNodeSpec(MultiDcSunnyWeatherMultiJvmSpec) + with MultiNodeClusterSpec { + + "A normal cluster" must { + "be healthy" taggedAs LongRunningTest in { + + val observer = TestProbe("alpha-observer") + + // allow all nodes to join: + awaitClusterUp(roles: _*) + + val crossDcHeartbeatSenderPath = "/system/cluster/core/daemon/crossDcHeartbeatSender" + val selectCrossDcHeartbeatSender = system.actorSelection(crossDcHeartbeatSenderPath) + + val expectedAlphaHeartbeaterNodes = takeNOldestMembers(_.dataCenter == "alpha", 2) + val expectedAlphaHeartbeaterRoles = 
membersAsRoles(expectedAlphaHeartbeaterNodes) + + val expectedBetaHeartbeaterNodes = takeNOldestMembers(_.dataCenter == "beta", 2) + val expectedBetaHeartbeaterRoles = membersAsRoles(expectedBetaHeartbeaterNodes) + + val expectedNoActiveHeartbeatSenderRoles = roles.toSet -- (expectedAlphaHeartbeaterRoles union expectedBetaHeartbeaterRoles) + + enterBarrier("found-expectations") + + info(s"expectedAlphaHeartbeaterNodes = ${expectedAlphaHeartbeaterNodes.map(_.address.port.get)}") + info(s"expectedBetaHeartbeaterNodes = ${expectedBetaHeartbeaterNodes.map(_.address.port.get)}") + info(s"expectedNoActiveHeartbeatSenderRoles = ${expectedNoActiveHeartbeatSenderRoles.map(_.port.get)}") + + expectedAlphaHeartbeaterRoles.size should ===(2) + expectedBetaHeartbeaterRoles.size should ===(2) + + implicit val sender = observer.ref + runOn(expectedAlphaHeartbeaterRoles.toList: _*) { + selectCrossDcHeartbeatSender ! CrossDcHeartbeatSender.ReportStatus() + val status = observer.expectMsgType[CrossDcHeartbeatSender.MonitoringActive](5.seconds) + } + runOn(expectedBetaHeartbeaterRoles.toList: _*) { + selectCrossDcHeartbeatSender ! CrossDcHeartbeatSender.ReportStatus() + val status = observer.expectMsgType[CrossDcHeartbeatSender.MonitoringActive](5.seconds) + } + runOn(expectedNoActiveHeartbeatSenderRoles.toList: _*) { + selectCrossDcHeartbeatSender ! CrossDcHeartbeatSender.ReportStatus() + val status = observer.expectMsgType[CrossDcHeartbeatSender.MonitoringDormant](5.seconds) + } + + enterBarrier("done") + } + } + + /** + * INTERNAL API + * Returns `Up` (or in "later" status, like Leaving etc, but never `Joining` or `WeaklyUp`) members, + * sorted by Member.ageOrdering (from oldest to youngest). This restriction on status is needed to + * strongly guaratnee the order of "oldest" members, as they're linearized by the order in which they become Up + * (since marking that transition is a Leader action). + */ + private def membersByAge(): immutable.SortedSet[Member] = + SortedSet.empty(Member.ageOrdering) + .union(cluster.state.members.filter(m ⇒ m.status != MemberStatus.WeaklyUp && m.status != MemberStatus.WeaklyUp)) + + /** INTERNAL API */ + @InternalApi + private[cluster] def takeNOldestMembers(memberFilter: Member ⇒ Boolean, n: Int): immutable.SortedSet[Member] = + membersByAge() + .filter(m ⇒ m.status != MemberStatus.Joining && m.status != MemberStatus.WeaklyUp) + .filter(memberFilter) + .take(n) + + private def membersAsRoles(ms: immutable.Set[Member]): immutable.Set[RoleName] = { + val res = ms.flatMap(m ⇒ roleName(m.address)) + require(res.size == ms.size, s"Not all members were converted to roles! 
Got: ${ms}, found ${res}") + res + } +} From c0d439eac3fb50a412861d477c37fbb2d6831bd8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johan=20Andr=C3=A9n?= Date: Fri, 7 Jul 2017 13:19:10 +0100 Subject: [PATCH 16/34] limit cross dc gossip #23282 --- .../src/main/resources/reference.conf | 16 +- .../src/main/scala/akka/cluster/Cluster.scala | 4 +- .../scala/akka/cluster/ClusterDaemon.scala | 114 +++------- .../scala/akka/cluster/ClusterEvent.scala | 6 +- .../scala/akka/cluster/ClusterSettings.scala | 45 ++-- .../cluster/CrossDcClusterHeartbeat.scala | 6 +- .../src/main/scala/akka/cluster/Gossip.scala | 7 + .../scala/akka/cluster/MembershipState.scala | 199 ++++++++++++++++- .../akka/cluster/MultiDcClusterSpec.scala | 60 +++-- .../akka/cluster/MultiDcSplitBrainSpec.scala | 11 - .../ClusterDomainEventPublisherSpec.scala | 29 +-- .../akka/cluster/ClusterDomainEventSpec.scala | 18 +- .../test/scala/akka/cluster/GossipSpec.scala | 2 +- .../cluster/GossipTargetSelectorSpec.scala | 206 ++++++++++++++++++ 14 files changed, 549 insertions(+), 174 deletions(-) create mode 100644 akka-cluster/src/test/scala/akka/cluster/GossipTargetSelectorSpec.scala diff --git a/akka-cluster/src/main/resources/reference.conf b/akka-cluster/src/main/resources/reference.conf index dc2847b258..ef0a32fd1b 100644 --- a/akka-cluster/src/main/resources/reference.conf +++ b/akka-cluster/src/main/resources/reference.conf @@ -210,7 +210,16 @@ akka { # in which case these settings would not be used at all - they only apply, # if your cluster nodes are configured with at-least 2 different `akka.cluster.data-center` values. multi-data-center { - + + # Try to limit the number of connections between data centers. Used for gossip and heartbeating. + # This will not limit connections created for the messaging of the application. + # If the cluster does not span multiple data centers, this value has no effect. + cross-data-center-connections = 5 + + # The n oldest nodes in a data center will choose to gossip to another data center with + # this probability. Must be a value between 0.0 and 1.0 where 0.0 means never, 1.0 means always. + cross-data-center-gossip-probability = 0.2 + failure-detector { # FQCN of the failure detector implementation. # It must implement akka.remote.FailureDetector and have @@ -232,11 +241,6 @@ akka { # will start after this period, even though no heartbeat message has # been received. expected-response-after = 1 s - - # Maximum number of oldest members in a data center that will monitor other (oldest nodes in other) data centers. - # This is done to lessen the cross data center communication, as only those top-n-oldest nodes - # need to maintain connections to the other data-centers. 
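As a hedged example of wiring the two new settings together (the data center name, system name and values below are assumptions for illustration, not defaults), a node could be started like this:

    import akka.actor.ActorSystem
    import com.typesafe.config.ConfigFactory

    // Hypothetical node in data center "west"; only the oldest cross-data-center-connections
    // nodes of each DC maintain gossip and heartbeat connections to other DCs.
    val dcWestConfig = ConfigFactory.parseString(
      """
      akka.cluster {
        data-center = "west"
        multi-data-center {
          cross-data-center-connections = 3
          cross-data-center-gossip-probability = 0.2
        }
      }
      """).withFallback(ConfigFactory.load())

    val system = ActorSystem("ClusterSystem", dcWestConfig)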
- nr-of-monitoring-members = 5 } } diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index d4b7ddcb4d..dde1cf19ba 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -108,7 +108,9 @@ class Cluster(val system: ExtendedActorSystem) extends Extension { val crossDcFailureDetector: FailureDetectorRegistry[Address] = { val createFailureDetector = () ⇒ - FailureDetectorLoader.load(settings.CrossDcFailureDetectorSettings.ImplementationClass, settings.CrossDcFailureDetectorSettings.config, system) + FailureDetectorLoader.load( + settings.MultiDataCenter.CrossDcFailureDetectorSettings.ImplementationClass, + settings.MultiDataCenter.CrossDcFailureDetectorSettings.config, system) new DefaultFailureDetectorRegistry(createFailureDetector) } diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterDaemon.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterDaemon.scala index 80391ec612..c038c5ecb5 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterDaemon.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterDaemon.scala @@ -4,7 +4,7 @@ package akka.cluster import language.existentials -import scala.collection.immutable +import scala.collection.{ SortedSet, breakOut, immutable, mutable } import scala.concurrent.duration._ import java.util.concurrent.ThreadLocalRandom @@ -15,8 +15,6 @@ import akka.cluster.MemberStatus._ import akka.cluster.ClusterEvent._ import akka.cluster.ClusterSettings.DataCenter import akka.dispatch.{ RequiresMessageQueue, UnboundedMessageQueueSemantics } - -import scala.collection.breakOut import akka.remote.QuarantinedEvent import java.util.ArrayList import java.util.Collections @@ -25,9 +23,11 @@ import akka.pattern.ask import akka.util.Timeout import akka.Done import akka.annotation.InternalApi +import akka.cluster.ClusterSettings.DataCenter import scala.concurrent.Future import scala.concurrent.Promise +import scala.util.Random /** * Base trait for all cluster messages. All ClusterMessage's are serializable. 
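The hunks below rewrite ClusterCoreDaemon.gossip() to delegate peer selection to the new GossipTargetSelector; restated in simplified form (stand-in String and function types, not the real internals), the per-round decision looks roughly like this:

    // Rough sketch of one gossip round: at most one peer is chosen; a peer in another
    // data center, or one that has already seen this gossip version, first gets a
    // lightweight GossipStatus instead of the full state.
    def gossipRound(
      peer:       Option[String],
      isInSameDc: String => Boolean,
      hasSeen:    String => Boolean)(
      sendStatus: String => Unit,
      sendFull:   String => Unit): Unit =
      peer.foreach { p =>
        if (!isInSameDc(p) || hasSeen(p)) sendStatus(p) // cheap version exchange first
        else sendFull(p)                                // full gossip only within the DC when needed
      }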
@@ -301,10 +301,18 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef) extends Actor with protected def selfUniqueAddress = cluster.selfUniqueAddress val vclockNode = VectorClock.Node(vclockName(selfUniqueAddress)) + val gossipTargetSelector = new GossipTargetSelector( + ReduceGossipDifferentViewProbability, + cluster.settings.MultiDataCenter.CrossDcGossipProbability) // note that self is not initially member, // and the Gossip is not versioned for this 'Node' yet - var membershipState = MembershipState(Gossip.empty, cluster.selfUniqueAddress, cluster.settings.DataCenter) + var membershipState = MembershipState( + Gossip.empty, + cluster.selfUniqueAddress, + cluster.settings.DataCenter, + cluster.settings.MultiDataCenter.CrossDcConnections) + def latestGossip: Gossip = membershipState.latestGossip val statsEnabled = PublishStatsInterval.isFinite @@ -925,88 +933,25 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef) extends Actor with */ def gossipRandomN(n: Int): Unit = { if (!isSingletonCluster && n > 0) { - val localGossip = latestGossip - // using ArrayList to be able to shuffle - val possibleTargets = new ArrayList[UniqueAddress](localGossip.members.size) - localGossip.members.foreach { m ⇒ - if (validNodeForGossip(m.uniqueAddress)) - possibleTargets.add(m.uniqueAddress) - } - val randomTargets = - if (possibleTargets.size <= n) - possibleTargets - else { - Collections.shuffle(possibleTargets, ThreadLocalRandom.current()) - possibleTargets.subList(0, n) - } - - val iter = randomTargets.iterator - while (iter.hasNext) - gossipTo(iter.next()) + gossipTargetSelector.randomNodesForFullGossip(membershipState, n).foreach(gossipTo) } } /** * Initiates a new round of gossip. */ - def gossip(): Unit = { - + def gossip(): Unit = if (!isSingletonCluster) { - val localGossip = latestGossip - - val preferredGossipTargets: Vector[UniqueAddress] = - if (ThreadLocalRandom.current.nextDouble() < adjustedGossipDifferentViewProbability) { - // If it's time to try to gossip to some nodes with a different view - // gossip to a random alive member with preference to a member with older gossip version - localGossip.members.collect { - case m if !localGossip.seenByNode(m.uniqueAddress) && validNodeForGossip(m.uniqueAddress) ⇒ - m.uniqueAddress - }(breakOut) - } else Vector.empty - - if (preferredGossipTargets.nonEmpty) { - val peer = selectRandomNode(preferredGossipTargets) - // send full gossip because it has different view - peer foreach gossipTo - } else { - // Fall back to localGossip; important to not accidentally use `map` of the SortedSet, since the original order is not preserved) - val peer = selectRandomNode(localGossip.members.toIndexedSeq.collect { - case m if validNodeForGossip(m.uniqueAddress) ⇒ m.uniqueAddress - }) - peer foreach { node ⇒ - if (localGossip.seenByNode(node)) gossipStatusTo(node) - else gossipTo(node) - } + gossipTargetSelector.gossipTarget(membershipState) match { + case Some(peer) ⇒ + if (!membershipState.isInSameDc(peer) || latestGossip.seenByNode(peer)) + // avoid transferring the full state if possible + gossipStatusTo(peer) + else + gossipTo(peer) + case None ⇒ // nothing to see here } } - } - - /** - * For large clusters we should avoid shooting down individual - * nodes. Therefore the probability is reduced for large clusters. 
- */ - def adjustedGossipDifferentViewProbability: Double = { - val size = latestGossip.members.size - val low = ReduceGossipDifferentViewProbability - val high = low * 3 - // start reduction when cluster is larger than configured ReduceGossipDifferentViewProbability - if (size <= low) - GossipDifferentViewProbability - else { - // don't go lower than 1/10 of the configured GossipDifferentViewProbability - val minP = GossipDifferentViewProbability / 10 - if (size >= high) - minP - else { - // linear reduction of the probability with increasing number of nodes - // from ReduceGossipDifferentViewProbability at ReduceGossipDifferentViewProbability nodes - // to ReduceGossipDifferentViewProbability / 10 at ReduceGossipDifferentViewProbability * 3 nodes - // i.e. default from 0.8 at 400 nodes, to 0.08 at 1600 nodes - val k = (minP - GossipDifferentViewProbability) / (high - low) - GossipDifferentViewProbability + (size - low) * k - } - } - } /** * Runs periodic leader actions, such as member status transitions, assigning partitions etc. @@ -1244,10 +1189,6 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef) extends Actor with } } - def selectRandomNode(nodes: IndexedSeq[UniqueAddress]): Option[UniqueAddress] = - if (nodes.isEmpty) None - else Some(nodes(ThreadLocalRandom.current nextInt nodes.size)) - def isSingletonCluster: Boolean = latestGossip.isSingletonCluster // needed for tests @@ -1261,24 +1202,21 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef) extends Actor with * Gossips latest gossip to a node. */ def gossipTo(node: UniqueAddress): Unit = - if (validNodeForGossip(node)) + if (membershipState.validNodeForGossip(node)) clusterCore(node.address) ! GossipEnvelope(selfUniqueAddress, node, latestGossip) def gossipTo(node: UniqueAddress, destination: ActorRef): Unit = - if (validNodeForGossip(node)) + if (membershipState.validNodeForGossip(node)) destination ! GossipEnvelope(selfUniqueAddress, node, latestGossip) def gossipStatusTo(node: UniqueAddress, destination: ActorRef): Unit = - if (validNodeForGossip(node)) + if (membershipState.validNodeForGossip(node)) destination ! GossipStatus(selfUniqueAddress, latestGossip.version) def gossipStatusTo(node: UniqueAddress): Unit = - if (validNodeForGossip(node)) + if (membershipState.validNodeForGossip(node)) clusterCore(node.address) ! 
GossipStatus(selfUniqueAddress, latestGossip.version) - def validNodeForGossip(node: UniqueAddress): Boolean = - node != selfUniqueAddress && membershipState.isReachableExcludingDownedObservers(node) - def updateLatestGossip(gossip: Gossip): Unit = { // Updating the vclock version for the changes val versionedGossip = gossip :+ vclockNode diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterEvent.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterEvent.scala index b0b72633a1..16c9b21c7e 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterEvent.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterEvent.scala @@ -378,7 +378,11 @@ private[cluster] final class ClusterDomainEventPublisher extends Actor with Acto val cluster = Cluster(context.system) val selfUniqueAddress = cluster.selfUniqueAddress - val emptyMembershipState = MembershipState(Gossip.empty, cluster.selfUniqueAddress, cluster.settings.DataCenter) + val emptyMembershipState = MembershipState( + Gossip.empty, + cluster.selfUniqueAddress, + cluster.settings.DataCenter, + cluster.settings.MultiDataCenter.CrossDcConnections) var membershipState: MembershipState = emptyMembershipState def selfDc = cluster.settings.DataCenter diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala index df37649d2b..63a4cec36f 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala @@ -10,7 +10,7 @@ import com.typesafe.config.ConfigObject import scala.concurrent.duration.Duration import akka.actor.Address import akka.actor.AddressFromURIString -import akka.annotation.{ DoNotInherit, InternalApi } +import akka.annotation.InternalApi import akka.dispatch.Dispatchers import akka.util.Helpers.{ ConfigOps, Requiring, toRootLowerCase } @@ -34,7 +34,6 @@ object ClusterSettings { } final class ClusterSettings(val config: Config, val systemName: String) { - import ClusterSettings._ import ClusterSettings._ private val cc = config.getConfig("akka.cluster") @@ -51,6 +50,28 @@ final class ClusterSettings(val config: Config, val systemName: String) { FailureDetectorConfig.getInt("monitored-by-nr-of-members") } requiring (_ > 0, "failure-detector.monitored-by-nr-of-members must be > 0") + final class CrossDcFailureDetectorSettings(val config: Config) { + val ImplementationClass: String = config.getString("implementation-class") + val HeartbeatInterval: FiniteDuration = { + config.getMillisDuration("heartbeat-interval") + } requiring (_ > Duration.Zero, "failure-detector.heartbeat-interval must be > 0") + val HeartbeatExpectedResponseAfter: FiniteDuration = { + config.getMillisDuration("expected-response-after") + } requiring (_ > Duration.Zero, "failure-detector.expected-response-after > 0") + def NrOfMonitoringActors: Int = MultiDataCenter.CrossDcConnections + } + + object MultiDataCenter { + val CrossDcConnections: Int = cc.getInt("multi-data-center.cross-data-center-connections") + .requiring(_ > 0, "cross-data-center-connections must be > 0") + + val CrossDcGossipProbability: Double = cc.getDouble("multi-data-center.cross-data-center-gossip-probability") + .requiring(d ⇒ d >= 0.0D && d <= 1.0D, "cross-data-center-gossip-probability must be >= 0.0 and <= 1.0") + + val CrossDcFailureDetectorSettings: CrossDcFailureDetectorSettings = + new CrossDcFailureDetectorSettings(cc.getConfig("multi-data-center.failure-detector")) + } + val SeedNodes: 
immutable.IndexedSeq[Address] = immutableSeq(cc.getStringList("seed-nodes")).map { case AddressFromURIString(addr) ⇒ addr }.toVector val SeedNodeTimeout: FiniteDuration = cc.getMillisDuration("seed-node-timeout") @@ -114,24 +135,10 @@ final class ClusterSettings(val config: Config, val systemName: String) { val QuarantineRemovedNodeAfter: FiniteDuration = cc.getMillisDuration("quarantine-removed-node-after") requiring (_ > Duration.Zero, "quarantine-removed-node-after must be > 0") - val AllowWeaklyUpMembers = cc.getBoolean("allow-weakly-up-members") + val AllowWeaklyUpMembers: Boolean = cc.getBoolean("allow-weakly-up-members") val DataCenter: DataCenter = cc.getString("data-center") - final class CrossDcFailureDetectorSettings(val config: Config) { - val ImplementationClass: String = config.getString("implementation-class") - val HeartbeatInterval: FiniteDuration = { - config.getMillisDuration("heartbeat-interval") - } requiring (_ > Duration.Zero, "failure-detector.heartbeat-interval must be > 0") - val HeartbeatExpectedResponseAfter: FiniteDuration = { - config.getMillisDuration("expected-response-after") - } requiring (_ > Duration.Zero, "failure-detector.expected-response-after > 0") - val NrOfMonitoringActors: Int = { - config.getInt("nr-of-monitoring-members") - } requiring (_ > 0, "failure-detector.nr-of-monitoring-members must be > 0") - } - val CrossDcFailureDetectorSettings = new CrossDcFailureDetectorSettings(cc.getConfig("multi-data-center.failure-detector")) - val Roles: Set[String] = { val configuredRoles = (immutableSeq(cc.getStringList("roles")).toSet) requiring ( _.forall(!_.startsWith(DcRolePrefix)), @@ -162,8 +169,8 @@ final class ClusterSettings(val config: Config, val systemName: String) { val SchedulerTicksPerWheel: Int = cc.getInt("scheduler.ticks-per-wheel") object Debug { - val VerboseHeartbeatLogging = cc.getBoolean("debug.verbose-heartbeat-logging") - val VerboseGossipLogging = cc.getBoolean("debug.verbose-gossip-logging") + val VerboseHeartbeatLogging: Boolean = cc.getBoolean("debug.verbose-heartbeat-logging") + val VerboseGossipLogging: Boolean = cc.getBoolean("debug.verbose-gossip-logging") } } diff --git a/akka-cluster/src/main/scala/akka/cluster/CrossDcClusterHeartbeat.scala b/akka-cluster/src/main/scala/akka/cluster/CrossDcClusterHeartbeat.scala index b022051bb6..2a5c9585b3 100644 --- a/akka-cluster/src/main/scala/akka/cluster/CrossDcClusterHeartbeat.scala +++ b/akka-cluster/src/main/scala/akka/cluster/CrossDcClusterHeartbeat.scala @@ -47,7 +47,9 @@ private[cluster] final class CrossDcHeartbeatSender extends Actor with ActorLogg val isExternalClusterMember: Member ⇒ Boolean = member ⇒ member.dataCenter != cluster.selfDataCenter - val crossDcSettings: cluster.settings.CrossDcFailureDetectorSettings = cluster.settings.CrossDcFailureDetectorSettings + val crossDcSettings: cluster.settings.CrossDcFailureDetectorSettings = + cluster.settings.MultiDataCenter.CrossDcFailureDetectorSettings + val crossDcFailureDetector = cluster.crossDcFailureDetector val selfHeartbeat = ClusterHeartbeatSender.Heartbeat(selfAddress) @@ -299,7 +301,7 @@ private[cluster] object CrossDcHeartbeatingState { crossDcFailureDetector, nrOfMonitoredNodesPerDc, state = { - // TODO unduplicate this with other places where we do this + // TODO unduplicate this with the logic in MembershipState.ageSortedTopOldestMembersPerDc val groupedByDc = members.groupBy(_.dataCenter) if (members.ordering == Member.ageOrdering) { diff --git a/akka-cluster/src/main/scala/akka/cluster/Gossip.scala 
b/akka-cluster/src/main/scala/akka/cluster/Gossip.scala index dcf3982355..9a05003004 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Gossip.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Gossip.scala @@ -87,6 +87,13 @@ private[cluster] final case class Gossip( @transient private lazy val membersMap: Map[UniqueAddress, Member] = members.map(m ⇒ m.uniqueAddress → m)(collection.breakOut) + @transient lazy val isMultiDc = + if (members.size <= 1) false + else { + val dc1 = members.head.dataCenter + members.exists(_.dataCenter != dc1) + } + /** * Increments the version for this 'Node'. */ diff --git a/akka-cluster/src/main/scala/akka/cluster/MembershipState.scala b/akka-cluster/src/main/scala/akka/cluster/MembershipState.scala index e2375a4d5b..99e10a273c 100644 --- a/akka-cluster/src/main/scala/akka/cluster/MembershipState.scala +++ b/akka-cluster/src/main/scala/akka/cluster/MembershipState.scala @@ -3,12 +3,19 @@ */ package akka.cluster +import java.util.{ ArrayList, Collections } +import java.util.concurrent.ThreadLocalRandom + import scala.collection.immutable import scala.collection.SortedSet import akka.cluster.ClusterSettings.DataCenter import akka.cluster.MemberStatus._ import akka.annotation.InternalApi +import scala.annotation.tailrec +import scala.collection.breakOut +import scala.util.Random + /** * INTERNAL API */ @@ -23,9 +30,16 @@ import akka.annotation.InternalApi /** * INTERNAL API */ -@InternalApi private[akka] final case class MembershipState(latestGossip: Gossip, selfUniqueAddress: UniqueAddress, selfDc: DataCenter) { +@InternalApi private[akka] final case class MembershipState( + latestGossip: Gossip, + selfUniqueAddress: UniqueAddress, + selfDc: DataCenter, + crossDcConnections: Int) { + import MembershipState._ + lazy val selfMember = latestGossip.member(selfUniqueAddress) + def members: immutable.SortedSet[Member] = latestGossip.members def overview: GossipOverview = latestGossip.overview @@ -76,6 +90,20 @@ import akka.annotation.InternalApi overview.reachability.removeObservers(membersToExclude).remove(members.collect { case m if m.dataCenter != selfDc ⇒ m.uniqueAddress }) } + /** + * @return Up to `crossDcConnections` oldest members for each DC + */ + lazy val ageSortedTopOldestMembersPerDc: Map[DataCenter, SortedSet[Member]] = + // TODO make this recursive and bail early when size reached to make it fast for large clusters + latestGossip.members.foldLeft(Map.empty[DataCenter, SortedSet[Member]]) { (acc, member) ⇒ + acc.get(member.dataCenter) match { + case Some(set) ⇒ + if (set.size < crossDcConnections) acc + (member.dataCenter → (set + member)) + else acc + case None ⇒ acc + (member.dataCenter → (SortedSet.empty(Member.ageOrdering) + member)) + } + } + /** * @return true if toAddress should be reachable from the fromDc in general, within a data center * this means only caring about data center local observations, across data centers it @@ -119,4 +147,173 @@ import akka.annotation.InternalApi .map(_.uniqueAddress) } + def isInSameDc(node: UniqueAddress): Boolean = + node == selfUniqueAddress || latestGossip.member(node).dataCenter == selfDc + + def validNodeForGossip(node: UniqueAddress): Boolean = + node != selfUniqueAddress && + ((isInSameDc(node) && isReachableExcludingDownedObservers(node)) || + // if cross DC we need to check pairwise unreachable observation + overview.reachability.isReachable(selfUniqueAddress, node)) + +} + +/** + * INTERNAL API + */ +@InternalApi private[akka] class GossipTargetSelector( + reduceGossipDifferentViewProbability: 
Double, + crossDcGossipProbability: Double) { + + final def gossipTarget(state: MembershipState): Option[UniqueAddress] = { + selectRandomNode(gossipTargets(state)) + } + + final def gossipTargets(state: MembershipState): Vector[UniqueAddress] = + if (state.latestGossip.isMultiDc) multiDcGossipTargets(state) + else localDcGossipTargets(state) + + /** + * Select `n` random nodes to gossip to (used to quickly inform the rest of the cluster when leaving for example) + */ + def randomNodesForFullGossip(state: MembershipState, n: Int): Vector[UniqueAddress] = + if (state.latestGossip.isMultiDc && state.ageSortedTopOldestMembersPerDc(state.selfDc).contains(state.selfMember)) { + // this node is one of the N oldest in the cluster, gossip to one cross-dc but mostly locally + val randomLocalNodes = Random.shuffle(state.members.toVector.collect { + case m if m.dataCenter == state.selfDc && state.validNodeForGossip(m.uniqueAddress) ⇒ m.uniqueAddress + }) + + @tailrec + def selectOtherDcNode(randomizedDcs: List[DataCenter]): Option[UniqueAddress] = + randomizedDcs match { + case Nil ⇒ None // couldn't find a single cross-dc-node to talk to + case dc :: tail ⇒ + state.ageSortedTopOldestMembersPerDc(dc).collectFirst { + case m if state.validNodeForGossip(m.uniqueAddress) ⇒ m.uniqueAddress + } match { + case Some(addr) ⇒ Some(addr) + case None ⇒ selectOtherDcNode(tail) + } + + } + val otherDcs = Random.shuffle((state.ageSortedTopOldestMembersPerDc.keySet - state.selfDc).toList) + + selectOtherDcNode(otherDcs) match { + case Some(node) ⇒ randomLocalNodes.take(n - 1) :+ node + case None ⇒ randomLocalNodes.take(n) + } + + } else { + // single dc or not among the N oldest - select local nodes + val selectedNodes = state.members.toVector.collect { + case m if m.dataCenter == state.selfDc && state.validNodeForGossip(m.uniqueAddress) ⇒ m.uniqueAddress + } + + if (selectedNodes.size <= n) selectedNodes + else Random.shuffle(selectedNodes).take(n) + } + + /** + * Chooses a set of possible gossip targets that is in the same dc. If the cluster is not multi dc this + * means it is a choice among all nodes of the cluster. 
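A bit further down, multiDcGossipTargets only attempts cross data center gossip a fraction of the time; the coin flip in selectDcLocalNodes can be read in isolation as the sketch below (the 0.2 default comes from the reference.conf change in this patch):

    import java.util.concurrent.ThreadLocalRandom

    // With cross-data-center-gossip-probability = 0.2, an "oldest" node gossips locally in
    // roughly 4 out of 5 rounds and targets another data center in roughly 1 out of 5.
    def gossipLocallyThisRound(crossDcGossipProbability: Double = 0.2): Boolean =
      ThreadLocalRandom.current().nextDouble() > crossDcGossipProbability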
+ */ + protected def localDcGossipTargets(state: MembershipState): Vector[UniqueAddress] = { + val latestGossip = state.latestGossip + val firstSelection: Vector[UniqueAddress] = + if (preferNodesWithDifferentView(state)) { + // If it's time to try to gossip to some nodes with a different view + // gossip to a random alive same dc member with preference to a member with older gossip version + latestGossip.members.collect { + case m if m.dataCenter == state.selfDc && !latestGossip.seenByNode(m.uniqueAddress) && state.validNodeForGossip(m.uniqueAddress) ⇒ + m.uniqueAddress + }(breakOut) + } else Vector.empty + + // Fall back to localGossip + if (firstSelection.isEmpty) { + latestGossip.members.toVector.collect { + case m if m.dataCenter == state.selfDc && state.validNodeForGossip(m.uniqueAddress) ⇒ m.uniqueAddress + } + } else firstSelection + + } + + /** + * Choose cross-dc nodes if this one of the N oldest nodes, and if not fall back to gosip locally in the dc + */ + protected def multiDcGossipTargets(state: MembershipState): Vector[UniqueAddress] = { + val latestGossip = state.latestGossip + // only a fraction of the time across data centers + if (selectDcLocalNodes()) localDcGossipTargets(state) + else { + val nodesPerDc = state.ageSortedTopOldestMembersPerDc + + // only do cross DC gossip if this node is among the N oldest + + if (!nodesPerDc(state.selfDc).contains(state.selfMember)) localDcGossipTargets(state) + else { + @tailrec + def findFirstDcWithValidNodes(left: List[DataCenter]): Vector[UniqueAddress] = + left match { + case dc :: tail ⇒ + + val validNodes = nodesPerDc(dc).collect { + case member if state.validNodeForGossip(member.uniqueAddress) ⇒ + member.uniqueAddress + } + + if (validNodes.nonEmpty) validNodes.toVector + else findFirstDcWithValidNodes(tail) // no valid nodes in dc, try next + + case Nil ⇒ + Vector.empty + } + + // chose another DC at random + val otherDcsInRandomOrder = dcsInRandomOrder((nodesPerDc - state.selfDc).keys.toList) + val nodes = findFirstDcWithValidNodes(otherDcsInRandomOrder) + if (nodes.nonEmpty) nodes + // no other dc with reachable nodes, fall back to local gossip + else localDcGossipTargets(state) + } + } + } + + /** + * For large clusters we should avoid shooting down individual + * nodes. Therefore the probability is reduced for large clusters. + */ + protected def adjustedGossipDifferentViewProbability(clusterSize: Int): Double = { + val low = reduceGossipDifferentViewProbability + val high = low * 3 + // start reduction when cluster is larger than configured ReduceGossipDifferentViewProbability + if (clusterSize <= low) + reduceGossipDifferentViewProbability + else { + // don't go lower than 1/10 of the configured GossipDifferentViewProbability + val minP = reduceGossipDifferentViewProbability / 10 + if (clusterSize >= high) + minP + else { + // linear reduction of the probability with increasing number of nodes + // from ReduceGossipDifferentViewProbability at ReduceGossipDifferentViewProbability nodes + // to ReduceGossipDifferentViewProbability / 10 at ReduceGossipDifferentViewProbability * 3 nodes + // i.e. 
default from 0.8 at 400 nodes, to 0.08 at 1600 nodes + val k = (minP - reduceGossipDifferentViewProbability) / (high - low) + reduceGossipDifferentViewProbability + (clusterSize - low) * k + } + } + } + + protected def selectDcLocalNodes(): Boolean = ThreadLocalRandom.current.nextDouble() > crossDcGossipProbability + + protected def preferNodesWithDifferentView(state: MembershipState): Boolean = + ThreadLocalRandom.current.nextDouble() < adjustedGossipDifferentViewProbability(state.latestGossip.members.size) + + protected def dcsInRandomOrder(dcs: List[DataCenter]): List[DataCenter] = + Random.shuffle(dcs) + + protected def selectRandomNode(nodes: IndexedSeq[UniqueAddress]): Option[UniqueAddress] = + if (nodes.isEmpty) None + else Some(nodes(ThreadLocalRandom.current.nextInt(nodes.size))) } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcClusterSpec.scala index 9f49188dd4..91ae496fe0 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcClusterSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcClusterSpec.scala @@ -10,41 +10,53 @@ import com.typesafe.config.ConfigFactory import scala.concurrent.duration._ -object MultiDcMultiJvmSpec extends MultiNodeConfig { +class MultiDcSpecConfig(crossDcConnections: Int = 5) extends MultiNodeConfig { val first = role("first") val second = role("second") val third = role("third") val fourth = role("fourth") val fifth = role("fifth") - commonConfig(MultiNodeClusterSpec.clusterConfig) + commonConfig(ConfigFactory.parseString( + s""" + akka.loglevel = INFO + akka.cluster.multi-data-center.cross-data-center-connections = $crossDcConnections + """).withFallback(MultiNodeClusterSpec.clusterConfig)) nodeConfig(first, second)(ConfigFactory.parseString( """ akka.cluster.data-center = "dc1" - akka.loglevel = INFO """)) nodeConfig(third, fourth, fifth)(ConfigFactory.parseString( """ akka.cluster.data-center = "dc2" - akka.loglevel = INFO """)) testTransport(on = true) } -class MultiDcMultiJvmNode1 extends MultiDcSpec -class MultiDcMultiJvmNode2 extends MultiDcSpec -class MultiDcMultiJvmNode3 extends MultiDcSpec -class MultiDcMultiJvmNode4 extends MultiDcSpec -class MultiDcMultiJvmNode5 extends MultiDcSpec +object MultiDcNormalConfig extends MultiDcSpecConfig() -abstract class MultiDcSpec - extends MultiNodeSpec(MultiDcMultiJvmSpec) +class MultiDcMultiJvmNode1 extends MultiDcSpec(MultiDcNormalConfig) +class MultiDcMultiJvmNode2 extends MultiDcSpec(MultiDcNormalConfig) +class MultiDcMultiJvmNode3 extends MultiDcSpec(MultiDcNormalConfig) +class MultiDcMultiJvmNode4 extends MultiDcSpec(MultiDcNormalConfig) +class MultiDcMultiJvmNode5 extends MultiDcSpec(MultiDcNormalConfig) + +object MultiDcFewCrossDcConnectionsConfig extends MultiDcSpecConfig(1) + +class MultiDcFewCrossDcMultiJvmNode1 extends MultiDcSpec(MultiDcFewCrossDcConnectionsConfig) +class MultiDcFewCrossDcMultiJvmNode2 extends MultiDcSpec(MultiDcFewCrossDcConnectionsConfig) +class MultiDcFewCrossDcMultiJvmNode3 extends MultiDcSpec(MultiDcFewCrossDcConnectionsConfig) +class MultiDcFewCrossDcMultiJvmNode4 extends MultiDcSpec(MultiDcFewCrossDcConnectionsConfig) +class MultiDcFewCrossDcMultiJvmNode5 extends MultiDcSpec(MultiDcFewCrossDcConnectionsConfig) + +abstract class MultiDcSpec(config: MultiDcSpecConfig) + extends MultiNodeSpec(config) with MultiNodeClusterSpec { - import MultiDcMultiJvmSpec._ + import config._ "A cluster with multiple data centers" must { "be able to form" in { @@ -87,27 
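// ---------------------------------------------------------------------------
// Editor's sketch, not part of the patch: the two settings introduced in
// MultiDcSpecConfig above (akka.cluster.data-center and
// akka.cluster.multi-data-center.cross-data-center-connections) pulled into a
// minimal single-node configuration. The system name, the provider shorthand
// and the self-join are assumptions made for the example; remoting is left to
// the reference defaults.
import akka.actor.ActorSystem
import akka.cluster.Cluster
import com.typesafe.config.ConfigFactory

object MultiDcNodeSketch {
  def main(args: Array[String]): Unit = {
    val config = ConfigFactory.parseString(
      """
      akka.actor.provider = cluster
      # the data center this node belongs to
      akka.cluster.data-center = "dc1"
      # how many of the oldest nodes gossip across data centers
      akka.cluster.multi-data-center.cross-data-center-connections = 2
      """).withFallback(ConfigFactory.load())

    val system = ActorSystem("ClusterSystem", config)
    // joining and membership work as in a single data center cluster; nodes
    // configured with a different data-center value are reached only through
    // each dc's oldest nodes, as the gossip target selection above implements
    Cluster(system).join(Cluster(system).selfAddress)
  }
}
// ---------------------------------------------------------------------------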
+99,30 @@ abstract class MultiDcSpec runOn(first) { testConductor.blackhole(first, third, Direction.Both).await } - runOn(first, second, third, fourth) { - awaitAssert(clusterView.unreachableMembers should not be empty) - } enterBarrier("inter-data-center unreachability") runOn(fifth) { cluster.join(third) } + runOn(third, fourth, fifth) { + // should be able to join and become up since the + // unreachable is between dc1 and dc2, + within(10.seconds) { + awaitAssert(clusterView.members.filter(_.status == MemberStatus.Up) should have size (5)) + } + } + + runOn(first) { + testConductor.passThrough(first, third, Direction.Both).await + } + // should be able to join and become up since the // unreachable is between dc1 and dc2, within(10.seconds) { awaitAssert(clusterView.members.filter(_.status == MemberStatus.Up) should have size (5)) } - runOn(first) { - testConductor.passThrough(first, third, Direction.Both).await - } - runOn(first, second, third, fourth) { - awaitAssert(clusterView.unreachableMembers should not be empty) - } enterBarrier("inter-data-center unreachability end") } @@ -115,9 +130,6 @@ abstract class MultiDcSpec runOn(first) { testConductor.blackhole(first, second, Direction.Both).await } - runOn(first, second, third, fourth) { - awaitAssert(clusterView.unreachableMembers should not be empty) - } enterBarrier("other-data-center-internal-unreachable") runOn(third) { diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcSplitBrainSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcSplitBrainSpec.scala index 4b0700beb8..d41c0d7608 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcSplitBrainSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcSplitBrainSpec.scala @@ -58,14 +58,6 @@ abstract class MultiDcSplitBrainSpec testConductor.blackhole(dc1Node, dc2Node, Direction.Both).await } } - - runOn(dc1: _*) { - awaitAssert(clusterView.unreachableMembers.map(_.address) should contain allElementsOf (dc2.map(address))) - } - runOn(dc2: _*) { - awaitAssert(clusterView.unreachableMembers.map(_.address) should contain allElementsOf (dc1.map(address))) - } - } def unsplitDataCenters(dc1: Seq[RoleName], dc2: Seq[RoleName]): Unit = { @@ -78,9 +70,6 @@ abstract class MultiDcSplitBrainSpec } } - runOn(dc1 ++ dc2: _*) { - awaitAssert(clusterView.unreachableMembers.map(_.address) should be(empty)) - } } "A cluster with multiple data centers" must { diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterDomainEventPublisherSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterDomainEventPublisherSpec.scala index 68471ff5b8..e86fe2a1b5 100644 --- a/akka-cluster/src/test/scala/akka/cluster/ClusterDomainEventPublisherSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/ClusterDomainEventPublisherSpec.scala @@ -19,7 +19,7 @@ import akka.testkit.ImplicitSender import akka.actor.ActorRef import akka.remote.RARP import akka.testkit.TestProbe -import akka.cluster.ClusterSettings.DefaultDataCenter +import akka.cluster.ClusterSettings.{ DataCenter, DefaultDataCenter } object ClusterDomainEventPublisherSpec { val config = """ @@ -50,27 +50,30 @@ class ClusterDomainEventPublisherSpec extends AkkaSpec(ClusterDomainEventPublish val a51Up = TestMember(Address(protocol, "sys", "a", 2551), Up) val dUp = TestMember(Address(protocol, "sys", "d", 2552), Up, Set("GRP")) - val emptyMembershipState = MembershipState(Gossip.empty, aUp.uniqueAddress, DefaultDataCenter) + private def state(gossip: Gossip, self: UniqueAddress, dc: DataCenter) = + 
MembershipState(gossip, self, DefaultDataCenter, crossDcConnections = 5) + + val emptyMembershipState = state(Gossip.empty, aUp.uniqueAddress, DefaultDataCenter) val g0 = Gossip(members = SortedSet(aUp)).seen(aUp.uniqueAddress) - val state0 = MembershipState(g0, aUp.uniqueAddress, DefaultDataCenter) + val state0 = state(g0, aUp.uniqueAddress, DefaultDataCenter) val g1 = Gossip(members = SortedSet(aUp, cJoining)).seen(aUp.uniqueAddress).seen(cJoining.uniqueAddress) - val state1 = MembershipState(g1, aUp.uniqueAddress, DefaultDataCenter) + val state1 = state(g1, aUp.uniqueAddress, DefaultDataCenter) val g2 = Gossip(members = SortedSet(aUp, bExiting, cUp)).seen(aUp.uniqueAddress) - val state2 = MembershipState(g2, aUp.uniqueAddress, DefaultDataCenter) + val state2 = state(g2, aUp.uniqueAddress, DefaultDataCenter) val g3 = g2.seen(bExiting.uniqueAddress).seen(cUp.uniqueAddress) - val state3 = MembershipState(g3, aUp.uniqueAddress, DefaultDataCenter) + val state3 = state(g3, aUp.uniqueAddress, DefaultDataCenter) val g4 = Gossip(members = SortedSet(a51Up, aUp, bExiting, cUp)).seen(aUp.uniqueAddress) - val state4 = MembershipState(g4, aUp.uniqueAddress, DefaultDataCenter) + val state4 = state(g4, aUp.uniqueAddress, DefaultDataCenter) val g5 = Gossip(members = SortedSet(a51Up, aUp, bExiting, cUp)).seen(aUp.uniqueAddress).seen(bExiting.uniqueAddress).seen(cUp.uniqueAddress).seen(a51Up.uniqueAddress) - val state5 = MembershipState(g5, aUp.uniqueAddress, DefaultDataCenter) + val state5 = state(g5, aUp.uniqueAddress, DefaultDataCenter) val g6 = Gossip(members = SortedSet(aLeaving, bExiting, cUp)).seen(aUp.uniqueAddress) - val state6 = MembershipState(g6, aUp.uniqueAddress, DefaultDataCenter) + val state6 = state(g6, aUp.uniqueAddress, DefaultDataCenter) val g7 = Gossip(members = SortedSet(aExiting, bExiting, cUp)).seen(aUp.uniqueAddress) - val state7 = MembershipState(g7, aUp.uniqueAddress, DefaultDataCenter) + val state7 = state(g7, aUp.uniqueAddress, DefaultDataCenter) val g8 = Gossip(members = SortedSet(aUp, bExiting, cUp, dUp), overview = GossipOverview(reachability = Reachability.empty.unreachable(aUp.uniqueAddress, dUp.uniqueAddress))).seen(aUp.uniqueAddress) - val state8 = MembershipState(g8, aUp.uniqueAddress, DefaultDataCenter) + val state8 = state(g8, aUp.uniqueAddress, DefaultDataCenter) // created in beforeEach var memberSubscriber: TestProbe = _ @@ -143,11 +146,11 @@ class ClusterDomainEventPublisherSpec extends AkkaSpec(ClusterDomainEventPublish val subscriber = TestProbe() publisher ! Subscribe(subscriber.ref, InitialStateAsSnapshot, Set(classOf[RoleLeaderChanged])) subscriber.expectMsgType[CurrentClusterState] - publisher ! PublishChanges(MembershipState(Gossip(members = SortedSet(cJoining, dUp)), dUp.uniqueAddress, DefaultDataCenter)) + publisher ! PublishChanges(state(Gossip(members = SortedSet(cJoining, dUp)), dUp.uniqueAddress, DefaultDataCenter)) subscriber.expectMsgAllOf( RoleLeaderChanged("GRP", Some(dUp.address)), RoleLeaderChanged(ClusterSettings.DcRolePrefix + ClusterSettings.DefaultDataCenter, Some(dUp.address))) - publisher ! PublishChanges(MembershipState(Gossip(members = SortedSet(cUp, dUp)), dUp.uniqueAddress, DefaultDataCenter)) + publisher ! 
PublishChanges(state(Gossip(members = SortedSet(cUp, dUp)), dUp.uniqueAddress, DefaultDataCenter)) subscriber.expectMsg(RoleLeaderChanged("GRP", Some(cUp.address))) } diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterDomainEventSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterDomainEventSpec.scala index 420a39d18a..0ad9e55da9 100644 --- a/akka-cluster/src/test/scala/akka/cluster/ClusterDomainEventSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/ClusterDomainEventSpec.scala @@ -7,6 +7,7 @@ package akka.cluster import org.scalatest.WordSpec import org.scalatest.Matchers import akka.actor.Address + import scala.collection.immutable.SortedSet class ClusterDomainEventSpec extends WordSpec with Matchers { @@ -39,7 +40,10 @@ class ClusterDomainEventSpec extends WordSpec with Matchers { ((gossip, Set.empty[UniqueAddress]) /: gossip.members) { case ((gs, as), m) ⇒ (gs.seen(m.uniqueAddress), as + m.uniqueAddress) } private def state(g: Gossip): MembershipState = - MembershipState(g, selfDummyAddress, ClusterSettings.DefaultDataCenter) + state(g, selfDummyAddress) + + private def state(g: Gossip, self: UniqueAddress): MembershipState = + MembershipState(g, self, ClusterSettings.DefaultDataCenter, crossDcConnections = 5) "Domain events" must { @@ -80,8 +84,8 @@ class ClusterDomainEventSpec extends WordSpec with Matchers { // never include self member in unreachable diffUnreachable( - MembershipState(g1, bDown.uniqueAddress, ClusterSettings.DefaultDataCenter), - MembershipState(g2, bDown.uniqueAddress, ClusterSettings.DefaultDataCenter)) should ===(Seq()) + state(g1, bDown.uniqueAddress), + state(g2, bDown.uniqueAddress)) should ===(Seq()) diffSeen(state(g1), state(g2)) should ===(Seq.empty) } @@ -99,13 +103,13 @@ class ClusterDomainEventSpec extends WordSpec with Matchers { diffUnreachable(state(g1), state(g2)) should ===(Seq(UnreachableMember(cUp))) // never include self member in unreachable diffUnreachable( - MembershipState(g1, cUp.uniqueAddress, ClusterSettings.DefaultDataCenter), - MembershipState(g2, cUp.uniqueAddress, ClusterSettings.DefaultDataCenter)) should ===(Seq()) + state(g1, cUp.uniqueAddress), + state(g2, cUp.uniqueAddress)) should ===(Seq()) diffReachable(state(g1), state(g2)) should ===(Seq(ReachableMember(bUp))) // never include self member in reachable diffReachable( - MembershipState(g1, bUp.uniqueAddress, ClusterSettings.DefaultDataCenter), - MembershipState(g2, bUp.uniqueAddress, ClusterSettings.DefaultDataCenter)) should ===(Seq()) + state(g1, bUp.uniqueAddress), + state(g2, bUp.uniqueAddress)) should ===(Seq()) } "be produced for removed members" in { diff --git a/akka-cluster/src/test/scala/akka/cluster/GossipSpec.scala b/akka-cluster/src/test/scala/akka/cluster/GossipSpec.scala index 25e17d91c6..f4c29f2115 100644 --- a/akka-cluster/src/test/scala/akka/cluster/GossipSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/GossipSpec.scala @@ -35,7 +35,7 @@ class GossipSpec extends WordSpec with Matchers { val dc2d2 = TestMember(dc2d1.address, status = Down, roles = Set.empty, dataCenter = dc2d1.dataCenter) private def state(g: Gossip, selfMember: Member = a1): MembershipState = - MembershipState(g, selfMember.uniqueAddress, selfMember.dataCenter) + MembershipState(g, selfMember.uniqueAddress, selfMember.dataCenter, crossDcConnections = 5) "A Gossip" must { diff --git a/akka-cluster/src/test/scala/akka/cluster/GossipTargetSelectorSpec.scala b/akka-cluster/src/test/scala/akka/cluster/GossipTargetSelectorSpec.scala new file mode 100644 index 
0000000000..a3bfd9747d --- /dev/null +++ b/akka-cluster/src/test/scala/akka/cluster/GossipTargetSelectorSpec.scala @@ -0,0 +1,206 @@ +/** + * Copyright (C) 2009-2017 Lightbend Inc. + */ +package akka.cluster + +import akka.actor.Address +import akka.cluster.ClusterSettings.DataCenter +import akka.cluster.MemberStatus.Up +import org.scalatest.{ Matchers, WordSpec } + +import scala.collection.immutable.SortedSet + +class GossipTargetSelectorSpec extends WordSpec with Matchers { + + val aDc1 = TestMember(Address("akka.tcp", "sys", "a", 2552), Up, Set.empty, dataCenter = "dc1") + val bDc1 = TestMember(Address("akka.tcp", "sys", "b", 2552), Up, Set.empty, dataCenter = "dc1") + val cDc1 = TestMember(Address("akka.tcp", "sys", "c", 2552), Up, Set.empty, dataCenter = "dc1") + + val eDc2 = TestMember(Address("akka.tcp", "sys", "e", 2552), Up, Set.empty, dataCenter = "dc2") + val fDc2 = TestMember(Address("akka.tcp", "sys", "f", 2552), Up, Set.empty, dataCenter = "dc2") + + val gDc3 = TestMember(Address("akka.tcp", "sys", "g", 2552), Up, Set.empty, dataCenter = "dc3") + val hDc3 = TestMember(Address("akka.tcp", "sys", "h", 2552), Up, Set.empty, dataCenter = "dc3") + + val defaultSelector = new GossipTargetSelector( + reduceGossipDifferentViewProbability = 400, + crossDcGossipProbability = 0.2 + ) + + "The gossip target selection" should { + + "select local nodes in a multi dc setting when chance says so" in { + val alwaysLocalSelector = new GossipTargetSelector(400, 0.2) { + override protected def selectDcLocalNodes: Boolean = true + } + + val state = MembershipState(Gossip(SortedSet(aDc1, bDc1, eDc2, fDc2)), aDc1, aDc1.dataCenter, crossDcConnections = 5) + val gossipTo = alwaysLocalSelector.gossipTargets(state) + + // only one other local node + gossipTo should ===(Vector[UniqueAddress](bDc1)) + } + + "select cross dc nodes when chance says so" in { + val alwaysCrossDcSelector = new GossipTargetSelector(400, 0.2) { + override protected def selectDcLocalNodes: Boolean = false + } + + val state = MembershipState(Gossip(SortedSet(aDc1, bDc1, eDc2, fDc2)), aDc1, aDc1.dataCenter, crossDcConnections = 5) + val gossipTo = alwaysCrossDcSelector.gossipTargets(state) + + // only one other local node + gossipTo should (contain(eDc2.uniqueAddress) or contain(fDc2.uniqueAddress)) + } + + "select local nodes that hasn't seen the gossip when chance says so" in { + val alwaysLocalSelector = new GossipTargetSelector(400, 0.2) { + override protected def preferNodesWithDifferentView(state: MembershipState): Boolean = true + } + + val state = MembershipState( + Gossip(SortedSet(aDc1, bDc1, cDc1)).seen(bDc1), + aDc1, + aDc1.dataCenter, + crossDcConnections = 5 + ) + val gossipTo = alwaysLocalSelector.gossipTargets(state) + + // a1 is self, b1 has seen so only option is c1 + gossipTo should ===(Vector[UniqueAddress](cDc1)) + } + + "select among all local nodes regardless if they saw the gossip already when chance says so" in { + val alwaysLocalSelector = new GossipTargetSelector(400, 0.2) { + override protected def preferNodesWithDifferentView(state: MembershipState): Boolean = false + } + + val state = MembershipState( + Gossip(SortedSet(aDc1, bDc1, cDc1)).seen(bDc1), + aDc1, + aDc1.dataCenter, + crossDcConnections = 5 + ) + val gossipTo = alwaysLocalSelector.gossipTargets(state) + + // a1 is self, b1 is the only that has seen + gossipTo should ===(Vector[UniqueAddress](bDc1, cDc1)) + } + + "not choose unreachable nodes" in { + val alwaysLocalSelector = new GossipTargetSelector(400, 0.2) { + override protected def 
preferNodesWithDifferentView(state: MembershipState): Boolean = false + } + + val state = MembershipState( + Gossip( + members = SortedSet(aDc1, bDc1, cDc1), + overview = GossipOverview( + reachability = Reachability.empty.unreachable(aDc1, bDc1))), + aDc1, + aDc1.dataCenter, + crossDcConnections = 5) + val gossipTo = alwaysLocalSelector.gossipTargets(state) + + // a1 cannot reach b1 so only option is c1 + gossipTo should ===(Vector[UniqueAddress](cDc1)) + } + + "continue with the next dc when doing cross dc and no node where suitable" in { + val selector = new GossipTargetSelector(400, 0.2) { + override protected def selectDcLocalNodes: Boolean = false + override protected def dcsInRandomOrder(dcs: List[DataCenter]): List[DataCenter] = dcs.sorted // sort on name + } + + val state = MembershipState( + Gossip( + members = SortedSet(aDc1, bDc1, eDc2, fDc2, gDc3, hDc3), + overview = GossipOverview( + reachability = Reachability.empty + .unreachable(aDc1, eDc2) + .unreachable(aDc1, fDc2))), + aDc1, + aDc1.dataCenter, + crossDcConnections = 5) + val gossipTo = selector.gossipTargets(state) + gossipTo should ===(Vector[UniqueAddress](gDc3, hDc3)) + } + + "not care about seen/unseen for cross dc" in { + val selector = new GossipTargetSelector(400, 0.2) { + override protected def selectDcLocalNodes: Boolean = false + override protected def dcsInRandomOrder(dcs: List[DataCenter]): List[DataCenter] = dcs.sorted // sort on name + } + + val state = MembershipState( + Gossip( + members = SortedSet(aDc1, bDc1, eDc2, fDc2, gDc3, hDc3) + ).seen(fDc2).seen(hDc3), + aDc1, + aDc1.dataCenter, + crossDcConnections = 5) + val gossipTo = selector.gossipTargets(state) + gossipTo should ===(Vector[UniqueAddress](eDc2, fDc2)) + } + + "limit the numbers of chosen cross dc nodes to the crossDcConnections setting" in { + val selector = new GossipTargetSelector(400, 0.2) { + override protected def selectDcLocalNodes: Boolean = false + override protected def dcsInRandomOrder(dcs: List[DataCenter]): List[DataCenter] = dcs.sorted // sort on name + } + + val state = MembershipState( + Gossip( + members = SortedSet(aDc1, bDc1, eDc2, fDc2, gDc3, hDc3)), + aDc1, + aDc1.dataCenter, + crossDcConnections = 1) + val gossipTo = selector.gossipTargets(state) + gossipTo should ===(Vector[UniqueAddress](eDc2)) + } + + "select N random local nodes when single dc" in { + val state = MembershipState( + Gossip( + members = SortedSet(aDc1, bDc1, cDc1)), + aDc1, + aDc1.dataCenter, + crossDcConnections = 1) // means only a e and g are oldest + + val randomNodes = defaultSelector.randomNodesForFullGossip(state, 3) + + randomNodes.toSet should ===(Set[UniqueAddress](bDc1, cDc1)) + } + + "select N random local nodes when not self among oldest" in { + val state = MembershipState( + Gossip( + members = SortedSet(aDc1, bDc1, cDc1, eDc2, fDc2, gDc3, hDc3)), + bDc1, + bDc1.dataCenter, + crossDcConnections = 1) // means only a, e and g are oldest + + val randomNodes = defaultSelector.randomNodesForFullGossip(state, 3) + + randomNodes.toSet should ===(Set[UniqueAddress](aDc1, cDc1)) + } + + "select N-1 random local nodes plus one cross dc oldest node when self among oldest" in { + val state = MembershipState( + Gossip( + members = SortedSet(aDc1, bDc1, cDc1, eDc2, fDc2)), + aDc1, + aDc1.dataCenter, + crossDcConnections = 1) // means only a and e are oldest + + val randomNodes = defaultSelector.randomNodesForFullGossip(state, 3) + + randomNodes.toSet should ===(Set[UniqueAddress](bDc1, cDc1, eDc2)) + } + + } + + // made the test so much easier to read 
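// ---------------------------------------------------------------------------
// Editor's sketch, not part of the patch: a minimal illustration of how the
// selector exercised by the spec above is wired up and queried. It is assumed
// to live in the akka.cluster package next to the spec, since
// GossipTargetSelector, MembershipState, Gossip and TestMember are
// cluster-internal APIs; the object name and the members a/b/e are invented
// for the example.
package akka.cluster

import scala.collection.immutable.SortedSet

import akka.actor.Address
import akka.cluster.MemberStatus.Up

object GossipTargetSelectorSketch {
  def main(args: Array[String]): Unit = {
    // three Up members: two in dc1, one in dc2
    val a = TestMember(Address("akka.tcp", "sys", "a", 2552), Up, Set.empty, dataCenter = "dc1")
    val b = TestMember(Address("akka.tcp", "sys", "b", 2552), Up, Set.empty, dataCenter = "dc1")
    val e = TestMember(Address("akka.tcp", "sys", "e", 2552), Up, Set.empty, dataCenter = "dc2")

    // the membership as seen from node `a`
    val state = MembershipState(
      Gossip(SortedSet(a, b, e)), a.uniqueAddress, a.dataCenter, crossDcConnections = 5)

    // gossips locally most of the time; with probability crossDcGossipProbability
    // (and only while `a` is among the oldest of its dc) it picks one of the
    // oldest nodes of another data center instead
    val selector = new GossipTargetSelector(
      reduceGossipDifferentViewProbability = 400,
      crossDcGossipProbability = 0.2)

    println(selector.gossipTargets(state)) // candidates for one gossip round
    println(selector.randomNodesForFullGossip(state, 2)) // nodes informed quickly, e.g. when leaving
  }
}
// ---------------------------------------------------------------------------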
+ import scala.language.implicitConversions + private implicit def memberToUniqueAddress(m: Member): UniqueAddress = m.uniqueAddress +} From 3be504dd00245b3d4c174e0ca49787ea9e61c9c2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johan=20Andr=C3=A9n?= Date: Fri, 7 Jul 2017 15:11:58 +0100 Subject: [PATCH 17/34] Unbreak MultiDcSunnyWeatherSpec #23310 --- .../scala/akka/cluster/MultiDcSunnyWeatherSpec.scala | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcSunnyWeatherSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcSunnyWeatherSpec.scala index ff4eaf9156..7a0fff7130 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcSunnyWeatherSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcSunnyWeatherSpec.scala @@ -45,17 +45,10 @@ object MultiDcSunnyWeatherMultiJvmSpec extends MultiNodeConfig { remote.log-remote-lifecycle-events = off cluster { - debug.verbose-heartbeat-logging = off - - failure-detector { - monitored-by-nr-of-members = 2 - } - + multi-data-center { - failure-detector { - nr-of-monitoring-members = 2 - } + cross-data-center-connections = 2 } } } From 87d74f1510ec0da8d86b45404bd0e9bbafc58be8 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Thu, 29 Jun 2017 16:58:19 +0200 Subject: [PATCH 18/34] Docs for multi-DC features --- .../sharding/ClusterShardingSpec.scala | 15 ++ .../cluster/sharding/ClusterShardingTest.java | 10 + .../ClusterSingletonManagerSpec.scala | 16 +- .../ClusterSingletonManagerTest.java | 15 +- .../src/main/paradox/images/cluster-dc.png | Bin 0 -> 106683 bytes akka-docs/src/main/paradox/java/cluster-dc.md | 1 + .../src/main/paradox/java/cluster-team.md | 1 - .../src/main/paradox/java/index-network.md | 1 + .../src/main/paradox/scala/cluster-dc.md | 191 ++++++++++++++++++ .../main/paradox/scala/cluster-sharding.md | 2 +- .../src/main/paradox/scala/cluster-team.md | 15 -- .../src/main/paradox/scala/index-network.md | 1 + .../java/jdocs/cluster/ClusterDocTest.java | 17 ++ .../scala/docs/cluster/ClusterDocSpec.scala | 20 +- 14 files changed, 284 insertions(+), 21 deletions(-) create mode 100644 akka-docs/src/main/paradox/images/cluster-dc.png create mode 120000 akka-docs/src/main/paradox/java/cluster-dc.md delete mode 120000 akka-docs/src/main/paradox/java/cluster-team.md create mode 100644 akka-docs/src/main/paradox/scala/cluster-dc.md delete mode 100644 akka-docs/src/main/paradox/scala/cluster-team.md diff --git a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSpec.scala index be7b312764..60d3c1a12d 100644 --- a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSpec.scala +++ b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSpec.scala @@ -699,6 +699,21 @@ abstract class ClusterShardingSpec(config: ClusterShardingSpecConfig) extends Mu } + "demonstrate API for DC proxy" in within(50.seconds) { + runOn(sixth) { + // #proxy-dc + val counterProxyDcB: ActorRef = ClusterSharding(system).startProxy( + typeName = "Counter", + role = None, + dataCenter = Some("B"), + extractEntityId = extractEntityId, + extractShardId = extractShardId) + // #proxy-dc + } + enterBarrier("after-dc-proxy") + + } + "Persistent Cluster Shards" must { "recover entities upon restart" in within(50.seconds) { runOn(third, fourth, fifth) { diff --git 
a/akka-cluster-sharding/src/test/java/akka/cluster/sharding/ClusterShardingTest.java b/akka-cluster-sharding/src/test/java/akka/cluster/sharding/ClusterShardingTest.java index 2206a8fa65..2c480da21e 100644 --- a/akka-cluster-sharding/src/test/java/akka/cluster/sharding/ClusterShardingTest.java +++ b/akka-cluster-sharding/src/test/java/akka/cluster/sharding/ClusterShardingTest.java @@ -6,6 +6,7 @@ package akka.cluster.sharding; import static java.util.concurrent.TimeUnit.SECONDS; +import java.util.Optional; import scala.concurrent.duration.Duration; import akka.actor.AbstractActor; @@ -91,6 +92,15 @@ public class ClusterShardingTest { ClusterSharding.get(system).start("SupervisedCounter", Props.create(CounterSupervisor.class), settings, messageExtractor); //#counter-supervisor-start + + //#proxy-dc + ActorRef counterProxyDcB = + ClusterSharding.get(system).startProxy( + "Counter", + Optional.empty(), + Optional.of("B"), // data center name + messageExtractor); + //#proxy-dc } public void demonstrateUsage2() { diff --git a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerSpec.scala b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerSpec.scala index fe25daae83..efe3cbda1f 100644 --- a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerSpec.scala +++ b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerSpec.scala @@ -228,12 +228,26 @@ class ClusterSingletonManagerSpec extends MultiNodeSpec(ClusterSingletonManagerS def createSingletonProxy(): ActorRef = { //#create-singleton-proxy - system.actorOf( + val proxy = system.actorOf( ClusterSingletonProxy.props( singletonManagerPath = "/user/consumer", settings = ClusterSingletonProxySettings(system).withRole("worker")), name = "consumerProxy") //#create-singleton-proxy + proxy + } + + def createSingletonProxyDc(): ActorRef = { + //#create-singleton-proxy-dc + val proxyDcB = system.actorOf( + ClusterSingletonProxy.props( + singletonManagerPath = "/user/consumer", + settings = ClusterSingletonProxySettings(system) + .withRole("worker") + .withDataCenter("B")), + name = "consumerProxyDcB") + //#create-singleton-proxy-dc + proxyDcB } def verifyProxyMsg(oldest: RoleName, proxyNode: RoleName, msg: Int): Unit = { diff --git a/akka-cluster-tools/src/test/java/akka/cluster/singleton/ClusterSingletonManagerTest.java b/akka-cluster-tools/src/test/java/akka/cluster/singleton/ClusterSingletonManagerTest.java index 846460e27b..c848f27d47 100644 --- a/akka-cluster-tools/src/test/java/akka/cluster/singleton/ClusterSingletonManagerTest.java +++ b/akka-cluster-tools/src/test/java/akka/cluster/singleton/ClusterSingletonManagerTest.java @@ -5,6 +5,10 @@ package akka.cluster.singleton; import akka.actor.ActorSystem; + +import java.util.HashMap; +import java.util.Map; + import akka.actor.ActorRef; import akka.actor.Props; @@ -32,8 +36,17 @@ public class ClusterSingletonManagerTest { ClusterSingletonProxySettings proxySettings = ClusterSingletonProxySettings.create(system).withRole("worker"); - system.actorOf(ClusterSingletonProxy.props("/user/consumer", proxySettings), + ActorRef proxy = + system.actorOf(ClusterSingletonProxy.props("/user/consumer", proxySettings), "consumerProxy"); //#create-singleton-proxy + + //#create-singleton-proxy-dc + ActorRef proxyDcB = + system.actorOf(ClusterSingletonProxy.props("/user/consumer", + ClusterSingletonProxySettings.create(system) + .withRole("worker") + .withDataCenter("B")), 
"consumerProxyDcB"); + //#create-singleton-proxy-dc } } diff --git a/akka-docs/src/main/paradox/images/cluster-dc.png b/akka-docs/src/main/paradox/images/cluster-dc.png new file mode 100644 index 0000000000000000000000000000000000000000..40319e45793f8c7990af5c64cbd1226f92f3228b GIT binary patch literal 106683 zcmeAS@N?(olHy`uVBq!ia0y~yV9H=%VD#r;V_;zTxy~qxfq`pHrn7T^r?ay{Kv8~L zW=<*tgGcAoY3w1wH>HlhKeI$U%86x3%e{$lORrz37M?mGz==~)AS9CV>V{IeKIGd_*xizCvLw??~Ba!cB zJ=+nv&(>JoKkj~$HSUos&12#nxe8W z+`!7{g6V31&+mf9_UG&q`YP|t{AoN#ftv->{#j`s|0#_X(DM4=Oi2vryMGN$T5N`S4%yoS8z8jO~wy&Jpiw zI%=xUVSD8LjA@qf((ShY*Q{9*u>Pl%($TL4kMG_&T3I)vVxPik*EgCTymQvuUk#pf z{tTbs{n@Q~8!Y|Os@c!|X6d@lzJsy+LHEBI3OqBIiW8dR{xY2CinrXmyfJ%M!qf@V zXG}R-;^yybGx_Lq2}uLrUf(qgGwhl-&;N0E@664th8&+Q8DHf$zl&ayW+%R!!NGc$ z{*%tnmc3RCD$0x*r=NDT+}vz=<_jyghl9|CMw5|0SwCnAd&A4*pwYo} znt@%V(L{ko`+(F2Mv(@k4a{%YL=)^+Fn)c&VZnCmpk@JI4Ab=kyantwEX@fldsrSn zV4l&Uz`-A=D5b)`(qYyFyA-ZSXTAy6PqFZ74=nr;WFdI2q4+^fg>(#C_~G`4 zWn=YO)C#vNxo$uZm`|Jc}C%xavjmt9aoRvN{YPUk)u?` zx{V>aYwgi(N%~3e6Wlk*7g)$JFYn!asPf^Wf|EP0?$F(_cZXPv_&Ry>p4kT;fBaNY zyT{g!;ry}mhuj~Y{y6+&^N-m-1plZr&0OGQpjsiYh_l8?ZsC**9v03!JS+TWsCWsQ zc5XYupfr6)*N(Xr9XCAp_{k{xO1{70MMdSB>))QZ-ebt2*8Dna4CQx#@fKetLCjELDA~b#z*kdZ=b; zP?kfM(=4@IC0T{JYI|LG1^(*#vVteh&HGqvoAzY=VCCYtv>9y|u4Jw=;mcKiEAUn+ zx211!_T|VgDOG}hIrjG3>1iux>q+bF4%!ycu@r z{D}UIJqs!io@JGPDt5~9%qGq`+@I<_tkT2vJFlr?zT%QHTy(+u?uj)#hd?hSEW#UA=QME&ZPtB0<9x)OCo zZuR6VYp)7l@xHSA>T%N$lRqZcGE6dUE~sQhWvgYryZGkfpA}2jiZ0yhxjHsGB-(HG z(z%Q8F8S-xzUuLc$qR$m7DtOO&t5xw(eCi?ZE|bU7fxRizOp=4{@V5{&tJa3`riD2 z)D4LbysLQGcr@G2wcTv^=Hx6YsTSN7MG6)tr5P+q7+w+m7e_-(tMQdt>$%>DV=U zE$TilJbL(B*0-GB&EJ~&eA*-0nfaRgR`ul^&pN)`@0DN7@~Gv{&+R&Q@?87e_}vn_ zIbx$??RMSTwXXDSk@Fp!yIgmF-ATQ3_>T6S`4uZFRjN!Xd#a-<)_r^P=}^_rinm{+ z5;QkjtYN#hWBb(aQSZ6!MCJ?3=bFEKU)4UR`+oK}&fm_TeqXuXw|-xJH3LTi>l>~L znH91#1b=u7=xG?NFnSTW!Slnc2lE7?PJEhhYy$s6y@j`(<=bAibRODuSoLu7A?ZW* zZaZ8>+zx&?Y(vSx|pienTIxA zNnJkOTQ6PTUaq^hwpvZz(lBM$l$BRHu1qs=ipsg=xsS)bwO%pS zvNpA9ze9Y0g2b5(XK&0k)cYcMEzN1Bx3Tr67a5x`YhM!ls~~l%?8Le=|F$UA9Q`=; zlJ-yS>hPCqZ|!+i+2?-Ff3@Li%hkKDuUL0;Lt4h#&$pUiFDNa1z3ckw_2=X5C%v8- zy}xmbV(Q`bmB%WNR=ziP%XxDq{M7lg^8XHgZePaK$&`5TfZA@~-}OiSocO&lfAh^} zpU&Rr-q{-7-P7&Wy)W-!&dyy|Q?hSfTbavw``5PUy!+dkw>R&9{Lb(A+^T5~Jr zYoUJm`?Ixvzh`~deye}$|5Jwd2lh74W52y>>niW;0@ItO=lNRYU;RA!Yk6B#>#aq% z@=O2hI{53sFK*s2Uf$lW-rKVus0CiW@o>gr^S&LjFEh+?>~0*&(6r*}>+0M0!}8nn z-N$SCO69lP-K%l>>=LTIcIyV-tmvy|_vT%(yCHb>V*RDxDdo#!(_ibm?Y}ZN$TqYx z>f4uZ#!HNUrY}49Y|h*lLM3~xrdQu+1F|CRrG-*$Jexqz`jMSpevm%3Mf&G|RU``DeSZK)0U5%H?B-Uu zPoJ!AvS0FlMb+BRzmClJtN+&L-q*iJaNo?T-B0KK*}d!gv-i8znbhaic>Ff_x$(o~ zKg)lezc#;mpG!@p-Ofka|FUnI-_4(UzjmMfEU($Kgf_IknHk35$n59A@Id#Ldcb0a z1$P-5R19w`pXZqM_H{bPAN6@}7pJFiI{1n+EZoWPV^OlDRn|hqmn;qnQVabU%#4MP z+fSL9kY@GTHP1bI@}-q4%QNyjuCqS4ec;NKl;2mbY>(Pz*#a19;eI*63l9Fs&r3l{u1?T*tR0UH#6FmbZJ1zwU z1)HLjG^-#NH>mcalr&qVjFOT9D}DX)@^Za$W4-*MbbUihOG|wNBYh(y-J+B<-Qvo; zlEez#ykcdDAuw}XQj3#|G7CyF^Yauy<|ZcPmzLNnDS<3ffB}d*Q!6qNHsuvVy_KAw zs}GXVH`FuGhno#D9wcfNkXezM6XBAXo0?agnV)B8W@u&uR*xZru+avgGZLZG*w_?V zCz1?Qr;R?y14tf%gcevd$i>Z$%SIm@uAnfsoxUH?#&1Z5fN}% z*o(`mg8Iq8k&_jmr@ZM~);P^|U!oWE!0&b*xV?9cf*)!**t?cK_!U0SP8>Z+7sxux$gg&~jm20c0q=sXVTzCFHG zy;|1SGcq)EYG_EvkD$oNOpyYw8&l@2ymw1%%@OaDkD-o_*RmEqvRRu%&Cab(xB8 z%krJNlY9QQs{J6Ti9Xt5mN8Ru=C-o4QLFcFo0qy|Y4qb?Gq0_w6MjAC;=L-< zmwDFPZd}{6d6TZT_UTQVH?J*xe9SH~*{8$}7G51h+vbQ_ zUR&hKoqcP|%G9*9Uo8RI6X!*^!$UEri#4ffo8~p|!pKRYLd#rRcb5GkT}w8t=8}z)PN)1t(U?zDoD` z5n5e)^`unIiIz<-Qs-V$oK+FY&Z+Qs9k11m_ttXRvA0ULU0WL+{rTD1zmcXT3gU>w zx)>t7vclyC;vQ=etifyT3HQ&T%~XDCH5WuP2{p=Oo34Q++0uTnyuUdE%#W 
zu=~3S=P$dhvpi=f`F-o#(Aw)i5)^id^Zpa%)cDPBTfd2o_tkTrEnOk|w&$qOyc2b&+ivf{o*39psLf!`CgXG9peq(6OiXtHy9rPbze83FIwRx!@(Tqwe!-)+w&-(w}Ukee~eQ&Ck*cD$>$=eE+>^oc*}^jK_DLwSHf< z6tC^b++1Up-+4|ceg5Ii()a(JEPuQ%u{S$>^IP*M^^%p`V*kpUkKfL)cWN#=u;2CZ zu4}6TV_#n^4{sZ(gh*KlVedv8Vg<@%p(9?|)Kd3&mN7MC&=32tz?E#4R~ zZ&CP?13zQt*?zxceC_l4{r{@0AGQd8dUxR3r4`nE6;I{Pa|%zBxp89px|l_EmwnAe ztLHAzd3J7g_W6IGqShTY@JK#zVcwNx9o)(%v>Pp6m`JO#PC4TGvVlKVLuE=p*g*mI zhWs#J8 z%e2H3^16tUM2UsFSN*!TzVy|#4HN$vGNjM!Ep1-C^8B)| zpH+PurU!kQ#J=Wc)Yc?n`|5|s{C@VXS>wL{;>U*!MvNy~ce?FsS^jg`|NXYVJkw@g zeySv=kZ&i)P=5dHwaxi=GG<#I@4s^5)5T9~wc{o{)~@MUeM0?o^}6@@al0SKnPtED zer)gMJIP&jZs#YJCr+5SX~Ptgbv{`Sw%yJ%?wL|!?Yn=AR`YKQ%Nx_&v=gNMe!U*w zzk2cxrPUUVe4h@_xMx;(?MQ;O>@{odpW-E_rdLd1*iv*v@YsYyHyDp_$Mop-*j+ln z9N#9j<5ai7fsJAh9GT_#>~<_ETvpNdgkizOjnaY-|EsWPK6ouTKRre5f6AU7(+UCm z%V{^)8@J!`*?+BLuka+t$|>pp|FO$&UA^x2^y%4oBFd)cPJLcK^VGgIQa93G>%Q%a z-u0Ys|5Ksw$LCj_t9v!qcG{uNg{|W6Mg%8V%h$e-y*bpKfSxLG5MX{ zOSZMQZoV;eKJ?&`woeJ0K=rOW{gP_e4I8qr8LUeRJ@|OWRIa%%>>4hbmfgyE{m6Ub zo8L;Y+CsbYH|6OR_VF}lZ~Xhhz3k1sb&+S{Jm()wUTbBVdz`mc?M(26!p~yA_L=;5 zNbrw;RPxZt@579lw-?{t^=*Fro0rB#GeeoDDNmHkyx7a|DNygrlW*s~UuH}`Tyfar zNJsKFKJm7BkCpS^q%QWE7ysh-snFa@{qnb-cnbGkHF`9SeP!deJyKdvzZ~n8zTLCgtPnL?!OPzB; zGj+=fV@=;v49%Wy{B~>+liD9Gn0)C{j>ML(#X1tn3s=OjeQRlCm(6c|wD`}5!Y8eU z?7DjvFpAqf>g+$%D*p0Um#|Gnib<3EoSI8chN+c&GtG^9j9)xlY{l7B^e6xS-)|d( zcgxN{H*4zS`v&t}md8(-zVG6OMCR%r|EzPTpR<0p>chO>a@(}mz08|m^R2b^?f2Gs zAGc3lQ?+$fUBSDv`!-FioVc{+%p!hyJDZXtXFe`o6E*o-;)fSk{;z0XGw(pbM889y zIQKp;Jd?NasB+q@nEOYB7GA#dEWNuh?q*x>t0!NwZ>RCTZK~b2q1x`v>#wifCcNI8 z{)f5Iw)XsuhjXJ?`M(5-h9~~F*|E5Hj?v}QTijl_r&NBZ|GQ>({o0#}f7uc}Hhx{Q z^mF_3?)$u7e>)nlmXT=PX!6qJMa0XHQ>|rpE=;%h-9J<6_uVa74_UdKbxygiv$3{S zlQ{OWDbl#cvwU9Yx`{J)KDom5GXA2)tM?0>bPHE4c)GOs&%5g$E&*-r@^uz`n;*0M zy2|?PUUmLqQ5L=}Pp2^+ToJhV(1h3b_U`^D`eqeFY0sq>=7k9oj(fhIh*+q*vRH^s zJU;PLF#F*T6E{xr5$COZFr!jQ*6Jy*{8MJWOrvKUi)9#oUTD-;=9GDBs?1aWF7x`|)wfr&p+U?2a`zV(r%7a9kE`r_z0$O2t-8Bh zy6)Qlj}<3&($n=?Pqp!x@>3s?y}O|Pw&ouvwyF98jsca4kgVEV_Ne_}E05?Xr9K`p!1hZ^?|>Kh5^%QHSb3o8y_- z_a0j#b!ox(f=hExZ1{Jpv}g16h&I=}X%;u%v;DuY|DR9Ii-PBZY|HJ>OPZ@ptJe-) zpZYo7uXC+ZeEr|8h2@_&9q6@QzD{95;}Rv2%oi6HZuI=uz|7a7dYlkfH1X?|XNE_F^uCjYSkA970s^`rI)29f_Xi8>0P?nrj(Q>eL z`VU3*#03wfzP>%9cKqnQ&a|_$j!JrHJp9D=w_Y-#{NC>cu{Hnfi+-Ow6)hQ5d09%M zSLy-xh4TCF6W`bWDD+$Z@!)}%%jYC4cJDW{dzB=%Hf!(JyBk&|<@)b7p7twg=7q1F z)2yA@e;M!6?J+3n5m)%q|5_#7MzDX~L5Cza-IB$Ra~`+}njBU;%M;u3&guP$f{AiR zB(g)cFXNRmznb8p<&~npYD3|+!cN1 z8tYow$<8{*ae?vWg&#VKOb<>iR5-xy&ZRWPSFGJYHrVE4;R7vxE8Dpe^36Wd?@tQL zT$*I#%<*4b_WtKa`A-Zd4GL|%*)FLjYlzjm)g5~9@PvFNt7lbfy8gV$+WS9=*6sUU z`}@Dp*S0yi=6T8GcdMMU<~;q~ALjRT@%@LVjBEa${a*IJx&LPG_IsjiQomMglMlL^ zIeYDz)9-e>T)d$9dfo247rj#1rrUh)SQN81UKRCj)OmaKqV2l)E#ci;E^K(A@#@6S zHnzjI2mNz!rwWxiYvy!i{5^~}qnq2Z+4GO-zfkJ;`1yA~*VZFD*EIdx_u{Ag;jjL8norEsT+RLA z>od@zl+A55PdDjg+}N=2ZuR@UmLfcJ_y2jSf9z|d^_vY}R^C>a=(k*xBc^uZO)e(mNr1qSMs;@0{P>cp3QfCd`X>*6R$b3p zQf9v3gtWWlhlA`_-lZlS)Dc;mWlidbGO^Y8oKWJv~b=jWgcm}L+f_z z+Fknhbl|r8`>I_9L&BeKv{`uuvY7V5OhmU_y^8chZ z*Jz==_Rf2}v8Oj%U#jfmSG3u4;Ixm;FG;IIj3=ILGJmmQckkPeB{eDzHQw`W7xO)2 zTz_bRlVnD#=mwkPADuzVL!a*C(AKq%FJzjte)FaS*XPC8d~7v*eWIQ1%n{DIKOfz9 zKWvkp7Nr-aKKJ=-^Q$)(FMqgTn-F!v{)w~SQ*{wd^Qn$Thl2|ziKa4rSYyRh zf7Yv2Tkz?`^>)msZ>G=9x-C*PEqLFlIj5#xbBMflVCM36*3Or2$z z_0A{0^+P(tVV1?Lk2#y$Z*IOI^TO$cjhr$U$77zkPb;`>d2L?G78rinqT*%4SFl-g zarx%LC!+eh&$08QOt?2?UGw}e>h2v3g&TKYX5riSoL$b!P+@lbnHv+IDID(JHPLX! 
z#r`u>enw1~nwfYlp;_E}Z+6&P!7snsCwBKp6^qROqM6S1bOGDGzu#`(Q9t|C;ZCTS zAan0;%kb34^|!a>KF&UWT7SO{ACnK)!8KBCx3$;rnbgzwA?f|a5)lWsP7%h!B^Mb} zr(Aq!FmI{Hixn1GdoeOH=9ON_q~#l3q>Og_>$=KPk@{8AZJs(CUj@UC7lvs^9vt3W6IYV`qg&r- zq5Q&!*BDaR`@cl&`;yKuGgo@tN!WXxQ_uwD-kEg_KPU_!07(Bz| zQ~10^!o24XFxjiGuwh7@X!b0}W!iEpY;_5z!04By>1|9y5fJpL>1m#K#}9K{pQ zw7fg(e`KXPM#o&}I`bznX40`uTDJpt zn8ZGAy*-|L&3mxE66 z`l+5E=Y2%=zI@$}M888DlaK$)yJ<0{t6Zi>%Sz;YNrIKF-6^+yRx+6-QC3QBv5pPw zB|DZ}VvvusC`~%5`96Tnk9`8a%>#Fdn2(EN8J0M5#|fQamb$bc*ly88{=O#%*W zG+Z$Mr_W>XCG&;MdAl!)RaawWesp=9V?M0;rK`Qu{eHq8LH_tZ7wRKrIo?;a-K*rh z(0QrbbFS+?iPj9$CynxK(MKLvOVt?29qim=yGGJ>TKGx*eOJCzTw+XV3qSM3((lCU z%tvQ?Wct#1uC?8Mmp^sYRURuoQT6}RbfeqkdAfgW-{Pjvq}i+YTM9H2^=AJ6Kj~+d zOxmrW9R|Ek7D4<(bxR|8K|M&*!XnYLsSJ@yhn|b#fn_c$BF< z-OBQS-Nq$~u5&&-Z2ri?Uod4^$)kgcy!&hxp9t`<;g+BOr(=2{s~&@G;iTA8oi-|~ zFC9>hZJTw<(W>SHH zoZWA?Efl{{TD;6}%h7$)#hufyy}6mVB2+bD|*Z%XTP|=bF$m*r1>}Jzjn8F4BuV$ zR<%euMloRFGz-lV*EVmxolBI&^~1OQez#k{XVqaoYaQ|JT8w%z`+q*0&8=$tbV~4> z<@4*JPGxvXDQ;1fG)bD!b2wwtMi;Bh3)((bbA=`psvdQm)nJifD(z-tT%@7kcdCv3 zrvtMd-_8JyhDRry6Hl_nOU57h*yAt5XIJI7Oi(TU;w1h*N2fopVGvRA<~h(A+u!>s zaGA&@DUa+0PqGu9oS3-Ndh$%i)wip80Agdj2%d;4J>L|Lwk& zrYj+ct-n&KkYz4y?fj|&~<+j(2P znGm&tdMn9dRGizwla# z$R57xZ@Krdx8j#|&oAVE{&RHdP2CR%4z{xk7{~RUUlYD(%fjt8axyYMlU}&qxUAy- zWa?AZ#{zYN@As9+RRt7H@{>HG{prJw2`Z;&FAe;*_#it&`YZkWH}0HFemt{Y*{$ct z-QzhskK`zrUf{asVOjC-b^L#+?EiQCS0-zoYP_MXqqJT9&&0#&Qw7-sucwHXhKkAN zZSz0=__Cm32ZN;9Ma8|%FK7C(HMk#FmOZ+lVoFCvqqwa3MNh*+6F8%Kw9`&K&}HA3 z;vrRh%DciwSWTZnUY&Jvh}I&}yh&!7^D8Fk9pRM&rWMA=C9as6wPyeIp!AK#eK z(>M(@`W?i4iT$UNzvsz|TQY-RUs)O4<2qaJ!r5bI%bSmUeKBj@@l{t=OuQ(*&br~o z>ZcziA~JN-rW|T0x-aUZ!~W=biTE$k=F_ozCM;^V;bUIsbot_8w^vwxc-R_jw?wo4Psewg4~!y$el}C=&p&dEnZ#dm3|QYUm#v{QVgjCWJwgh~kp_NosL8Vg(H9%b+-ytp&HuX$QQ z>W4cCUF%hCWu@X4rS4#z`~0blQ~jdL6FR3ye_O_QsoYTd`Ikw-Rc0xL^PUL*x^h~4 zQ{^0G_qat%c@8YvxjFaV4aa%@X+jMRWv zW&a~jpY1NWw{goR-_M@e3TqpkvKaPW*j4)ahEvnOIW|lBbu+f5`srs{3SPP)KJ97a zxq}H@&ZkTHc5x-Q1^>C&@--`Dgt-n;6njNwVSKRfEfX7z~wI^WFu>e{CL<(cA@*OlG}2EPfrFK>QBKZX6m#MtsX zC))hyy)4q-l)UZ!g?*An2jzaOi1)khb=*7LtNvQfrAx>5{Ezr>aev@qH&%aJ@n`xM zKeaAhR#+L$9K(0cb*rf0j>SC8Q}!k;Wh_yWnaj1@&-I=}xP;yhhL=V=Qd~ZYx!Igz zQ;?JSc=3$HDNl$3<$h+TH;_dUApU1r;Yn^{?U9Z!1aP#Z=7c@ENivBwFRn9n$H!F1WbHCR~o1f`! 
zxjgUBp}sx;m49b_sHom^TkqPg!n=2TB4(Gqx@CL(ZOZ$n{qa%a{sPKmV&d7E(U#=)?qaf>~!@0Z%gv&Uvd0Grg*rs%m1jbB!; z-?u8j^;ykaZv8zM+IXeIw4P2f6!ki4E$P*4A?dwEbBaJ|ud>d9=H_^T1rr;Y<1HVV z+?D%nIw#Dy;se`}__vXB=hPfvoWEfTw~0-&y41XblX;FXA5x5asvLLzkoUFDK9Pws z52l+G@Sb9w9+R5C?`xT!_B2z_`4wv$|Ey?CJ3H&W*69<^;~-{d}))NAGRB{rdiI_JqyPZhi3Ff75wB-v{oi%cGZv^){RTdVhZQ`t;B{ z_McCle|ucd~@91s;6H{1NX0ZwPD4VJ^i3lDs!*Lmg`=b7hwqUJ>x^65F?}TW+T$N8f*c(MNG@ z;}*70o7tqyax!Xk<=tkKNbkB*$~MdI_rVJ>FT~@#E}ApG*zEf5Li##q{&zPH{hPn{ zGsBiu7QcR6IK4QW^`+}UwulYl8xlIZ+!nj_#)i1`u9$DzA!U7Jo#C+?N9A@e3UYB3 z5Oi_a_2pjm?`P)U|LM1jvpo6OpkDr9&)vCmXWrdxZEanCexB|Bm$o{o!gCeAZQ6O} z`#$;fwX?5%xoWw6X5at!4Tax&taCqRgqU6aSS+TyIqRuctL%|=vb)oBtMAU;AsWZF zx;k*(^sj$+NSa-cb!4zUU$=eotn1sv_eo8(iZRojyy@Prye(U^HYM-5(qPL{_21^` zvNN$~7Ejute)D93neF^7&-mXM2kZ!$$uYU!!X(zrveW+<_aC_}*VabgW)+Y5V1IwB zQ{jojJ8B~lq{q7F(oPh)&rDRYD4grrj6!=RZKE+>@cTZk9xJz>ylu4-s@D^>7M zZE9zWlX0&5sp%_uLF??^32p9)A1ePYWaKy*Y|Av$zrKy{_s#ganc2VJZ}*>4@USxd zd9Y{X#+za%_KPL(YEee==!uv#!((jEw9Lj_aOgyeKC0^m_L?zYBKSR^< zc3LdrxBU`Ovt!!1Ye#0yUZ=3M9CTyp`+dK+MNBQ6!m>xB;oON#&uz zx5r}g1*RpT^BtD_Kd`zwXz%2ovyJb)?cJVxu!(hB{OP@m|IH3A3k%!Ex=D9KWpu)E z+43!`LRTMqbmCNMUCM;+=e-_pSiZSzYgRVxc)aA}=KiVrH|HAo&YK_Hw4QCvwwK&N z{>B9>xOOJDR5pE^$8QmEP4mt^xlP`@yZ640Gdn5oTxXfoRr^vq%D>L=$T`as@~%}k zr}UrIirv$p8?i^_YqOMN9A9ek_po)}^B<3|3gMAHW2^rCRwHvtbLo8V=j;A1EXk<)_9pZByz0CQlj6kJ%$#{` z>TXU8ts|n7JI#f7mwHzmS@mdkz>)tO=G1<>d2Ok;_+@27xrvRZWfpz6m#K7LP_Qe& zO;dH@qf_UrGha_H`1Nx6`uPg8ci$76aPCt43*F2|Uy?6n2FfWmVfJIzsvKaN4l z;nPum+jAjB2E{cj_8JSO#;m>8*sT6)N|~A_e?qeTrUj05LM8TxRNuSv&HHi0TPh&7 zbG6+n{T*jnKi^ftwO`QMqmYY=4uC#h34& z#Z-GsZs&x8wyqyux8^Q?lY9P)lw9@Y3Hkz z=Lf65DA$QUmG)zv$qV&!v-&GufBYOW+bTWr)I2M@eNhj#>{1UqT_Cm7{9ICWTl2HR zckR_Yzvt|WpA*>f>UiDW;D1^6cRo(_{+qiYQg+LZ%Jn*JbK?KS_}IiexMKLqpmqP+ zQ?Hjjd!r-wmGOwBdXKh!L*E70bVJ9Yjk6A_oOYFwnalll8k5-#^W0k{-lEgGW%!YcthV zF4@uOq_FL#4sX6stdCCXx<3_Gnyy`Ey6h>J{!Xd(RkNQssxPRqoO9@@Y0c}o-{1V@ zx8JJw{l27Ml&ihC2KP-djTgELHm)uVo$&7Y!Frpt&41qhyzFOrGD^0r;7t7fu=>0A zw_cCS*01}pCN!fq(L$&2f8J(}WY<$~Ikxq>H!}P_xIz8H@Ba~zkKdl0uXc9h;+p$6 zR@b__&wF#F@a(G_scUE5lHXOAGWBm@#?47b-}v5-ZYyqec_Vhb>`mb6MZGia^eZB- zNwF8gvWkj64Cuv~z)Me)adG&LflxJVt@Vr)Ds$YL$ z^0mCp?z*oFp5&Dsa-P3y^VD1BK5XAZZ4zJ4H92+E@bNy2C>D191+vHX{QBN8J@>|; zXZ+V|{ZM0mHFLC}^G=G1>zFPB-?7tI#`I{x3lbF4&qD@zS z)1_(aqOwkhZ@hS3`ZRw|xAezT!k11k$fJpG6BhW`ueW;x8vnb9y&oa^b&NmGx!ymvx7bW?~v| zf5*wKIov7H39q*w*qJ^*QGeg3S++mFZ2s1_-?8>sruuEh-*0EnR1S5{VfvPM-uC+z zcDafJ-)vs9ML)}Ac;3d}vHlacdizxE93Js+oyVs<++lf-*XKe)`oE{r|MUMk7fU~T z-p?!X`uR-D{KK!FzMW;Z`}plCtGPwDNp?@W{YUlm-`umSzHOABd2se*OH0+auNIf} z)}~nQGw{Z~OII@{;;2dmi%12Aq=5zW4Qb?ccNKS>IXi{JTug@4tS1^ybT+ zx;ZCQPJQFwUv2w3?A*q_x;80&ex2xDI$MLD`&?iT;QKy#+rz~6y>^Mlmvaw(pK^0r z(5k51+2)U|qu(6Qjc6IZr=7ay!v2 z{b{E{adMpOqYp)yp$(p{+G4YEOgsgOE}onsDm(XC>LSf|=_^947nMB-7yUN1$4Rrj z-@4uZ=|O*~gsI)1Sg$yjO!D7%DsG>WY2n1@>HBUsg?8s$>RM@5a?#^=&b#nw#`~to zhOJvFyYDe$*vcf;)e##PP1O!RXYsz}chF9){NnbtQCqthFYl|ZR`$EeDq7*Q*8SS- zDt`0(HJcx5%r8798MfSSZoKH^D~u(czgCDUzg+vmrCF|k@rk4Q-l;s7)~r~x`GQBA z`yqyvK_9&AAM9Q5b7t7q64Bj@pEo-;U3S^`#W-@>sp*1Ke0ung1?}p&)BI}NmgcOj zTe9@3|J>>PcJKWBTOE;$o%L!?TwNUY%5UbC$nTmrXA7O1YkvM-#p~GLpI_GZ7`?i- zGBqah#5RX*tlN6UGB$Z9?|z(b9{Hb<`EBd;U6Iq<<+gAC|9juxpFNk?+?2}>tJxrY zW48XjA4+!S7P%4j+VRQXBY2CG1^s49Po831VjXnlc~lOE8Ef2|_?Wue76u2>?fo_g z{o~ktdgkxn(lfmuPfRbZ7x}ry+J8+>V)u>qvfAzYuKj#c`zV7)t}(2L{cWe5bMA{L zJIk(4+ z7f=67I>DkHma*0L!}4in<%O@ZRZahVd{JC?=6Jov?f0U~i>At}Z@R_5a_ZjW%Y@YD z`^OaeZ<@M=zfWmG@cx~V^S9?*o#Z<4*?F7o?Mx?wqaLOeUYKE{c79&g++TO{ouB=Y zkzH`I&Fhllsx4e$>4uy_*HsvF=SrB*oi`y!cJiy39}C<0-k0t)=hzZ6m4EHTSux#9 
z=h(k6Ie0+4YvMEiWe1BL`B&KOVlOCZk#T;O>cIZ<-Kc(L7l{sxiwFP7jYM@xN^Vf+ll2ib&-sV z_~ptUF+5s%;m?yllP*qQGI`4?>94JEZT^L)IG*!7K5>t8`OcE}`)@>6&&fLf|H9ce zANjoYYd7!5S-$n${ciH@x7{0S7y0~clvjANNaoZDZl75uM|agOUGq47+SIMKWw%XNeYi{?{;6#dK+R~a6>pp+WuL?DcvOe!PS&{pN2DO?l z_0!U|UNa|s0UAtxf7K5}~G11$MQr*9HE{neZ z>Go;2jVX| z{u&FuV{6y>ccJ5vt6HX0!6fe;XTG0m6z1K;;8ekJl3ByNQ@yV!)2Xd)k?-c^%l0HK zR{!}p_J5e9zP5^gCcop)FX{7Zzir%puj=%w(ACrYRdXaau3?&2e9p2`>&@bRyG@44 z$0X8^Zj`#dF4p?-S!KEZe3C{g%Cq;0d)P)LsDQoKb%;&#X#T@tl zm*!vdea>FavzhidPr7pXS?c4m9bJ@ZIC@0V?v@BTz?PFw!& ztnrW1e?OG}S*X08^E7W+PL*{Y;HfElIpv9|NYxX z@llbRkN^5L`(IkW{a2s1W%tXO-^_ZQzfsaSJ+)x{zHiHF|GB>RscMvGvrT(rF@JVY ze%IuS3+imM@8y(R|GTmHarO2G694P%9$)DxX7e+s`jYXun~8h5KcDo$LxJUkSGW|ANg}xQ9v@iNoc`9bMMRnqX zlZV|leymsZn|e8U?V-TpD!#q4Ms;obdv_iy+jZdz*R0Pc_dQJU{j%M1Vz09M{SV*N zN}qDwe)T1>ex=6OBB>gNImYj5S6+Vnz{y{BM_2d?zS5e>#!CO#_cK(U{1-m&kZ;)q z0Wa2RIN!t#5fnF{zci>q5+US59t_4@sJ<#!6(zrMb%zj@A?0}mM<%h^_? z%xbM$ynpA5`meUX9B0)XOFaFeyZF-mFIzuNuIfwpuxrWXB|i-vDjQ0=Cm3xxwQboP zQ!C%O3@=qOKYI4P>Xa#1^w4ID886@VgO}T)-fvvF-fzXb^ZZq3%e2A|%kGd0WWU;6 zv?a-B@5JR*?yf@h7GL+@6}=wu{d~3BhG$jwx7WR@SZuHKQ=wTuzCnI>dY+Q>&3DlX z$2Q#-I=H9u^V@bu`(4f(T~(|{a4_D8wK2#_vrF5f7 zJg!2q;E%=Wm9PBQ-+A{Th2gjGw##pq_ub`)eo?%_>D!9L<}JJU@AdjvgnW1uk=R{O z^@B&`oBFr4m7Qmf&XY`iETPzL^Wx!B9|pM}g&!hxo7=YQA9a?rNIB6i-tmd!vYGw_ zJ@#lX%Lcv+M~yuy+5gSB_EJ;Y>J-DF%#fS=zRN}YIPEX^DgDZdz(BpPe@s0*e{H?P z`(WjT+lTG89NgTtRO+y{=VR4gOOujiQVk~NUMH4HtM?f%tW7*|ai)JrGg~*`X9KIt zsgEQ)r%Cxw=Gby%C3lggdjr!-W{vBd&3>O2Ubj_XmU^M7ytrl_+kwvh303ZYZStKS zuDq6=6>q)kN#e&t4n_L6ep#uqJpXupiRa{=?JibjZzTGEeA3A4-u(UL<>Pk0-x%-y zdM(;K!fgM8f)le$GKIFRQJ7cr>7?NDxj$FfXm?-OFzMvIEuiHem;JvePA_q)Rkn4M zKaqd`6O*(-=7jh0CpKO#w?8Qro+V)KBlqt2manr-41(o~vZmiZY4xUj;oHEynSW+U z+mx=bz4ks*+O9U`!b0^I-Yb(-lUcLx&(dAvbh5}S;Z(x!>kZ=0|0Nz>@}F^Py4L(f zcUQJP710*$*W6w6c}edqOS4z(yubdG;P`19z+c3zvsb#}`0{L#$y4=8J~i1j?qo$~Y3 z(`$<&!m06aS07)P{Ju@|S%eO6&6(R91s-RH9X*iPeR%0di+ry~X0EqBpJe`$So-LM zlUd)bqMSv?HP5%+G}7-s?>_O^ImTPU?)8i7eIg^6hFtMTs#|tjZmkjDS{a1AR@ku>zZsWE*5q7ia)QQE82ZdMt(YSqjlDPk@ zO`HpZcYaXpE-CTZCG+CIVde~{#Wok~_vS44e{1GjF8lixF|52bKY8aIs(&)kJ?iScKTp5d-UC1rH>=)J{7(IZD`3C`}!f& z$Zz+%+uQSHmYvY|>q}bbJ86MD>w_2Ob%K}9Ut%mcJ$HNH(qCmK?>�?Y8#KxqGp1 zzg*Yf7`MMBIrdh`w!5pB&T;-#cm0+1?e6-TJIBuaJ*ro?e(~R@Yx&FyZV6OBTe9xj z9rH_z+^0ltis1ZRe!G2*Uu)F0mCyfP`}Xwno5_K;Z}u(D+k7`>-j|N5sk&ju;;g=M z-WT3@Mz8*TmG#=&uHUZy`1kD#XY3lsqq`(!UBpWdy35Snz5D#(Z!Y`)H*o!RHoLGR ztC_?7tc0+eZ)(wdhCi25k2&Tg9D2N>r}ocvi{HWj{@(Haw(`BP@SG?6_pA=rztMlY zRR3nd>#Yp>Gd3t`&v@&u@%7E!;|I9!Reoo#{@&a_wdDBg^w$MvdCki1=u5^dw|=*A zUBAsl&(9t2_v!K9Zt0wCyd(cN?}o;AD>%BuwdL;54w?1gM{@rrXMQ_{=3A-L-OAjT z&tB7(zfj;#@$=lWyYbcRrp1UUdOfdf&rf!B`55AvxxBkbeE1%B1n)+(h-h?HE=N>Sdl<%I{ zZBk#bAmSj?^7m0mn+p#)^*!F`Z{clWdm(A|p=p+UH?N4xFKO8xx#m!%mD6-f61}aBz(2qi2%!oTNE#+o_6Pu%#4&#f5hbE)*8$INNlasWmBuo zuh;EPd$M|tg4dtO&1EdHqix6EWQ z0p53tTMixM=1p8yJh#ncdV_EGg^QjHm6Oz;bL>2!pz|m-$WHv{2hVx$I4dtOO>))P zQ}9IDIr9+fwi9l864{|*A)QyHKb87lc&`5EvHX97ZyZm)=AQVo=JEQIes%qmdp}K& zDeA2Kez*LhdGhf-UbB4`8~*?MeOoj%KJdu(Us4BKgVbLB|zG$WX0Yu zm%Kl&)_0!Kcj5jQdx!a#?3Y{=T)V7l`LXp|Cb@dw_Eezdt*uO zUlZf6O+LSC^*3CvuevVx@72o*jenl+d;cu#?3-18_4$_f7X7>MMZ!L2GbxzjFb+2w0-)m4gd}6B3yVd2tw{M=8ynmCWckSY}_t%%0TZR=xWO6@k zZTK?3-~Q*B8>ctzpZ~Akd-?v`=at=}KHHWXueDRj-2CzFde&v{xiWzC68rR?yexWtZ2kH0ogHmA1{rtSpYQz46`|TnY zuYA2Y%3ZFB-S4+yFhi!6#8glJybUXvo}4`4-u%0F@^Mw0LN&cou8JQ|Oin5?>F=E4 zXjjQFne$J~g%gL>^qx8xc04}g*gl=lzQ}o{%yOfX!t*m8OwXChz5dh6_KE}jk!A;H z>E6^0_S^LC!ZY!LQ^vp7b8%g$`+mQE|Hp4n4>U3-|C|$_v8UH6=f;MDL*MS*-v9Gi zt>^y2o|C8b_wTuP`*ES#`Cs?z|JPbS>QKJsx%)==(;r7&e}0+$tFP!q_OA(_{`8m1 
z-#uNgx~zm(W=s5UudvUu<^2CvrE1+~-1=?f?P}Wzg;ORp%I&xO^0(l^swrzyE{mVv zk}S1t+x7N6;_1v@-&Y)du+si$zr4I{pxOQ-emB2X?EWz~+bFpwzHZ9@e;xjh1GOd# z?B-p`zv)7Cit1OBTW3Evu!X*gTJF5Q+kMU3UH2tz9;{RsFJ-$TCUNn{QHdK@yeZ?d|7=h<_mudnpIdK~ff*C*}& zJlE~Q_C{SjP+I*rH~NV`tA#bUgkjN^(r?@UuKT*zfA{-qCI$VrKi6A0zdCn2zwgVI zm&R}ApSKY=da}TGp3&7(J=0|hTF-wkoAq{Mbq+VD+W*6gKP~xZnswuV+}o=4`zP*w zcJboZ>uVy{HmxsPJNM1{@HjO=!B^Jnos#0sz6a`WNp@s=bG(h=_Tgaed0dQF%kP~Q z+qsw1`q}sOW(Q6-id8FByUgk~>DNsb?KUgE?Rxs)YO5CAqUg3_@zwDkEM*yVS0|r{ zdK+%I-h5BSq*MLJ*DaaLp3ZG0WAnu5IP;C`6+AB!r|`;tS)%yCVoPDZ*6q`sMYcUv z0qM^l*Zl}wU)IpJBb|Ls)V{R)3hoXQ`89f69-UXS%(%nSR{esZAcn_{huFi zx8H9w{&vRr{E^DkE%Nt8k7m5vdOZ%bZ#+d#_s@^Vv)uV6Z8S{0zAiSowU=46p7+J( zr7vc@7Wk`X+Z?9`8l&*)N9K3%?#bqudCSI_o^)ay&}#m_2HHk+>&qhTjsAfesrR@->o+B z``5}VV(TL&U5x#4!)5oO_fxk$__s3rPnGZMVvz^+cK;Vm-(O>BKS|X~WV>>dyjcI7 zxT>$c8HX?X=W1Wdnc=Uy;ADdx)3Lusc4n)xP1%pXdi*Z_#zFQiTV|PNvz$GB_g+W; zrTD6kKPSrBrmgvV|Lk^YzH@Q9vsmBV-}k(W|N7?TTn{Eb`dp_c*8kzjf{!!TJ(`#D-H{PljePhIA}xwLum^4HhbXaBEywD63aLB_54DnUoKh}_+8CWYK> z$+?%q@2dRdxWl)LKcjp%9TmF4vhMl3>T_lHYrkuhKUf{Rwy=Hqj62C!b>4K`wsX1} zEMvGu1*x98op%5aru+4(PLQga(m-mFlDwI-hHq?(;}CQ4SC&ai3}h~w$$ z;kIb}U}(AI>4OIhlYC|5PWAMCF)9?BDX=i~<|$_XIRXv*bx#Dtodd&9iuRf8dHtv{ z?t6R8Db@#!&qSjn3QxKmo5phLsmum8Ua6FOdu|3j{k(H6BQx8P$DVbCq3h>WO(}Rf zHT>2B$L2c;*SB*DtKHDs{YEG>Y5$yWra669pR`kJ&q=0!cqaHu?eN07MemndFJzw= zH|-y*i~K5iL)k~VQ767{`1#oD3xC|prY~)_DM$YXGJNv-?e^c*qTG+YA^4_ut-brj zLo1KJP4!-4X7c#U$ItqwN~8K)7Vi9hy1n-E>!Q^mtCH5-o7k_jj&U;k%_EhQo$b9v z^zvf;cHCYyWnIejedTj&?Vld^nR)W#p?`iAq3WM5@S^=|I3(sy!U(Wi|t2lSN(pwyZk}K)mcwg$IppmEy!W+7kK^CYle);l{W8y zli`bs-;4gaEPp%gjY>)Boc~AD=bxEV&HApX_`K2yhS`_S|35w7I8({D{AT4^6Yagy zng#SSXS>-z49ud+*0B(le*b z{V1?p1&A{8xB5M`aJnZ0ZZV&2~QszyxH`) zaYO!@n|jQZ*?)g5w5!{1vuSDHo#;EOuXFC4!oL5_z9*ZT++&~nOF9dhl}EflagDPjbz=Y9UiNb7hYsx*UkKt zWaNALiJ|IK?fwaFi*in0*~_i}^MXLT``5`jPCqoKTY1lu2|Br~?!#oZ3Cjb199{i) z@tS$-wK=vukbiZ=T-UnvmB{H!HBOrL4aKnq#|pMt7EYsmH&h;y&|>=!Wl# zxte*cj>H)U(` zZ+>{--nMR5-}N7}O!Bq#5^Un;KKgxmf#Y3yI=il4JdhOp~X(j*1thUb{T~6tj zuV2qQ-}2PDivMkAl0)zMzhVp4ZrajbB5w2NHIrRlAM=6!MIp17I`u|=26e$zHqlXU(4`p-2_^}8py*E8g^?z{8WH@ozPo>xllW$eq@*VpIV*W2tkXR(OIwFiRrIqU`JgpW^qb7#fEla8lPYQEKe zkg7D_eyVnO&w8UPUh8`rtsna@-*Il~muV$yqfFP{?mpkLZUWo6l36MnK24RrzM$@| zif2xc{-bSX+|qfB$5aw{r_&vk|_zZi0Tpkb~q_|gX|Wzeu2Z+ z8ocl8o#WoIVSgX@?#E`~W&wS&6)*NU25YC;%=%Jc^Qq*gVsoX`?imvnf6}o3)wJo( zwDe!1RIMlb)`+@u@QV+ndLCR_WeNn4#6ANekKQq(V_?+ZSna+u~El(RPRo;H5sQc@io5tov6C{JhhB`+4P;?olWRX+GTKU)uO>#<8_;9~bZbdAuojb?yuOul%*& zetDFi&n%l||7uQNdHv4`?=N~t3VnK{yYfqg%30e0afi}Nf7y&aw)b9q^ZDk%`b3lfCifRFubR|0 z{rEzQz$s5>O5SI+K6*-e(@u5Uys3X5cQSof+U*_7G~sFCv6?fIa!gmIi<;&b`l)k9Zkh|7fn6y1vuP;#H52 zL*6HH>y`(L^ zqveaKgN&`ByzGNV%SHEG+Pv<*O{#2ETt=_9`3W1LRk6$3V#@=ntCzgre`ulewa@l@ z(%j7~uAIB^`N;!!HSO5)E&C3totgVYWTm|31-rj@yUQ-drAKntOENlqmkwK(eqF9& z=iX=eJ)b&+>l}8?GYMM${@S&;51*#=_r2m;@_*tzp(1;)*Y|!fZrW(@=DA>5?|kN? 
zl9|W1SzeOcvAp;h&&sbS7w9nN{`=e!m~B}m^W>yuC{x&ncl-9Q=Kb-Hec_wP`}U?xJHQ~+V-M@Cte)D?YUn7&+eebf@)AX$(wO{?Z}-$=$7THI9;n{yp}d)+ z=yK`8cZ_mH{STbiEnk^`$305!8kb}tf1h&yPvv=9J0_ocd^WWE*^L>~t!}?~p2xlM z%)vbe*n8*f3u*lSyZr3=yZ_JLR!G}=pIww`g~Ie6Ym2)a=C19pnE2PK3FQe0*iT*d zae*+u(S)_}7Kj?q}7n=x10i_Wben2eb3{bs8VGxYcxgl1&~<#I&ajclQ5zs$X`-@Oa7%e~X7K zk8jO6V41OJ>&*KNegzAeTYMH=lE3t-{QZ{`;RvLbxzv*7`^?GzCa%;r z@%(XlwfL+w$9_xXWME+U)h^&1e0{{NJYGBZ?VjRuCg-}De$|^_sQU89 zXtLg;)zjxx?X&o}Q%a6|nvYH2KAQ(`E&A$bLHJ;$?HmHALKth!~E_(q3e#DZp4Qz-m#$j_=Fqr&o9fw zo8&HD$eME6MEtnSi>I3#4qtAr*eo3R!!Yw+MdyLat-p@0iZ;yI{rvR3V7t;MU9Ac) z{yr**l6$|dAnMd2vB&qTFLg~{|Ek|S{a5g;ppCAJ5}mgP+HCyzEl_sum-+*9BlKUI zGfh2oK}@_`E(VFz>rllZ{F>G-*>A(=jhVuaZ-Fj%RkL8y%rh0 zW7^MnnZKQK@;_o;ZOQp1_;+#si+G2>o>H=nY<8B#OBO1|7dq+5Y`*o6*XNV^nfX(q z;>u@U3T?d*rXOr-_P&3v^}T6pBBZK3D?M-id9*0!+w!SeQKycb`*(EOFK_k}6Y~x^ z$3B=`<8+t1WD=)e<&lSzb$Wd-dfd~!su#art~es&-BisK1`A*PA8y4Bh2_8Jevg!n z_p~bbU(0;loBhpItE2w<>?^d@XK(Ib<@M`~YL8aw!a7hA*v7iekK5q>=hSKQ>;GBK zTe9clG3l^{QM}P{i{@BoERVkVt9r?s{{Dv>e9v6(iQZpOkup2!v2A7D#}yMczCCt% z{<)w1_rF-Q&*xswwY>AH$r~A)3+*-eY;VLBtb1cs`K=y4)|~#BQLefw?a6Ap2ZtXP z{}e2b+EK{0IPb{iuYt3wlnmztoL7Dl*Ld~x^jjO&iATM><(qr(MPu2G#P*xt@7K%w z&$L+D!2Gl_N+t5(lpmATwU@JXrSHfyF!fw{P-BW)P_og_iN|&%Em(LyH$yeJv_&r9 zvGzJ?Co>}r;W?+mq^cf$aOUoqx2a=w&xtSgOCBov^T}*sQ2g1V#eMUXxc-(SD}y(^ zd$67Dg1gP5Cq0JuW~rQ8`HNkqz(M9!SMjp*%7ObI_gSB-`}uVG+Q6&2acWhL_Lsw7 z-7gJosykBYtbZZ3PIDuVzU_kjjIFI);+sCco1rNFbIM^>{ih!720_Nt&uokIe%`W9 z&zm(*8W&#K{`r1hjCT~by;q~R!_J-k;Rig=I)RtOmPRw| zKFlTXd&RA7x!HTGzi-RDtQL7uXmjqB?zwADmg*nvcD9JUkknq$a#D!B%(YB&+vEzl zmrK@aKm2x(^YF!kjE7D=ob&Kb;{6E|x0K&7xoLY-H>FAWNz<|&pHG^#ZFV!Z(mRm( zK)2;@NmNBcip0+gKU9Bf9v8|g_^1AATg9&m>F-JEr!z0ydG`O#`E&a=%>8SWl%dn~ zL8R;3<9VU?HFU1epE7$o_s)%;QXR9GGUPR#T6t*_W3LqZoNvF6^e*YV!f@hX!DRQN zjGw0P%SnAQnHXReJPa7cz>R`%d*VU9Nw? zxS-YTt8aS##D@Ox&$`~VXQnx+`_0K>0~U8m*6Uy zdR6fy@0SaU^0k^bg|byXSh!s0MM!+cfzEk0Po0&mUM(}RVJJFy&|5};cLy({KG&S; zZ!^Dbu-AJcnE3zS{oj(&bN@8Ie82vy;!EC>C!eK9FIcdR;jvrt@&)$}x4r7!d*EP# zzEQl$gWJa(Hm996lrZ};&p7AI>+ioG@1N2;DYo-?+nJM@(L&o)ZPSzDJ=))X5;(wg z@G+Za*vf(zNu{T5&#*djjeGgTT2*-mz6o0*Z+M5@-?05jlJMTeLDly!efbc$?AHXN z2g)k?-wsvA9@^-0pLvU7HE+a4 zK2>Y%)aLKVIJtcP!REzxId@*Lt?P6=&)WN?_Y=cMLH|b~dKDiPoS&}b`%vI%_T^IW zD~1K^tE9g+oA)fZ9$i$xH&M~q{q90m_7e--l9c}ys9k&dR&R^&|AL%1p4DHsZvSKO zU};%c75^=_juVqoq7`m4-Ok`n5T9XRAv~Y=y-Y;(`Z(Ly_rI@RQ8-a(Y3{oV>)*ED z_TP9nzvk-x>X#YYgZC`eynU?B(A*)LZ(ix74d#8v^;&ZeyX}-s-p3Sv=kZ4V-pbpT z&rSH1r(SGSBQlxqBtN@eZuZ3XyWG_;E2qe>uF~VSR6L(Kso!ql(wJ8bI`0~{oU4w0 zJO5Afz6tr>GcP?pdn0{Y#w8E!H^+6}Uic+5``&Y2?WgxERz982{dk79oq30|_wJ3? zB_m%q+gN7IJXNh|VwI`q8z5295Kv*MzbEpbFLMdkAIJB9W{Err$TV_f<7LrJEhsL! zHO0;D=cJP@%<>gin~$qJcn~i8F#PTVRsJ~+rIYro=uTIYj`;InL3^9Fru!$6MHMyP zlk(kPZ3uh%l&|pEi;%0V%X@tP+|gg1U=cOv;*sZ*ZnT`p{8Myj!qhug<{w#YTReUH z*2{Z-?o_?^XvzI6^S7`zoXa$5ejFqf^>u3rtK3Va!nJKj5BR;7t7^|%v1dt~hyR3j znNJ<7V=6p^y`HCRaan!5ewFmDxiZ!t7Vm#jtQftCV`cL(jxC28uSPT{CZLQa&Fl+*T?O@ zDpTK05Y-m!>#I)KR&YtfzTn#1<@G!2K3*vdTa%-z{l+ih;+#+0iXP6fNX|LArSM_K zM@v<{<*`=TUR&b))~ap0kebJ*5pyVBA$r%1u4#JHR~ny-@Vk3=ZOYMCar3#Zy;$@+ z_gT68?J4KybuXUH_vO9F=B+D|kN1Tw^_sflM*BIgS#RF2zpCrcC*?NpB%?F`i51Pu z6P6l&Xi#JMWccHdv8IPu$%CCCl~yvT3TIW09BIjky|9Y!BqwLplPm-5$T@ve#PwFi~ke+k8l_eh(c`}gCxz1wryq7#byUoD6&zgzn4VY~b> z_Wkug9HOm*;Z*$dVRL>gsF+&Y|2g>k?_YP`+%^_FvvJbZ z*HM!9Pn(|if22Pv_}sK}b9R*Z)?Ve(c=FsaEMwFBv;Q`htNq_@k>1+%LpfxB*i!qm zDvDS5vv=L<7h?|lvDkY-)@l12a~8(mY?{cNq zUCE$m@oL7N`zOBG&k_Ij-2T5MUjgfJFNxJr7D*Mi!RK{LD)%!dzP_?jc=iuTo~G*e zzH_bCeOcCG^Gn9IYRf|wyRTE~y>q3R;vPObl|Lcx;_@$&zdZh5EWaert5$G{Ib#X? 
zi{4*R-q)WLa&{<7*PRsL{j}715_9~e&L;L|4qfdMhs5iWU*zRp$`vx)yl_my)m$?D zrHy6tl+GB1Y2R$0SkE~jWLI~oZ!s zm-U&h<7MOvx@37d+{Z+;;Q>s7QT6UD!ikZw{q1{!LT!r zWuDdj`}zF%H)WZKTOB`VZ#OfYzee{A;%ngq()q3!iJSbc< z>FHtTc?ywgUu0g~+FAVkfgrEnO{V{1^1qK%n5^7%;{S8||C*c5%9&+f)A?4h_xrtS z!yN(h-ij`9|1!O#=9=*wsG;X=c6CEZwMzO@w`7wtbDaG&M< zxX0p8%LV33t``Lktu9*7xk-lqMe@FlM-RMr+_c~zqxUuU+X59a3cCa*{hgfK2->=# zaF^+ZbXa%6g$=?t6rQ=rY^neM`~F8KsrwQCzTM70Z29x$^7&?~-rl%vmg@D&aH+(P zBFA@^Sv9(sXDv$SH|rNFoM6GlGP^CAMYccXc#ov;SJ3Hvz3cBVoM>g6eWq-oq;Xmc z@6@@WKle-hirbQSxb5iaiqFr__b={vx9jz~3N6>&1shY_KV{Be;{Rc_k*%`-^98cM zf^MI2aXX&yWT)afgCCjc9_F4)XIL;;n#@{qWcf+ejz9brKa}pPdxSCma9zLG_D;^G zm>))8&T&uRH|SK7XKMHc=Q_a3M;?{d_Sd*xC+p^fd`~{z%=t)NtHiws6aK)dYo2DRqtW_neux>n!gV{(fctALl1$RtHWh^)6m^yvZi_ z5M%PXlx+Uh!4?iV{}bg>7d9{VYl*T+{MuDj_)cP5@2*EV3#X*R&H`vrY_BrQx}p)b zqhM3S-^aquHhUMYTf-F;|H|xmzrZ2EBYBTRKB=;vdcrCG^`YF(8Qv?U^;tdy$vvI0 z%x=?5>BZ8OMHiyVtxX?)xo~2YOxH^8JqCB z&E_8+6`!i7{n_+DdTOM{RtEXFbkmP>%kSNM6R){|Zz#TA;Mxt217B_fvjVpQ772HTWS-yW<|+4XHkH}GUm~|* z?}25Xip073EKDqn9_%bA@(|9l3y;3&w#hb2_LjiThFb!5AG_KE%J{4FZrW9x^u8oC zomWNvckkY2zMDGFRGzN!mfK>HWRYf5skF}MK&C4HT#t2g4sPE#VY&Mp6BBdJ7sot| z6APRf<~qsoE^zbo(|A&tS;{SCtM_PP+J}mLr#i!<_9{+w({rdiA$Wk@K|SiJ@G89v z8IwZPCoKQvJp0LXo~i0`A9m?KEO|dgaYg9ruo|t-;9r3(a=Dup9ShRl&#-M-vsuZD z0PEjxHYZPh-gZyp>fa~nC)V>T?7#nH$@~|^C7t_%?_aV%B3W^PWBwJ*4-8wFQVKcM z=PJreUK0KgBPIR9w&p~*TxP`%0|l+!)AF;(W9oa)0f#^-t5S*`z9v%g_B>k^oK?3Ki7 z{~M-ye^@WAiF>%<XH|vorUH6RegeE~q%!73!60J!Q~p-qi@-lU?`i z-iK)`FHK=xd+82$W`zjb{QK4>42qSj+!c!dHqQ5LjWNGpQ@rku?)((VvNtz2mVG=b zeq2cZ>-|{=HktnZI?Md~VgAp8SJJlJr?NR3w7Az)l2OZmG z9zn)yuH`X)6Z^PGwEy0{s@H3M)IRpx{n7}#YINd$CCl%oyAKOh&T;?VwW0Qgz#OC4 zC8_=#3lAPkcs@a~+H>BE466?}%x>DhIO=S-<%F7i)W_5B9JP0Q?0dy1cYFD!*6^82 z7CsjWh)*e;AM;`IGpBh47Zl5b75w?6rV0c|_E=2~PS)t(qP3#ax$=%dr3u4?IUFZ0 zpS^hD1ltvlmIo(<*S>1m&ZjVYPegBV)G2qhuc7K4?3NeAzuA1$3bFifN$>*G67U9V zmG_=s{hsA_7uGQrwMjkS^l?Y%@yeXFyNpg)hO6&ayqDwRf5}S2UG3)jFP1M;*M*{`U&RaQ;)oqyCD9d#k0Y#c+aWPjCX{jTHkm!Co14uzk#sZYo~crRn!k+y{Msrer2 zikZm|)aTA-t137VAh%`8p@fd1RrotMG_ce?XjCg>T3sb(RH;D)E0V7-t$rYR>2ESn8Sal;i2Yukxi{ z{${UyrbSL7)l>V~sR4aSc zr4tH$IX^B$J5@~jjMkDvt55zA=UW#qsGcm}f#808tBK zcb9$pa@qfQ`-avhbG*a;pS(SfQM$$VVe5K!xe5h4&5BE&>Q7gCPTI!tbEo3W`PTZL z`ny#0%xyC-K0W9CN%is0mu9AO)!BPH8JzVM`T~z|lQl2WI8I*AYE01( zk==74^OC}R8!11~4&*IIRM~$reVpibD$tg5;_0Kc9_v0#TJ0BnSB2k-@zcb>2@U4K z+e9BUzk8GVz-HzjCQE}?=D&O#3q8N-sG^G7qWCF@C#_oulo4ScvHt@^}8arUpv1Q>})yF9QR$+ zX>`_N#07lvLf)J zS!Jg1&DHUfH!iICn44#H@@`DxJX7~$ag&AE=2=wuI%#O|F6=(#%-nEUg>~+uC*I3M zWTH;C_Hic&Df)5nmvkLIz4TMl!T%m=nJG^CKRu&9EMh#*$|t90u=bE5zs%1g6OXz^ zedTdveZs%qym*4%^ToNopex+oJe?kY?z&cme{0PzElyAMPP>r!v=8_8H!X=s39u*-rYk4(3z(-%WVtWR@wTxYy6m>Lhn>hGu_F!=@1Xmvdfu+4af(JmsubG{udt zZ`Kn5r^O~EQ??f!oh7a~VK!tBuP=9j!0XEAbEi8Buy5bXbU*0XoU_H}EY*t@et)|q zHoNM6WzhwL2TW?RJeCa`)z@v|*&D{@VceMT`;6CDJnZ`MjZ4<5WW(crd%HJW z_8X+l^JLy`+Pm%evU}z8WKGmKcq}LPsc~I0`uCv0;6vUI&Y1!W(l=drvgD+rFkeN3 zXF!RM*wNa0e&Jp<*%t*XW=seX`^>R^TaXyTJnz)z#Wg3V>0CUbxI<6)=7odmH+xr` zZ@C>6n_YeVj&U&i4_2*Un-A)rUAM1o-@+SjFvB3RY46`}x1-nY691vNH9WR7bl;Tq zb^3d(o=&)s{>q$did=i&!sD5>O1|^H$VI*6`}1V+<|CXZo2Lof4WILAVXK~{T!{V? 
zrYGFXH*xNKz`P`VQt+X#@u|jfJ0>uzI3Io3+_tMxc%D&-e(RpOn++?^+T&G%}vsq|(u$EG=A5r!|87=AirypqA_gV7%^ z=jn4QPNX|2u;|Y`(%P=xss8RrAX}>D{KKCbPW0cHb$r!>N#Zs>J`Jx9-&>R-HB0Fq z@5b5uKUbfPvr!Lbf3O=9BY$j(Uq}CVGeI z_11;dYlhrECCGH1b)GzH%a`>9uj(&dYEF?|BK$)Bm(tEPvs!(wJ5OCH@bAvAnO96- zXz)k2Fes)9d~!23iD7!>7<-gkc6v$Zmr2gMp1jI^?Jc}(dd`g8#@v;x+m_nyiC!K1 z!@xp(`S;Ie+VVlqrRR#rRXmI;{&a0syrZVMKcAE^!(^sovpHKVEFMWzUGiaBBbBk~ z(h>&WBB6jNVWyH3Ois&AKls7yFqvcFlY`5P<|-LWPW1nILSbE1z_usC{@3POZ2F?> zbaC#c--UXI-z=UuKl ztmRbsJ*DKug4ZoyI{t)aM!i%}6gPPxqfmLtx}a+{&%*=lG1tXk>g?k1(A%_dvfskx zr`Xx;w-o(YpvpV<7`utodGM!zw?=(fzf7gM5jb2)ao8E^gPoE8VIhpr>tfPmlU*vZI1ih|ErQ6v$C;wp6xj~ z=pLplR()Zgx1X#d+yClqv0Y`}YpdV9oNw##U$)tYm-q8nsR^ph2s-Ko+GUyk`&v$- zsMoa3jKOTT_p|MpvCjFF=a-r46-YaoQk|FaZ`ZqTlesvpulJYZ5tSvJK8YKTBriO* zRO{B#-*eK%_tkElyTf}n&)zwfzC~*kR`uQC-(+&oHI?hde7)FREyAZIGoyumeSIw+ zZ?@y_x7&{=YtG)C^JQI@ru~BQe=QfZe`W7npZ3zE?o@F7qH?BRQ?|&c9=fr6h8+6~ zTLw9+7n1xvypN_Z^Hv^lb&h<*$nH^p;<5xYzpPQo2BS>IJxiLepJuKo;kSRfQtpC4 zxNL-I3wzx~C+A%;4>t*F=|_rBoz1%$RIlW4UtqF1`s(iP?Xi_lrw%I+>Zz9h}#zb#;4fmdWOs-RZaI?%jRs($mvxd>4mZ zTk4y>`^(nD(2r*{yT5uQ|)QD1$p^5ru)6hdV7EW!L~CJ za{U3_Vp>W6?OrO*-d?h=^i}2l-npjtqORp7ewQts-8;+l+%nxca$gf4X+^K;*<12T z_}f+e`?sFHkInC%roT;DS}x6h{sq-JYXdhQKepts$`RMOraLn~B^}!JZo#)}Z;zMf zyt(u7PTU29jT?g=8b_^pcQR60KJJpws{QN<3ktQq6&{xrzn^p`=(|bL6OZZ_3)|mp zI<0s64d1L^yEeXlzCCzy+mtSW#+E`{td|I;bdP>8` zw5<$>-_<`7Xol|oxV`G?%J{Eq%1rMknpVLh?7ZXcy8GO3zRth5wS9k;l^Vkn z`|q{p^DmxRck6%B?jL*m3>W@c_|hz0$Bm)|VTmEU%~ zZI5)xo$s!{=jA#tTegCH*A?|1Q-Q_A(_6eVIY}N_S z*Lic7U+Zi9H-I&0+Irq<*pW9B9bL+%C z%m4Aa&`5i3gY(Lzzgu3|9JsS=BV*qVr$?I@7m4dW_Wx7W>02}LVf_qylf11A3pVoi zrl=NP;k60REbqGG^`kCx^QEKRQA_6tg{{ru%-)pDdV71FX?EQ5P0!a>3N0x5tbHV` zAT{xFl!8Cst)%}SH1oR`Rs;#GQcOLkF;V*eyT#QzFRkAj|Lpb0ziWT??PjmPYBcq2 zL)7mZZ}-Zb;9;2}-dbDy;`hS^p_-8=m+8;TJZ_ScyPm)D{IvC6w(pkYyfUbGu~iZW&+sZZzT2?+fnl%jGm*DMUZBWo&%O+i`x)~>OY!@PqKQlWBVW3A zoYD*5(POF=eErlE&B;H1tUuMm1Zw;SFhn&x{OI=o;W0Q7p#$pSN&MBP&t=z5OE&9y^Gc@JAb`<pf5Hxmox8v4^XnIQ zpIGeoAWH1^VXd$g8L6kIot0UlJyQ`pR(`vJ`%L7PGwbT#M!&W%OM9{9Si>>yWJS&; zlAA43q|{qp*j60tJ|@5L#Y9D`kC{COyLAQT3B-M3?A82m(xgJ~;nDTg4Pw%&$0t-X zxvbx4TY4{P_v5>b-!4A*SoS{ichu8OOO3ZFNSGa-KRa*N$*ON}B;Rh6Jd%4|>C=-1 z8^hZ<8#`HCCtO7#cBo!c`OF8a75XtKyDwW=q2(L2v{{Z96M zGWA>UzwUx}m%KENx_-MfwXVeR;T5+xn^cRO*!<+rM~~LkwEO$%_S~J9!#{~L zMC_d`q?~`?iQ`8n$J=VhZ}QH+_fTKiq}=@qd*}bv(}GPuMZMXuyZ4)IesS1Y?zk|s zM|FEcoZh)ITWEc<{&{(Ixc=`qo6oNaUhcQQ+uEuBmYL->JL&IBdFNT0)Rio_Es^+X ziTDe}zfx6C%IBHfxo|Qgn_s$CpV>?9Pt?JwIw=Ni7s@l1*iT{9RSaKqr0V7cm8WVi zJC)h@2FuzC)w;~8i9NxwGqX})<+>}qg^?ZA-#3(PoK=?b^>5GdvrkV=?G8=J2rUzt z0P1Jw?qE&RkI54Mzr4S9=0=6Z-(T2IKQ{SGWW}YEg)28sKGQz^Bb!0q0`>V5N`lNP zQX0fn-m-1kWNyfIxwN^zaJ_r$3APk&6>h5rBNgw|qj8z%o$Y=ytDpJ$?Z>To7Q2^* z>c*=Xe{*(_`&Y1rZGPFKRHJ!yDT?{J6V%yz&n@2^IAy8LyApO+Vd<+$k2pQFXNkD~ zxcYC7ybPCl-noj^vx0AWiR!;RcjHq}u`pL&>tRhPbL*{+^{e)?1T22Tw{Lq+#Rp~Q zaNpybh1xgy%`z7iH;`NJ_wjG$g^URTMn4TW&qZ%LaI3z;`AMPv9r3O^m+t?PF-uSw>F*S5aBy`a6{o0(N|zdPvMsyFAX-*0i|x4jaz zHEZho?G<-RuOFRhm3w>J-8ZQVg}(%!6jFWrt$F>{viI@X!qfdb?WRnedavc&l3s-> z%~-{Cb>jRM3{pQBCBq&)QePK;7%DD33S3e3_{qZe zCj#~^@Lc!mXzh~IOZ%>Ng>KutdfV2l(7&RLc7N2?uhp6lf5M6eBqG0)CG$sb4#@(nWs|APl)uMQVx-w(4LcV zz}U|-c-bpv-b$(8yTlKv=$m{{?B2U(lkJ5@bs6L2eX+l@;;a9>VXnRH)OkSJUO!-G zQL402%89i4h1&Y1S||2Y-jZo8TEy_i`Ejx3lBEV3++M9o9yR?467-ZVS*{{s!~n_8O}$uBPqm^@sJlkV6sr8kgxnA48o4sHu-!A{Fp5Ishj@VI<_-6Sd>D1z9X9Ra0XayZ5Wtw&6 zM8TYGEH3pq(N(vTbKh;WU1id{FXaEi(}iE!E;l}&bnfJhK=UUB z3*(#IqnNsdab!1U^kzTD0`^B-Z{Xo?=@bEa2Ul*}Qh6Wa*b( z1^X5}5qxxn(OPs>-rKA>nP0Q7ZO*#7s`X-OvGMnwJ31-R3DNnCC4bJ=Eq=SZaI;go zWV=-HlS0Eb6FW)u2OO@e4P8#y9K7*_X@aWd1!46oqK{7==g#bv%wU-6Dzo#@WH-Ca 
ziWT!8Z8D$f%C}t6|GPrmj}Z0>4Q_=c=VraHn_+Q2?(ehbXBJ;nIQ4jb)Yhz{Ue$6( zQ*Q1BHRWI4o~+^FS-#8QjRvb%gQB{RpMnP0Pbq~Q&gSVRTVnTDp0e7q=Ymvt-h`Vy z@w?#g?3@={{ov`hS^Eu7Df|6kd?k=r#iZmrk4-G|(XOT&|25_ZFn8MA zl@>NRFj27lZt3)mLPzv__kO!|dfm=vQq|w@mUB-z>~_EG^|iDAe&7FptoG^m`MMu% z{eJP?y|M8U-?#q7?>2bPnvaNf6Zz+j42CclsRLlPB74irttNopAJ2=(qRl-BQ2I|9C(mQKmRE=cSwk zdk^<9iN}oW+;fice|fMlaL$%TliVe`1zvt+=M^uSuy)f#{t3q;SoCizRGnIM?xac4 z0`TDoCKXeiIpic6QoG&1oc}JH{`r~yi4Uv~^ZyuEe|z&I@AH<-%XP}>(`ssUL6a~g z`$QThOf}SG%ABI8uy)3x=}nW^0`wkiHr%_m#X72AzTAIL$OA3ee%tqNziy~C*i?Pt zL;9017eA&wi*>)3^4!$=?lQd@a{aRN|Naxzj@+`uo^P$p*Q8H>l6>T~V;>1tZ#etb zto-DkiYF%@2hQ2^oUN>TQ)qh1>OLN}hDOJIQ|29CbRlCy%jc=fMEl=y*Tu;M?DCwf z_ON2plOCUUzc(J2+srOsW5C9%ot=AYOJnZr_O!FJayMGtE4!Wh^Ixp>FTKo=^BHBE zWbc3Qcx_*}pw|AYyR>2Tma|cI*TvRENK~uF2LGrPtYVO_^{KzK{u1v?=Owc8s|)SeB9L65fFMRENQoo%X5L^N|n zyQXZuwKVI;>{(xPu6y;a_3@KDeez_d`z(vJKN6E2!zFJT+-AC6#J%A4UpL>4n#IBM z|0b^~FqAv-m`Oxy-A9p`FYP}VOe@&M@Py$4SG!CG$K>RRC7XDT36=aZbe;Fa@SCTy zxa1`H}n=0uXd8#(vTv^r7!bylm4Q)TPs>?O>3HDFRn@H`Kh97BF$_QTKVau z`r6ptWgBnpGqj1E#k<`>@%@V_)ADARIX7-9@cPhr{;%Q`Ccz*X5hf2m0S!Kr4~b7Y ziyfKwK00LFcttPX_>=mX`A?1)FWFNs!Y=7Im8YE|17F=$@j0J54y#Ark)U2D_{dYjpTe4c57ROoj(HPCBqpd`cMi<2`ughTarye3>i2uUvuSL; zxqmC`r!7ro7e1@Iu&dA2&{9~crq?PYx71lv#EDzOH>OK9HOS6O_34yH&Z}!4X^5vX zUC`3obLoQ7Pc?RiQU-s^BPW?Vn4c7O^8Gwc6$PYNgY)r$^>Kb|~0p<}kZ;?R}=oILF1!OJj~)-KfFs5Y^=svn9wy#NEsF zL~F9h`hV88HWwW9Tb*xomM~{Q^CH`;SM2+crn;tNHn4 zZQiyY4UEh^{=OIOwtSq%_G`K}H)uJa!@*3(oPTqUt`tw$aCH{{hKENuQ~#T`3nZOp zd}C8`L*|7^`h;gnyuOzlFWos{qcAf^bV1Ei!K}UsK3i9PnB>3n$V0{j({-FKU72XL zy6=eU^nhZqLWWANiQIJ$KV3DPzH*MjjRW$2N)<1cPJc7WTkqtVnbHX-)BYb5J2AO= z*}0`YQ&=1%cf2Smi1BKk%;fR$!9_38LK(Ri7T-CeM5e{d1O>1%D zo!NOxe02;U?EV692Y}~ZdIy!tvgg~a?CiZ$ zE1pbrPuo=KJ5x1nT@%|EQ|;L2^Xvbm*|IM$eSPiQm0UQp=gM)Yd{@51bwj zOv`1w`Ri%j8Mbc`nOTjx(|fPgEd9xOyKk?$z2)}_*KAsOo-{I>$Y<6uvFm?{xxuLS zXw5Rl4@TDyXiHUGW1r$FspY@r!6d)g&s0xcsa?5l3FD2oxY4oMu0{7 z^w_e6=@kA(!- z^;?DPE1oPa`QXi0-c%dpJhU<|2g?h7dQQ%l{_PT z#{p+&nF-0veA4G*7FoL6oKW!p+*vWDU#3DuxJqF&!-LeUe+_Ph7Xy~PV&(g}Nby{2 zjl-WOujN9^KeaaZRjn!8b)QK_^62pkKMm)#{>g5f^6hD$U6;lz_Bwz|3YZn2Z9kB6k!-E}z4D)6a8 zXr@Kvkx;Ww54#=TR9X~Tv)z_=xk5f5Mov!QF+aG1CZGM|cI)qQZ{E)AjSinCvMS6j zdU*Mcqa=gkmk-9}s;OQ4b6%)e&-mMa%3)uM`s?M@?felNl|u7W%pUO@t>5?SRn6`4 zJ%XV(^gKRoX;OO{kiOU;W=Wuch<8_05eI*t$)86{w=PWy^3mk~qM^FURk<=skeQ!X zQM_oPzifzDW=e;+6=Tr@_630+H!n1D$A$Y8oM1oUET<@*%5cHS`R6>r9lbvf&ogl- zy&*9#w{uzX^K;iWBs$-o!+!JJEYs6@dp^2Zzh1NX&4OmWThsE)fBpM^vs##8cbC@x z|6iuPD_Oqzy|p3>#{`y?aD|4Z2RvLI+!|dw8v6M97WY{FlMuag%<<>S9SU7KiyAr_ zel(v@Yq8Z)%;CJrar66<%_UX8>+P4{%Z$6l;_0ET|9|DHed}M%vnpNnuCny}-+jTS zQ~e&cipOOnAMZ2$#&l@1#|(?YMaO?CXDehUT+dK=W3WJL9`lXN91BM2vz0dS8?Mhe zEX~kUQphpWv3b70Hv3B-dmPM+9_7k?c4&4doK&n@~eS#3fe zpKR$7?yD0$>pr+j)#VACdwi=h(dOyNZt1h!`wsRqWFG9(dGdy@acA)v?;8^t?{4xo zJv?VA+XaTSmg#X-C;jaImL#wE{8;|~j=!I#@88lXx!~=Iya}nPyg8fM4$Vt`eAsPr zn_Ss^lWo@X)D8W6K5NK-n`czf&#~n3OyTDrc^=$!k}I=*av=WCnS<+`7yHOuxg3~x zLP7tQ?At|gDQ>?P>lnJnNY@Bv@K*+kXQu3#cz}anS;aX0+!tNTdmb~-Z2l|dk++G0IBGHaWCEP;gLpdEV-$uFa-YGKb@$p=J9yM*YKcE=F3HS)4oB z%_k!}vC88zo7vf?33-ywFD%xb^7!wfGlt6gPlEHW$iGWY5z-L8?k#iu<;>e3KQ_z% zIWR-zyjs)l=ou!Nlg_(WZ+@G0PVU#S$zffIFE1_q*R}5D<>k`rUn_lhcdGF5v8hSL z#j`iD{kxvA;0D`n+v&A@w`13^I(=iRU+k3^PAHx+z{JFLj>7 zLH@kf3mF^z4E|CjI z^=c{6y$=t!@1LkN^SN)<17F51FH|mCE>3oJZ9yGG=%e1tPV$kll|8=o{*Yh{A@2jStoo|0X zJkL)g;ItyRsK`>tsj*?-aFp@PhVzAT46Jq;Qy$+G-6nm(MXtssUW418#X(|^jr#`9 zDXr!iM=rKK`t(u9$?wCY{x>U{OB(j9;J>${FGITKLZ{_|cAiTt^OOVi98$KZr{wF( z8Krc@79JJdclyA!3ZvTNSr2@DbCRAa#pji9St-aiDO)khwq96cb7#+m#le%@XEpP= z*cq1mm>|4q;cdfRRomJhH42eADO8tKx zsPkFL+Bs;Hu+OZX3KHA 
zeb;Z%`%CZiYaVjHJ^MUd&3X6lH=F&fb=5b4(nv4chO0K#6>16pH{NflSXZ*;`JCba zH76VQNAuj(K0S&&T-+pZp7m3c+pKia^NdViP82uUJ=)1EJ5zDG#LOcT-Df^}9`WR` zb4T%|6S{WpY}ZBiC{8yMi`P%RsrKUE@AupJ>wg?RmT}_O*VnlZ4m74b-5_lGVVO-v z&BGFgeF=|V{Mlk}#b$GXdCtMzo_Tj3sP~&Bs9A}~3fP@%Zksc~J8nUJ#*>RT7qHrY znZP^O#OBBcm%5T`FBZ&UkDu~!+p4}tRXbmqk{(AqSj42Ug~k zwOeMsnCQ9m@9*#5PtP~cpT{4(=~8j@VgI^M!oThRKmVVTz0vPs`nj8m?J`OAzb?=J z=V$kK&fYf?v(7=&3B&Df*{XFZus9)5q89w zXf?7XAJj;(R zW`?(KHlP1?b(YSrq*kl^qLYcwvl2e%r%JyL3;!T=QE;(^td>)W+@rFpz~c)xPdF|( zSFerhVLzWcdsmH>(Q}zAXD!VY)}>yOIhy*AGwzl0e1WgW&N`i8oXfC5*;jgjvan@g zpG@AG3C=wJa}K-L>fYXQyeE9$RQ`M3m3K?8zYQzCbm6Y~zK^`$weFVP&fS)9uxV|~ z-D@ZL?S3qHZ@8_rZ|-Dfi{x`RyY=_Y_|9<4QtSNI6yBWeY+du^6L&jHe3O14be{Q> z=kekOi@H1PD;nAOi#VTrj(Nl(_sK&2K!40U?kUQ?9ai117x8?um>~M~cya3{8OL_@ zTa0r}Do=E#3zSYwW8tj3^x=Y>(-!L|7J>4or|W-LDVwfocDRiafjtYMQv-@WE?CA)OUhsV zsVwBj^ZI}3`x9?)eCt%7cOx*#+LuqezT*4~SMk^HB2HaB|tVJ~>$h1}@pd)+(~Rb)|bwES`Ko zUUs_4PnHJ$pHB({!aq#xkI9&#IZgQMVdn=jc1(u+;RnPTivsy;PAuWe6FBF2SUPu^ z&k1EU1!2n%41Ar4Wa(*|VEJb9^W&V7?Ugsmt2Z=w$!4g1F#)kHZthnm1>!-@8ra`4ZWx z7Yo0gQ0~7W=q@Ap>a#?H*!}ZqlIx3`#NRQ?|2Xivpd?Q0@T~cNo~Yk0J@b8@^EHQS z4llDCs!b0Foo71NJ)iSNevV1WJl>Bw4|fWk2PF&jH@1e2@*f!wOgMPZlHH8&rs8Cq zdAuy278D<`+;gI}e}>JEihV~sd<&zUGzdypQe3U>fWJm+rabX(K%;3=NT+1KPnZMO;+gc z6PwFs`9aVrt)#AEL7`kj=Prw;=`*Yr)tqQ*KVbRwAani&XT4GuUb$Q1Mkkbg?F>^p z7RLtWPoHLz-e}V&D|zGKyuF!TrSnbVewtK%O4j2q84{QhvXK>mF5BZ2daA3bMyIze#H5@)kY zNA+hr4>(HM%?o9e(sWkf4{+@XZrGK?!d|)L@lT=WC$#KNs88ts^QicVTkN?d{HKEW zOHPTjGi0#K6g0d)TzNBfdgS)JyIqk*Rq7ewB2??i>9^Ont}kb@;(xwRK8kUTg4PAA zc)Pi+7FNu%YJ6p%`r~yz6dq{zNs@cPa_MM-Mv7ptID4r}db3>R716m9XE;=IUgUc* z<*7{M7VU4Q-RI8q%A_)h1hwtVo}$R2|NO+OxVZcG^gdaXh1{rqzqi)!)?t48H^Lvw zr`(a%Y=8aK_0gLx_4g{D@6_8?Dkt@zBh=M6?aYmdzQ5Zy{rfb1|Bu(RXB4-&8?=Lp zkGt;7H|lc?%sOTiU;1d%_1yJC;$~(C(c9lEmbJX)o5#>F<@p0ire2fA8R^nb7stJ@ zx%^(<4ilS>KKnYs{;jqU`Hq*{AWC_oBPAb927O z>6r9;rgILjyLh1NyyIfw-j}wgenK?7}wLF9+vrv8(;{=Wf6wKB4`Gk9_#0eD87Dx^M2u zc89mIOsj}5K5M#9|9o8a+pW5;_eJyDtz6^Fv8NUh-(8`SuMrEBJPYF|YsHk(OOjrQ?Z_dK|bbu!&;5kU!0D+vwam>borr@TU|*|(y6v~ zeg@IsQ!n~`TEu)<`mL%jhukd=`R9deKRmGQt}MHFY{le?e!aso6GV2ejQzm($HFh# z-lgK})$qTu6ZY)bBXam4yZj#Cvs|^o9j)wNSi?&8{l5GDUSeLkS!&8d*2YBNSDin_ zBgq?UJ(Cvo^PKCNvXOf-s7z_%m$EtcE9Cp8rP7z?X_hEh`TY@k zV3Wu9-rWZ>pA^ssH!9zUt!N@AvPYvwFS8T4iCS7&7Q7YB(wl6)nqG!X^mIvH&IaM~wCz|`ynYTH?xS{hpG&=RYWk$>1?-Oj_{GEP3cQ)7W0;T!il=?qi@w8fJobr4}iOGZsm&&%Z z>u=xXy>;%+^DEyjP_q(`uP~gT`7Ywa1b6n%KBupK!m)d|?0xLSy`Xt|7oXdLn2(I| zja7dqyvcCBBlAI+A-3g_bdycqqz9_}2JRZ`gl8(MKM6KBd9HmYVGjG>4XX;Ttxnfp zy?%ZEeGl`LDpukmS3dGXn|Zg%rU?goot`(T zTS*i>P<_UcAU@afiP-1KZhj3~|D!^tHSm0TykO61-8(WLT734a-8#NiTQ1J@NS|cE zj*Ta^^pf*pDi-IRDsxNllPdl5Kk+K>dWpo&?@Oo0-CC!0;9a}jmxbRxo6kN_?Dk-~ zf%EJ=U0IKgbo!l^k7SNy-o2h}!%4vnx91%Hlk_67`*9^Jzx$lypV{_p_$%nlJ@e?# ze=-_ZL~V|oP?>Ol)gNAIi3XqZ682)z|mSRhgJF8dP8PzW&^0^^rM0 zppc2aDODrmm|NOzuzs7f4r90GKDvV_su@3 z18vFMAMX@A-?-uN9Oj^N)@ePqruj=tSFhXfU4GZ{x3(MF&qnP!R`=)n^S7NlPydZ{ zlzhVTIyQZN?e99Xx(a#i>%|XlR%*Xd|8A46e=h!^nOyG2nisd0Ub6dtuCVOKihtKO zZ|k<{H)Pn4edo@04BI z!hYcD(nWX7^1P~d9R2#iCM)YASCH(9L}@n9RhG@|OTULON?%w!fzR^jTn@V@Ml0u={9&<>^{tWZTk}GJ^lCpzAL}I`JDa#Ki|I0_BGgZ z)9`|M-M7v2-){T$s>1fCP4$~d zzip{~u)O^CUwyqT-u4wU-kR@dzg=^xPx8Xub-%A{Gz&iPa*p=9t^9xg$jvrNU-|df zo#dQD9a~P@o$vIr-L&r450~4E^Y_MnIj;U@i@fh9>*Z&+d!M(t#3ymf{nMq-Jok33 zGTQy`a{hL=v!cJxm`;BC*M9GDxvI*Iv6bfn%~r*m=i8r`pJ%;gYMINK{&y!I*;f83 zcpWzRTcvXUum0QR8*HtL3$5)-_)h)!yy^qbGTsZt8n(PS+u3#XTF*J9%U`Kp5Lw^|FkaeW_@ZQv@CyaTMT?YIrCajO+p*E=Q>)D< zXTHLcj7ujTb2NmW;g)aARm?U-zVBpZver8CL9O=ljpTmQw)UC3udk2a zFB|GQZL;YE(+%Aw3=#*|2{Wj^(RK5AFzSjE=9#B{Yc22F-XH&U0rP@jKAF6$ 
zp`zdP!|yzI?msd6%bl9&O=XK?OW$2Ad;ECwURfJ<`?4=~-`=d&eiOaj_G#&bZLj5L zX3qMZKi?)T{`V!`{%!W!EBnhn=qR2o{5)l+*@Eiu-Cvu8O|MwScQZx3y-~|{bN=#k zo2%nup2!)mt!*fR7?K97KZgxrm&T9I4s^=pe#^da47Se|Kj_J{%7iM&5N=* zo3>_7s`jj~Xy?yXf1MIu^E9f~x~VUI%u>*N%+m1S`Kz6GxYci-c;4VAX%)5RREr!# zk4fbb!9_C9pNO7*al&2t!AEI_!ZVqd-|PQ1TDd_lH8e`$;?Y-HS!>f>%cbv?ZV1bI zm-zduT=LylyMCVXY%|Hu&aS;DY3^7kdvV!ZQD!TSJwjSNm(Q)LcE5V~``5?v|0^~> zJYlXqTYP7~l<6tGJs%G7oVvHhsQBlaMxiSV+hnZ$4lMgT=aEb`kHw*N1p@D8nVml< znfXcc_l-BHW{DB`_4mH4TfI%$oqxmsUzdaVzBIKzKPNwLM(%w3hYZp;7GD!vle3}x z}!a`80Hsq+jk+^aq@uk7Dd{wi4>TcL`5cmI9SE^AD=WcA|X z872E$KD+j;J?i%5<+4OS-}{0AYhEe2cgSrz?D|@1%5|m4EsCcu#I5VKVptyI{zOr~ z{nEt)v)M9Yyh>z}_T}U?b(ZFDR(6wlB-?$?b@>CfhYN}Yl)g>A`=a@EPxEWmF9ij6 zbyzw=Z7%HKd~o+F;{-8>-0@zi#)tEb;AIKP~<6cdNM( zcx3P12DWSK zHX?3UnDx_5)@xl$<~ir)uKhOuzsbRUrN0!vwcfXX^>)ErnTut&TpFI0{)+skq00M4 zpg~Oh$aJ6M%a|YdrDo_mL^5oCArf6J(Y7Y2f9E-i9eL|4A9=Xft&HWK{OG3tn=b8j zH2oi(;S{t$Lb#=xIP?h{jqVzW`NRMeV_hZtmp zwsuLIBHrGehx8s;=?@38taoati_U4kzvghJ{n{K_*+m@GnYq{~f!!Me$oAyVr zuWb4gsU>w|AwzF>m;P+NL(~1$A1IXc%-tbfe}q^3dQ45+sp(%qN2&Ij-^(6uf1Lnmxw?wy>G9wSeXh{{1^WJ-+VdH2wH@2||?(%eH1;zqP=zx#uDG0kPF7oz~hPoT}n} zA9=Pzq9*Q!T~SJvHOsL{$_Hd#=6tYvJ^Mt_X0~m<%#0NqjBj+$Xgg%d_~Du3Gp%>g zj}{nDIbqD>u%+ZJTl5OWqmz&IZ?L!j>}9>9asI>OPoo$N@++CVg14p~|G3EaQe4VM z8I^aD(YpQD8Fu@AKE0v4;>o<)(w%#g_bZltee&`gx9q)gIfE?R=vrziWxy zrwr@j;~Px9ubJICwoEMd=f&&G_U*d#`9Wj({eyc=(s$hvTXXNlbdLH4^9qg&MH;cZ zC0Y*JcTbMgnJ&32r8+@`+oK@;~I>-#_Z&d#;I{_KUR=brmQ&Ii*r zyUW*ZIXlPla`?7&F)RPJZRucAIHqs0?ZShrAr(jLe&0O5YvRML5e%CrGCBF%ex0(v zDf8?sQ@%5G|31&ZeW;at@6A0batudLUzuP3@8{dSQ?BcNzvQhi>HaZoXJEtY-M?O~ zetW^0|7*7Pym}A0Hb`G2b}{1t9tD=_L&rAEVQ*k;`FQx0qt`>TqkV<i>P}c@w$n{jK)&8@==G?tQmQ z-pKIub=Imkx6@^^K8LNFeCCVtv|sgK5Agi{wUT}3_P3IaIysN#F<0FX_n((=-Ol9h zLCL3?`ne1D#<1t!UD7N%UwrR#h2Ps>%Wt(jE#x4xU}wP@$KQ7s>;HblDCKo?huEVH zcVqJTmac5%x#0D=`ljaEB<8yu`+pqO|8sX*{+GLR3yYI|=GA=i{Ga;kseb**EaFi*B2nSGPrFr_R>RjY-`cKkNBty&J)K&)$UJ zN!YE+TUi>~GE{}*=lX~qWYb$gvxXQs|bVd!R7c(-(>{>@KEU)FB8YI^nB zVt@Z=qxwR_=;cc!~HXCf2>@{=D_p zr8n&9a(S!GRr>EOn76g;th4@YQ~vWg`SyDlm&t^@lDYk9@qWL{i<*mmzpXr;#rm@9 z`>Bl{5BYDMdR&ydz`H>|>6UnrNAcPtmQ&`jo%(Y0N&d1g`=-ymwP|jOK8eYv z)hHKVdSN7?|7-)J7ei`zZ0S_nUoRGa>ofM-l6v^s?KQ{3b9_%nN$x(BwbE$k^I9pr zXMOSgJ{_jf_TJ8uH}$99^4wje%HOq~3@fXxYQjAk6e#qlE8=#Uh^Xc~ctO-ah7Ka%SD}VD|Q;xu%|cMoJ3P zcQNGL-906SE?{s z*YrkS@?0@>9TA0`$!C|!J~`;!u_Jl=A4l>IFa5YkP! 
zcwlDwyv+EYr^4U7j{jd(JzFBq=EDKz+vg2E?tEAilGO2K*Xwng=l}b%yzFqV`Hc<2 z?zeYwCLI-f(GbqeawxHG(hFA4J*z`9Jl!VPyuGS?;F!y8v!&6|hO>4&-uixSP_B_) zQ;S!&=Wd?K4<>$2R9WeqbMF2oMakPmjeD1+_Fh{QzxeLAH!J7QP2H9`b=qm~vdXph z9`4F&zWQbT*Q#jqlNU}(T%6an?$=js2hJJU!XJ!(PX8Mkw`l9ixxU(4Pp106{l1*b zu*8?Cc(%#@#Xc6Fj=r~XQ@+LQ!294d_w|onNe!DXW{3OxrSO)lV%reNu%&bQy~})C z7?c=WR?PV=_?z(tr-9qf4^2D&3i1D(sHSJ9Azn10GgiU=(W8w0-K5dz1G{a?WOm(C2b>q=uw*E9zt(=nS-Jnm@`aKMS`T@toO7O|`pNapt26`k z-pYh_`MQegU^U-aDa+>+buok2=)aU;<&k}VH09s*>U*E%W*e9H$$n}szj5l>RVK#< z9a$r-rie{iT5YF!q8VfB{mL_&qM~@8wlc)7H8R?9nPmxsZ8yX-o~QAJ{4m?)%6*fb$^`fuBmA1-*(h8 zmE%OKTjZsd2aVI5UuG)Q7B9WN?#`mv6^+*Kb{sxu`CR6{bx#_L!(+8$5_3wgMe;ph zkUQ&MwsC9f8Pmz{y&IN_UU=kZ&@jQ-K$4Mx?-;YPg0iB*6*Ygc1t)o1tBy!^X!O>(9(?yU2EOr$YXQOPmZ$9Gd-( z*;AjdJ=2>fbXGL+$%)nX{eSak{+N8AOo89_OTf29s-{oRuW6jSigABucX;zn!=FQQHI8!x9NSc{`tq z9X?%lW53UrtJ51KpC#^e_uj~F&h=@@xljjx)-%oQJc}Q&d3Jbw7{sDQ|K9v z$tGw2u3B?p{dp(3w+DBJhMm)nW+?o%IPc=$U{0IFH?bk>it_Y-7Hm+Rm!|(vw|GMP zohHRx!LG%z35_iOvmJrg({2&-T{y6@}-N zxc2l)xR=ZT&o-UA*l_RXg&i^01{kvvPr$ zqVGF(?*(44i+G zj%r;7jsq>t#cw3p8MvjsXMSK`@NiB|gLvk*pkxojQ!a;{?QCSGsJp4{31qV>bz*l3 z)Mt!47x~v$=H-g*5*~qKk}OGYxPD&XY%qCoL8(CQy3EtZb3R#4S;iLhUgPYxr(tiE zczYy`kA44C^=xMPt%FkL#)s$dTtC`-|C(*1;5p$P4}%3-Jsx!bO#CIz5cHN!@*8eoWpC@7I_qpy>B|EF zYo_=_m#OGSNO3J)@Zt{RF9seVjmO28SKNN>`E{3$dPi)@{7SL|suavyl zcCc39W0C7chO=#i$`-DOSy=TqS$-&)*nUSt_~`=0$;zi5r}l4S`M|WsadFHVj@J1OK6aHdOBUxn3GVwg zk=?dpq3iFC&AvsY)9$#lWvG6iIwh=`X^rnpqoZy7@_GHX-)7_obSGxSu0FY{L6qH` zDKyK#rTGi@5Cp~_vb>;1pLtl5VVUgR-^Ws%{jZW;YEq5!@lW(k>Hs^%YixA#|e&#tl zo^OaOWbpEsEHAg5!J;9If z7*0)aZ4lct=QzL3r767D)<2~m11BA41^nUCx#hPP)B*bBwsqftWfw$RD?&-RV?B`;<+AB5EvF2lx*kw)|7^Jc{*zi)jDEg{OWnq zhxUhaSF?CHF`v*BE@M#rd_$x2j#;C(nEi$c3Om^~FQoArf0bJ8bbzDnqO1Elb*V|9 zlGb5)Cc_7&f`|VOvlX%gDDhe}tG_7BzohV@g~5Nx#G`^HDXtD|W%}=OKdlh2J0MtC zDEEBI!vn8HOW9?2Zc%*oXpYVHwOYu~JiY^D2$eGYi) zbh_EG&(pQ>H`1H5#Gc8S^Vd0zcyj2yTtON`}{&?mE^jax2-Qa@ui+{=3_4v&}g4y z@!;WtxC)C0oOSwB?7h60Z#IVo#k;>WZ=3#d-=|aB*QyKR7f!zt$FAvQ%pjlEZpgl7 zX03+HYJmhsiBK+ypBI*%;Jek3_9^41fyM)mr3~pj{7eNYYN2+O`!tsO==2q(GJO41 z#KWzvpUGT(D(qDCREBxRQLQNzTpwKSI_1PMpAquCWk0viX0B+@OsN-NcWn15X^WWp z^ye1I$Sr+kK@(Ijn|^4Ue`pS8o?! 
z+G60if5!CRq92?hFFiSG8{@-m$)NV)$Wg^pYzrRQFv!#}=)K~+d4*ZuLCZm=(vjQp zjAu84?k)E-fid%}K0I<%53pCfC03&TtbfwQ1J7(%rp%fu9kfbH!Zd42e#q{Ob4-ns znG6Iuir-7*DIXAFTa@s>L7TxsfOnqOvu?&-hBUq`ra7G*j*JhsoRD~V#9fuqDzU&x z{o&W7^_#csT)RM~G{BG5z%lb-(>dX zn9(3{Jy<5j?s`W~A@c)`$EM5GEPi*ji$$ew-@^Fh*4FI15=jEZC!|hD-LRBm*uijM zMzJeT!=nS&7`i$b^^aUwSSZ45`G()*6Qdd9gas3iTQXWs)O*%?%he zB<$CyQNiuFQ?K75R5?<@y+M#|HHSptX8!bD;l0Lts*)CDEw#0na3tc@n!67axdS~V zoL6UR2(Mt9Z$AA*^Yyn9Ygfq3G+3SU_C!^7p2y)iIh!9|KT-PX1Zairg3qQ5Cmz-@ zPY_^#urTf1Mp?!d27@IJb?+6BHib?WaFY_A3-3H-28(!$ghwo!HV=rFBcy7w`)m|T)DrL1+ zRK=`2B|L|}^Ta~GPdnp0TDR~Q9}DG}@-skdW5(KE-D{s0{L@?Q-Y;jCe5@zro+fzT z%7V?N3X>lzGWcH+Pv|Os_p(nx+d=SW!;Br@4|^Zw*L%UZhsnUzPJ!ctNJ4jUgMjqi zNqiGrEcssX$32K6a??B zHBX8aIu{=n{`KW$^5SE?*H(G*gfqo)hHxra26#_*kUYS9kZ}e>P*cDhO^w_9N)Ewu zCcjK@SJ`%7!CSsT*N?${gTmSWHScD3ow%?^%*1j!=fA9m&#PkjG*+@mcHN!ne)`A* zhW?hO>rdGN56dpvsqBBW=5Wk@g}SS1k`fl4Ua33SPQ7l}m3J&F`s)*+_nnuHB;KBJowb4S3X3iSze5{?{T2r2iEd?A zE>G;=bmAGqk;1;3q*KK%)f1dGzHM{e-FbMjo8&o5hWah)Oq(X~>L*Ov^e7KcEPd-l}&sb z9;rUw7r92ylu?o=jwxJ1_#WE?9_5{!Hx6>%NYd_omiFzFV;_%>@ZBrTH~o}z%eq&0 z7l%H3dx1T`Q}lsk_T(I?F7ve&kM_UMJ*Mw(dg(I9RG!YMJ6Ilkc$RqK($$Mb3lb-} zTJ9G+)y{KuW8y=ry%x5*A~&+v@4eFzWCUJ~Gj}oL&t zt$v@fuVuxfSr>_kCxjDpSLU`e{Q|5t>-ac3swmJXKjr* z=GJh0`!WF+PxE~lnVStJOsX((XFh!FE9ZTUmxVW&KS`*ulrYw(IF<5QB%N!kyDQo- zamg<+>7=D<<`J0-HPS!KoLhdveNBCA{O%|D_DjszBTp=0DZa%ZBs*6pXYZTi|7$>uiYt92u{+iZ2*G;vnl%Hpjr9QTJ^UGVJc z>@{0I*!h)QyJ%6}{K*}(s&skgf~a6Mso5LVIXepZnU-}dOx(=G;rP6P?}ifJKM!^T zCB6m1z5O;XD)u|+#TlPsp3rf!f5xT@oQd)p-cpqxE~=#n3U^F&JQHJCVWAYDw~l|F z#R9=5`zHl)pJ#p6J@RA+i^+#u+oPCzPx@-)@*31P8I-;Xi8d(Sw|4GUueh}{m?w(y zGK4Ys@vUg7y0tf2LY3Y2RCC|P3-%vQ9h59SU&3(Cz^*~>lWZmD8-@j1r%cu_UbW%> z1d9{LCqLWIle&9?q?-q)PUOK3HRfZB*s`A7f8M3ZsNhp$6til+mDY^u%RXHdy;^oy za{rcO_kCC8YQK8^zI~_tt!}gWjo$bBFIAPjop)eHSX0s3liA@JyIqvFzuu6y@07^g zyXE(5-+p%8u@BO|2xq?06u0%ygqqwY+m{R)43d0IW-JGsW}ax5doEb~p6MRbodUTA zj(-z47Zh2Cop?ScG5ASFi{dMlWjuvj ztY59&X2X7VYHX7H&E~MAc#oB4>UkNr%o7eUNSB{!+*Z;5mGk`rKH-i-w~x*bn8wo} zk;=i4nz^ZVm(hWq&l>#3PoBTqcWEZq$LV{r8;qVN`It;ODRXU`SY70%6AF!1_0d!E zg}UFQobfPh>3n0xsF&OHj9Vg#v2s)6zL~l<23gll+r6$Z-pM*zQQ3BCW^Kiv>fGew z#dE7)KhF*B{w5Q2FZc5ME!lp1@0_!GQ1bg^@_s+ z{<^<^qPlpkuY;GHIy}#4_`TXcw{`C>HAV#opNh>4S}h-@um&`?t$jSFda8p@>qPU7+Pre4pjt3DL!;SvixIPxTjazIH@r`=O*3<#ySu{ng*Z zvU3uO5wwVEkjy&Yds% zC24`H<++E-3c__SZcOfAZeX3Dq~@SMt-a=zBj4oTp32sH@=r9k&oFcBFLP85Fnh)N z;36MW(E|3~l9=@WuUH)B8rDzgVcW#MHf3Mer92MvB8jOxi=Rh@itl_aTXVAEq{0io z@7x8-4;Qd6PXG3@**?o(ujbmW@bb;Z->SF2y_@bodu!3Uzb__gzxmi2%$nn1chaHn zjp?0#PdD`6-0?5@ZnatINA7Qz-RpCW!_MZJdxZqwUV81{*A4zR6A$xkns?Rq%}Uqv zw?zG8F0XU2JT0^2;xxIdci+bI|6d}zX5q|LGrZcTEK&-H%)pJm(Bo0B}( zZ#nPy`u_g?Ute84{d$qz`ptWfT22HlGg3A^@b|P~nZw+_FOP=4U7&eD_`Cc8Ryn5z zrAtp3i%ux>B?uQ*Ed6jyZ~^llhNwU>tEZ1`a@0R@FiJdJSo49cqG`UFAo~Sn*|%Na zg8Bq(PIMU^xx&w%%Y1=V_le8D54TG8DxPnB)*rjBk>z)JrEw8+89Q&m=S@eC9b}i! 
z@ttk*o#)IUcDF-?V#gGC^&dqYx^3##@Mcf5eUkbc%ZqydZu7rcGyD8@_Gh+9_7*Rb zr0oNQpMPcdxV`u`|Un+jQ@uJtA%`f@9fyq z9sBd$>711HiQjsE|2uxK{PE7R=VyMu+3){H-+IA+yLE2nk-uIntV{~`PD}o*_WMoz zz9jiQKW(a?T-E%xy8QozZDQmqTrN?)u+{Kfo54!c z`ZV5>X>2>Lf0GZ%zx&mwf5&|WH5rCE3_5}Ke+pSF7R5|y>@Q*1Ji$?hfpfPb1Bc)Q z@uv?v8Dwuxl+U~+QZq$aHkbeA1&a)J!;)9sR^7Hmg|b(eHSTOkbgtex;r`WGIVs;+ zI3Au}SYk54He>1M*eNC6210*!b;ozQ&##;F@~JE55>W@Q=*@MloF{g4PP*u!t#Wpi z_U;?|{eQLb8#CQsH|J9{d+dLkXDqXK{itO3oNxOkW^Lb|vwGL}=<)C=t*`#&7B5?Q zP;maH{(m2f^}bK3y}$D0@7OoRFe=d76gd4qnS z_)|*_@zjUQedqMAj!fMe`#Un{KBI2?r3(&irxRlTeeTbEH^#+Qcil;84@41LnV zR|`LXF2B*z`1|IAUyMv)Q@^b<%-=qn<;koq>QbMN?NO`siT>5ZUmA2Ryev!eW%W1P zo9pIPEq-6mcGi4d=-uu6O-}aRew>@RPU2Yi>H`wHzMoltLhJI6wc!nCn%jLgZ2WuN z!bAFYeZF+W{rIo1#m$fT{Lfr1xTj#!v$!K>x3^R+UR#y5GCsJrcJJ}TlFfo8rBC|T zH2Pg;U|64Q0hcb;{m}5{v1Kw@*UXTAgjws_ z?UuOQYkRB9uZxyG39ynnao_D_jkxN4p}Ze2FE8KScw@)Pi+mOjGuYL6vbSZOH<~H> z-KsH*A)AHg=%-%o(s>PYCd|!9sY z9<5ok?fu@r*<~*||EJ3TIsCIqbI-a^CCfj%zDBMKT|Q;4Y)n4y`Sl8i-|c#>cRPRo z-<%s866@yKg9j$UnHhw(vAw#xdwOlg{iWXy9}kb5yv&1P0fQ1_mPLQF7-JgKmkDj_ zckq}n+1xO9GThMl=>lVc?CVB3lS^BKFSPQ!Qsmv?Y*xeaVvc*Qp!@E{Hb#qLKRD>K zr3LEez6rgVbG$r$<<5=GOBdg}wl>;4-}%J1uFuEyt~#(~><;<6JN&5xZ{5UAsYmX( zu2)&Cc!9}`al#3AwOI!`9mMYGG6}LZi%#FMros9ogUaD!>AepAi??kQC=s5({G?NQ z@k%A1JL>Dbck)d8mt}gfzVjXT1`&dP2 z($D*o>ldvxp0y^wHs3nwhN9-?3HGTEjiYAm_s|Yu)hdlmI_|nmJL6-n`M%YPmv5in zbFS?w|CY)}!OPZp`Z5(XiN*TOTsw2ynN?M7&C z8B~^u_sG~Bu-o|FYWKgVzT1|3-Fn@EF=2=JVPQ984fX~_z6KYo7ayG2c{W}Wio0}S zLusH{%5lr5oO5>jmK;_pUTAS3(lDPvH{*kX{;h+SB?rW76vgK~XqTJkaOr67o6Eb7 z9lYt#7{1MyX#r!J=4|Je84Ao>F8$Q=+NYX*ZOz6;W_GEiZ|IyZDY5NvQc*vrwj znqkJIzk$o%2;DRK#+5B<9H;JXxQMwrF4Fq`udClmlQ(fTYwpaGI2yh5XBzXXUpH5; z7XNYK#iBMemm+J|K04^HMxr8VjNQ#-|fmWU3oi( zCnK@hw5{&(t!qBfdDS(a7_w)qvy@}nCJ|gR1w4-WJe$FQStGHEQGvyvblcg@C)ew3 zKOS~9A~-dCzxqZ~pB?OH3i^xA)t(YiYB-uZaTfOp&IFe;mZcNg)DHx^@0;Lm*I47k zpn2)U0}Zys3lDiOxcolR|BYpCQOg2%t6K**2@%idj z?%In7jc@&G`k5^9RnbG}(ff4|eV%P(?3-{~W&Q0fNA-Wb3e4Ej>2S~5Cr#n^?%-Sm+tBVw^%ry-wssf+x|v!7LEC?2V+{r+uddU5z!_buC*lRMk> z|DNX7sJi=N-nLgSP1ti!P0`GY3HJSI4BEw-p3(4IsG+I7<0Y%I8_)J+w`;d84_~{* zS<0Qe|5U2kikh`wS6)i_zs>jBvbV*zwrFqbXWdZ?-nb$C6om9}k9H;bgNmh=C96m7qDPrQXivh%I%<@Zv|Uxs@7>&{9ue>HbT>MfaV zyY}4LSNbRX&gnKGZSAMurk~MX7L$G7=ATn_-bwFU-MO=*?@rw7wqo9A)#v>CG;<}d z`Q$PrYS8zfrn8Ht)0Pnw=Xf7q^A8-z;THx3B(VzRS(gPttVVUE6#u|AuRu z{JcM1H@(@_E@qYc@6XS&)4JPlq}zR-dA3c-#<(7|oVR2a8-rZMqNG`j484_^tHaiQ zdMWk#`ucKx@p&41f*!uTmB_I6^17VUP0TZnmOeh4{H6JIx+25%bMtx{LK?JpoqUjU zde@z+A!iTX3$zz5`?~bWFKch+62UuPR)??O)?f2Pxy`ZqK_h!k|M{NHzc$vkoW7*5 zpEUoDeogSh1Fj{!nMOaJpVM5xbC!x-@Qe%yV`XlZaMbFWjSbN9|@( z>4WD70$$dY*8ZKbwWu#Q=hB6ie@C7czBsgf|Aaenv2xB5RA zO|Ad=-!7HcILtK|ya0A?-LIEYaWCHQk2355S6gpvcoXUwl6X9g)O&lQH|EO7GD@wz zbyu#RaZ88g21CXR60cY%Fa~sQ=e}8a>%3|1YPL=6xzVQI_Rp9vU3Y1TXW5H|?NPql z*Ig_Ae~9}4>td4)ni-QG23f+>~WUIli$ild7>Fen?gL2O@?zq&xX-d%EU*!tn558DE=bkb3qj%uU zhW%IMwnq9eoHna`z9xo&+cKti8?U1nLt*M;spzFgSzhuozr6K9R7st6iDEnFG1duT zp=qJEank(>`;I?4+I{=G-mSYl^}EjfnQ6MAI-5aSu$wVyh4A{D*KE>v->yCN)m7o_ zK>>C4gx=-oeb03#LB%9#pjUQQNVv{(nv=_ebRJ4Kv94H*Lc-X7TyaYDbp7 zIaGOT<*c8ve#~l`XRCD@9cBoYy-lw-`Q(`WbkgzoHA}vHPvQE&Fk{7Zrw0A!<$exx zZ>c04dE_}gW(M=Kc{WcTR|Z%9)8x^3a`a{2%PVr8Wi!{_`f*LW;Dy_yGR7-Mx@4|$ z$1uADu~q7^`z$Ye?D_d&;Kf$My}h=4ukv5VM?Sut^Pp75zHW~_hfs&l0zcEI6F`0I z$)*e*iUQ0x%FGqEt$kJWd!>_Le)i8Z`?F0ZFy2~s@poRhwz*lti`)1AZ3C|#ZMXm9 z*x~xXZ36d<#}C}o_sP!^;M&m9`Rf8>$kVBN1o%H3YX8S*5#<@c=r?hqO44oa(1}ir z`&GV~ZxJ&$0ol8k*v(q z!iuM9%P*Im-sm!YY1?DTC2|h3rqe#VF)F&W`3QGh-MMb<>j`;LDi`iD-rAIUy5^rd zTe|1L`(!4R38Op1m`(lbI4Hm+taaGCY^UrAKQ%eAD8Oao(wOtn!Q+)(J+Zo z@_y)i9tPeY?gzRP?R(}wnDYOpL1H7jz;gzl2KN}Ie5R}hovHo}0r!jz8Ftp_MLM=| 
zBrV_yUDTj{y4{^Ica_YABhqYIUr$Tu%jED~2Vi+02A{&DzSunWlPIm+fjb$i212-8%2^ftB+e+<%-pPZPut>B^4>Wg+6`~J_YUi(1kg5VEE1;$;cw(|S$yf;_# z&mxOPm7S}F9E1YAB>eX5*y+L^QfLtKVzstxgXfwmjeBM)zXKLC2q$!hPX06Tv7m2& z51UoH{1S0H z!q%>9IyNEk)9l6_CFkY~`2XHx#HiFTrTV&zL#Kb~fz$Gg57--A;#v=gHe3vxeCt7O zP~Gf?g%T$IzDG0L{Xev`iSAgjAgOcF467BlqnB(m{_j)2CgQ0tC*QdR3`U{)+A-J9 z$90R$e}3qdFZ&X|li;1K5Qj%!cVNqizV3TY^Af{>Mb+Qmy>mS*y|wJ9)arL@WhGd1 zZk*la>0ITo_(?RA{zfjNx$WV+2Fwb{=5_aUE=iHDP{#zGcU&x$u^l+xUiCMFZZPk|RjCZDg8cdnn>GS$U$_7$dM3>lwPre;f;FYx`( zm-3?|K-H$`py2#1+Bo=7yo{{Jvumfa_H62>=zSKU%gD-lb~iL^1HWj z?Ikl&2ZaNZ`4r{Y3$6TbaK4W`n5;U%wP5~?ltag>jV^zao$@$g>vHaox|RjWjBgG^ zf3UuBxS8!+(R#6;tu~)Z8E)0sD?B@TQSW=#MYaR-MlM0I4OZel(V5~3yfZt>8MMyJ zpQp4=;F%$g&CEQWqYpw6AG2U&h#Qc7bE& zrQg;6-}=lKKX%99{>ojumKpGCR~~DLc#`RLc-{jAcS|SZHQQK940pXuaD8!X`}zb| z)d}ny7TdIWNVc&YP~C7`Vacft8V7Ad&Ugy<+mt7j%;bB~vbtnhac<+nO+}YEV?-T9 zo?aDWk7-!TG(#xVW%{~3hvpmIjdqv3r{l0Ybb8kJ*JbZ#zIrNU`C-<@JoetMD5>Z* zX)fl`u@#ZKJj^OTrR+~zXU%MC0$GUy+WB?OWWsmH+q2S){=PS15J3}-jXHn*D>=O?=5ZVyylFa31FdKd3vsjj6#6^hJP1UjDgv;}TA z-%#*wYM}ksK#r4iXM$pxiXJnZhTVq*}|;Q%da*n4TmjayllYjkWv_qEUTQo9gX5X>0!p+K}vT979q zsV`jHxrfR8?&{ZX@87T5Cu>vDbTzDWYo>SHmiP7*JI}^`US<5d#<+3DN9J!^{bm0M z-d=zIUf9aYeB){7x6QZvmt9?R@N(Sp8LE5Fyjr#Um(6zPb{UnMci+5pz5J$2^SWw) zD&MpDOV1Xb-F5xSZPxbsQ+$#|3$6x!zcl>~JOBSXY3pYwXEOOc`un1V|JB?DpYM8? zU1MB+bMfc-Q}$%5b}IUBeDL(Lq$tC-9d!;dsim!F_t&*OKXUxJ{r}3{@Av(FWO5u* zRDsCos}2WkH(vcSXQJtbnQZsI@B4o9YIuAu|J|bfFLF{93*5N;1vtySEn__A8LC^i z?`Sk~kpGjHwl}-_*4OkucdnV{Zt(yA)9LpC-}#%|pZRaRKj)va^wso~PkNs3>}tPl zk)Qh~cX#%)zu)HFuRU^Z*UQUhmG6|xZhB6?@gR>`;HBK=b+Nl{OkE!LOy6z)i+lgK zeUJNZxcl?d-*0}a$E5uJe%sxYX*J8DE3d1|-ez8(SGF_v(!bRCyB}+pJv;mS=Hbut zHpK7!&2F~qyavmw{a4+-O69fl%}oFe<^F!XhW*X|vpbIOO{@6$N6YlzESbD4mK$q6 zT;Jun+JZmucGn?CcG)Kkd;-rM)@kUgBMOc#(+P}G8_kM-m6|rRcZA!_Ex%W}E#+Pk zFK?D1gDOkVfz*9jK^aHe6(spv_!NW`GIQBxywu;5`RMK++{Yg z?7VXN4O7~<{BQnNS$*l*j{m=&ufG*id2X8dx=q&U_p;;nKDREr(s9_)UvKisfAZEJ znBT1h%?xjeT>b3Uqphdk{7TQ?7H?m=xAu{)d|APZ+|Y%=-|Cjnt-n3}P5G($+p8Eg z{HGuLkcG|4c5&)3hCs{O@_jXP>_k}Y-`2EV?`%bT` ze8x7vxNU=p;Qp(5wIBVg>#kgUw=&7@##;Tq_P?)AzAk?1_pE@EI@?2gSLZs!-ro}2 ze$M~a>x$hkE7uxaou4I=!6G#6wlVkep8?Cc8Jsjf+y0)Da^0qSa)TJ}VeVx&^Jl5M z-#z~JV_3YZR3-n3=P(+v%RHwr{uElBmV_@ z&0F|Rom>t|^B_DsQ{lYhZ6Dd6PrjO5STpzE&GWB8ONdKXt{2R9Qm)ugFk_||dj{|F z>iG=DIzkLjHe51pvlBMZ++O$9cI`Ir@apF;9=~m|RTpdEyTInKeD3$B6JJGMj#bF% zJsV>gzGrLMmlMjlIs5nKRP--2TlQV!w)5eBf8B~}jiSW|+e+ni@^8qPdNcire|t%O zeMwd3O&^a(xs&fy&Hr6?NbdNJkIMa@z8yMlFJE!|*R^k3m-3!2y_U5(eD9euB<6dXMAjF_OnVl=)$TjiI)I|$=9Hba}e`e+~<;yvEa(xZGD|138 zR$t&qjlQRnw(l16-|w33y%OBMEN+uci?{#gy)EHw(rlaehG*@ouhe}!V)|`i;qu)& ziYim{Kf1E<_P$nS_w3tdlk}oAIC}EcDdiWLnz_!}zkc_0Quvw+3Ib{lK~MMnzu<3T z?t8oBMHEAHw%MAmy%k&2*X=()xs`igRq&mEzcjDL^=Xq@!YRC_sHii^qy%?xv>o zJ}XoiHdAP-s)4$?cx5pM|PTSshglHEWr|^8@c~XndE6x9^DYQDMB3cm)i2R^=EvXegFHOswtF>GDpov@`RKXZS}Sz5ZR+b>_x z;Ps{5$4+H$x$Az>OnQqNc%{mkMi4Y##2CY7nZmnAk@tt+FDD&gb-yk1{#J7`JT#sB z{menHE=f6^>-W+tj+{uJ&3l07hwmBAOB#Du?z}iJ-su)@Hu;%+^mJR*%T-J_1bBpgF)eZ?3n8I-IeH=CWx<&&~}wKk-|u^7-psC+&ZU$$6Vt*5@WS%c?c7 z{$TvFG5*%{{y%fRUXa`Y8J#W@^-*S9b!@T`#7kF_6X2kx#6$3i$+d>ySIt21_3hy6f-Y1IZ>X$5> z^Iob$@!Zbh=T;_D{7x}22;cN{aSZX8{r8T}>u+Y2?H`n0GAtJOs8kp?@12|~!>kDr zRl6IE7|nitpFWlQ%pK#`j;mL`NV2#1D7~%dan|<;-Z$6Iud8}h4ju;V3TWfq|L1!5 z+t77f`(;0BUcH%RU-yM;_STwpbL)S%emnd4-(fqw z4_j0221&=*UszZ5tDLE5>oxbMSGd?izMq?!bG3f;rvH`?_vd_dJ8O7ZeouII=*!Ss z&y=4XS-Eqi%fnwg!r}!OG(vAb`1~?^R`c|@HS40iy4gqBpgT)2IG-#1ffyP6$eUtT_*m1W}3dp-%Nb1Ms) zvX&K@UC|&sD)-_I_O*8e9H~%WvpD*-ImPO-|jBLgE z8pURtwA@+0kNq*#S3c0Te8Z%jIgi#~vddUId&`3b%>N!mS#Svc_WTE`S|C{Cl0*A( 
z_dq|tG+vQRhJtNLN4Xj&o9%wsCY|$jQz&2Dr492W3UwQEXB6=NE11LVaQXBV)9pMC zPZpeI?D0FoqL6G9E_u(Sfm`AD_H6;P=DjqT8vKyuszC)e=oHd)~OXWU|=`)U9EarTq zv};3VUT*(aRY(ie8hz^acNPf+dXsXcR>D{n}6`1Z@$yD}F? zh3TErekOfUi#>k^KPS^J24lXAhJ;_MMSKgH{2X)|CT4tbXf$JYU$h}zGGERk;l#uK z(+gP`_!W*cHHoOO#?57(;gH1>ZN#n@x;BU@ZVT&zXRCFmx=rKPwTfEIqu_0HRqT_( z^z6lVigNEyWMMqUv_v(aGyGhoRce^w(8bUrRA=xf;^l)@SC*)& z934XTHxqQfZ(^LpvWCHlZ<~zAOXh~Bho|#sdd=|&dG0(xi`iifix1`38cG&#WQ@?p1{;k5e@A-^`2?71j zDik*>uzz4xV7jt&!)Rj%=v6`<^%IPhWJcBndea|0U7Q|6Kr@>e_dF7 zAg=m(DCpBE-TH)mJAxjB`k(l2)imQVsOp7a z-3*253)>b<@wv5$ZQtLw`O(hK&FN(;uc=fl;Am*JZ&Y|V$t2u6fh&rq(y9CabH~Ah zOO4(-ZYi6v*yhNT>=P3{wV3eptZGtPK6z>LcAgE-B@`|^?{M|mZStg3pTA$O^ir7q zv7Ic9-x;!b_W$DYo%q-F{imjb?dCk^W!7)^(hlOzWq9GaEq?a-?ZK53q?Ru5-oSr- zRpOp^E6Y-<){4lSxBY(S*>yfcq$Y!IM#J(@zDVZ(0n8Og|2?+d!^SW7=13;LUGD3&^=wj{ohla?Uocf0WXdggEVq(rMqv%JhS!DT zsXKgXA{wL^=YM+`G=njfb;9$8wpJFVz`oWg=O<`7_Xmw|)tbK9+dZ!zaAFa6 zKkB(gx4xk9^mEsjo72zxU4KxeWl)iaROsD^VXox6_3YP)^yrHY3|l})omgI56O+8y z?Yjt{r^Dh2)ewtj6vh^>QtQ^0axZ(R{k4$Qyzc%npB}HQ~6|K*_Vs%+xchin>INZ)YF0B z`t^)*#+E6(f1G(47R~54t@`q!?4B<-gCghF2aL=Ds^5hTH>l3Fwg|3#^ucAKYoxxC z$`poc+4l^d{IJoN7b<-b@?n)~{;)NA6i%6xQA9n)#!Jq1J9LgLKhcj@ZL9Pc|>FV!btMcGQ}@ z^KxM~*LHJPKWr7xxv;=7Y~|GXhU;~&PC^>R`xnP7x!~_!Kcl71Yz%rb4Bg&(JD1p& zA9dzlTe;XQNz`xUiVOSV_k8nYP|Llgwe3)W;!hTKlX-{ctA@4Ns0#1=XU6a+fGMhC zZ|tg(x9C&p+cqAe?w*?QoI8@Z%V^8N5!8dr==Je9{zoKcXzq<;aSV4 zMIQdVn(H|$gG#|8=Q&I!`?j*}JR>i>!Ly+w!lbWByi3J_gOla^Uds!f3CH&q<)pJ! zeBN|{F_u+dwlyd)zU6=u)B0vBM&G#Y46)v~lvElmzu&Et*7Q5z?|%5k?X`b<*;erf zwDYuQFx6&WUA~Uv=BY0WV2Yro{2omr5T z;ny6l9S8$W;)IDzJ~=y^!67g-G&T8H&&F=OT_=pQS7)!yeCrl%ogI46`KbWw^hpen zw{))UO*vp(-eS4SD}jB3>W1d@?zw*$T`TTADCie4dNT2_^0NlcDJmULm?v>AwKO<% zd0O1zDGUqEc^=>75(-nTs`{yy}fU)hR1JZ=C`@<>eVZm1v|_A8qASO z$+ejZ_Nv>K&GfnN!~D=Od3LYyIg7*JZs*%aZ_L=ac3=L+y?^cpuUWZKvvSLaEYUXM z)(tW_man{iET6!_)Ht6Zv%y$Vt8oQaSZ0U6 zt%Z5ohhjzT2H~wsEVHWqPU2d&_gq82gYDIR_MZJrD;B5vx$AwtFYu(aW8Dqg%w28{ z3%5&iId9z(zGwZvy0)vm?-E08SFPQaKDYM0_5H1y!OLzeogVk=|JHK5^?|X0GWMM_ zPxg5BBrodp&6#>(nn-Jt(;bdpA;&4(6iS}PKXek$E4k^~&AoEnUuKsA!A%>pE(R&> z%8Jr(%9^D%xpT6Q+Gpj@&Q<%KKRr-vlH8W4apL>Kiu=32-}!y7M(E?=z2EO@-@SWR zsQc)R<~Df^ejx=$A%(_WEE*Z5A2%5IFgOPy)%-=frmfujLG7cezqfYYdXJe zZH2MPk07Z<{EUu=4?n1W)VXTsray&0!#!Vbl4_m)I><}($?^B~6QZ~0ot$f3p3|+v zA=PG~xZB2~fdzyX$Tgb1J$U2K{Izisr#%?viROHra{6gXWo4vZ+#ZeZ@9tJ}pPg>s z%O12uRX{UmYD>J)m$*oOiMppB+>g4~o~SxEq0ut#TJf?=!f!rG`aHWnD~c^JDDrmC z>xL&QWOvrwELeGud2hKz0rwH5V`BYR%?#7eUd!10N#WeP)yse6ow!&pAhv#L=phsT zcGKE(l1xq8Et_)_1^!(oiiN6>LaIImkzb8|F=gsp!(Sv$(OHR zPo6hVuJicgYt3$+!68upY*TS?ye-7beb1G_)_rM^=e29sQZh3=`(!MW5}BKsCq?<) zl+j5ksofR*RCVzsBb~d8IXmB8f4JASu%ds>{xcqiE4@oH)`yhj`>dKdDP)sqX8(!> zN0m-Y+q(bKrO!v@Zq7J;@>r1L?bo4#r@T|jm+#RF-MYr#=(6YS2{O~BOGr<7@?=d& zx9H0|;@fYWs)|&y4a|HU5PDf+OSA8pTn;hk}9t;znT5RrR#HK`raXFF+k z`FpSW`u$(NeY>T{Gvm0sC3CTyiUX611JhTIfD5lKCf*yA zYx3{o?V&H26n*UDHY|E^Z^xVeWiDQ;T?Ox^3+HD!z54L+@td2QKYe)_{5saA@KMXj zNTrDz#90+{vBA z&voSYx@i3WwL3pWb-&iiE1UlOoSi4KZf}r8UQT%A>KERuAMee*ws_k4CrdYLt1Wf% zyzHl$#{Er3c9){{v%1jIi`PALw2ppNsE_8q`A8w>h1~nD({XQQ5C6)a5#4t7!pBcX zrfP>jd2q1VY)$7pgc{Y_vA0=iT{ha)xFfUIp zLdkjm<^BKJs?+y<6qqeOLHCPX(#4y}$NNrxetv$fy5LcXZF{P|uIlOS<*YjM;h=G1 zCa;bD^lKI3fzKu~fSjJ=+R(I3RPxz%)r85fwyN_^VvcNbah#x#UD3%3KhLh@^k3E?x|jl#!V-S=~QMu8Q{~ z6GI$J!wkiv((CQkoUBYQt)Cm=0ZQd60nQCgVazSldj%U-ENtg-o;5+YM!{`IS!MH@ zof{OzO8*?FS+il!jvtjfxA#}w6K?-e&@b}o;^wD@_v~)D#aLXs%|HENT%37CnDP(5 zdEvL^Qg|M*avrg~<=&OOMwmVDuw@G0`BRqZ|1PI^f$iH6%OtYp?WClbSSCGBfjJ-8 zCq3KLCLyk}L}2=diSG-7R=*bwESM!+tM9!=RKiixge9s7Pkz; zZHi2K=JU)#DvfWvuIkF+xVXEoz-LFz>Wco4pKkrydi`O=`rz4BJi*{ allDc = 
cluster.state().getAllDataCenters(); + // a specific member's data center + Member aMember = cluster.state().getMembers().iterator().next(); + String aDc = aMember.dataCenter(); + //#dcAccess + } } diff --git a/akka-docs/src/test/scala/docs/cluster/ClusterDocSpec.scala b/akka-docs/src/test/scala/docs/cluster/ClusterDocSpec.scala index c5381cb149..78b07162b4 100644 --- a/akka-docs/src/test/scala/docs/cluster/ClusterDocSpec.scala +++ b/akka-docs/src/test/scala/docs/cluster/ClusterDocSpec.scala @@ -5,6 +5,7 @@ package scala.docs.cluster import akka.cluster.Cluster import akka.testkit.AkkaSpec +import docs.CompileOnlySpec object ClusterDocSpec { @@ -15,13 +16,28 @@ object ClusterDocSpec { """ } -class ClusterDocSpec extends AkkaSpec(ClusterDocSpec.config) { +class ClusterDocSpec extends AkkaSpec(ClusterDocSpec.config) with CompileOnlySpec { - "demonstrate leave" in { + "demonstrate leave" in compileOnlySpec { //#leave val cluster = Cluster(system) cluster.leave(cluster.selfAddress) //#leave } + "demonstrate data center" in compileOnlySpec { + { + //#dcAccess + val cluster = Cluster(system) + // this node's data center + val dc = cluster.selfDataCenter + // all known data centers + val allDc = cluster.state.allDataCenters + // a specific member's data center + val aMember = cluster.state.members.head + val aDc = aMember.dataCenter + //#dcAccess + } + } + } From 9f4da87840d6ace4290a91b311b4bda48284411b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johan=20Andr=C3=A9n?= Date: Fri, 7 Jul 2017 16:50:36 +0100 Subject: [PATCH 19/34] =clu #23286 filter emitted reachability event by DC --- .../scala/akka/cluster/ClusterEvent.scala | 20 +++++++++---------- .../scala/akka/cluster/MembershipState.scala | 3 +++ 2 files changed, 13 insertions(+), 10 deletions(-) diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterEvent.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterEvent.scala index 16c9b21c7e..91b6859f75 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterEvent.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterEvent.scala @@ -268,13 +268,12 @@ object ClusterEvent { private[cluster] def diffUnreachable(oldState: MembershipState, newState: MembershipState): immutable.Seq[UnreachableMember] = if (newState eq oldState) Nil else { - val oldGossip = oldState.latestGossip val newGossip = newState.latestGossip - val oldUnreachableNodes = oldGossip.overview.reachability.allUnreachableOrTerminated - (newGossip.overview.reachability.allUnreachableOrTerminated.collect { + val oldUnreachableNodes = oldState.dcReachabilityNoOutsideNodes.allUnreachableOrTerminated + newState.dcReachabilityNoOutsideNodes.allUnreachableOrTerminated.collect { case node if !oldUnreachableNodes.contains(node) && node != newState.selfUniqueAddress ⇒ UnreachableMember(newGossip.member(node)) - })(collection.breakOut) + }(collection.breakOut) } /** @@ -283,13 +282,11 @@ object ClusterEvent { private[cluster] def diffReachable(oldState: MembershipState, newState: MembershipState): immutable.Seq[ReachableMember] = if (newState eq oldState) Nil else { - val oldGossip = oldState.latestGossip val newGossip = newState.latestGossip - (oldState.overview.reachability.allUnreachable.collect { - case node if newGossip.hasMember(node) && newGossip.overview.reachability.isReachable(node) && node != newState.selfUniqueAddress ⇒ + oldState.dcReachabilityNoOutsideNodes.allUnreachable.collect { + case node if newGossip.hasMember(node) && newState.dcReachabilityNoOutsideNodes.isReachable(node) && node != newState.selfUniqueAddress 
⇒ ReachableMember(newGossip.member(node)) - })(collection.breakOut) - + }(collection.breakOut) } /** @@ -413,7 +410,7 @@ private[cluster] final class ClusterDomainEventPublisher extends Actor with Acto */ def sendCurrentClusterState(receiver: ActorRef): Unit = { val unreachable: Set[Member] = - membershipState.latestGossip.overview.reachability.allUnreachableOrTerminated.collect { + membershipState.dcReachabilityNoOutsideNodes.allUnreachableOrTerminated.collect { case node if node != selfUniqueAddress ⇒ membershipState.latestGossip.member(node) } val state = CurrentClusterState( @@ -454,6 +451,9 @@ private[cluster] final class ClusterDomainEventPublisher extends Actor with Acto } def publishDiff(oldState: MembershipState, newState: MembershipState, pub: AnyRef ⇒ Unit): Unit = { + def inSameDc(reachabilityEvent: ReachabilityEvent): Boolean = + reachabilityEvent.member.dataCenter == selfDc + diffMemberEvents(oldState, newState) foreach pub diffUnreachable(oldState, newState) foreach pub diffReachable(oldState, newState) foreach pub diff --git a/akka-cluster/src/main/scala/akka/cluster/MembershipState.scala b/akka-cluster/src/main/scala/akka/cluster/MembershipState.scala index 99e10a273c..0c893f4e55 100644 --- a/akka-cluster/src/main/scala/akka/cluster/MembershipState.scala +++ b/akka-cluster/src/main/scala/akka/cluster/MembershipState.scala @@ -90,6 +90,9 @@ import scala.util.Random overview.reachability.removeObservers(membersToExclude).remove(members.collect { case m if m.dataCenter != selfDc ⇒ m.uniqueAddress }) } + lazy val dcReachabilityNoOutsideNodes: Reachability = + overview.reachability.remove(members.collect { case m if m.dataCenter != selfDc ⇒ m.uniqueAddress }) + /** * @return Up to `crossDcConnections` oldest members for each DC */ From a15e45992243e12eb250ce7f3f3efe7f08f765e9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johan=20Andr=C3=A9n?= Date: Mon, 10 Jul 2017 13:01:06 +0100 Subject: [PATCH 20/34] Merging did not prune vector clocks for tombstoned nodes #23318 --- .../scala/akka/cluster/ClusterDaemon.scala | 35 ++++++------------- .../src/main/scala/akka/cluster/Gossip.scala | 15 +++++--- .../test/scala/akka/cluster/GossipSpec.scala | 8 +++-- 3 files changed, 27 insertions(+), 31 deletions(-) diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterDaemon.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterDaemon.scala index c038c5ecb5..0b81eed8eb 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterDaemon.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterDaemon.scala @@ -3,31 +3,23 @@ */ package akka.cluster -import language.existentials -import scala.collection.{ SortedSet, breakOut, immutable, mutable } -import scala.concurrent.duration._ -import java.util.concurrent.ThreadLocalRandom - -import scala.util.control.NonFatal import akka.actor._ +import akka.annotation.InternalApi import akka.actor.SupervisorStrategy.Stop import akka.cluster.MemberStatus._ import akka.cluster.ClusterEvent._ -import akka.cluster.ClusterSettings.DataCenter import akka.dispatch.{ RequiresMessageQueue, UnboundedMessageQueueSemantics } -import akka.remote.QuarantinedEvent -import java.util.ArrayList -import java.util.Collections - -import akka.pattern.ask -import akka.util.Timeout import akka.Done -import akka.annotation.InternalApi -import akka.cluster.ClusterSettings.DataCenter +import akka.pattern.ask +import akka.remote.QuarantinedEvent +import akka.util.Timeout +import scala.collection.immutable +import scala.concurrent.duration._ import scala.concurrent.Future import 
scala.concurrent.Promise -import scala.util.Random +import scala.util.control.NonFatal +import language.existentials /** * Base trait for all cluster messages. All ClusterMessage's are serializable. @@ -235,7 +227,6 @@ private[cluster] final class ClusterDaemon(settings: ClusterSettings) extends Ac */ private[cluster] final class ClusterCoreSupervisor extends Actor with ActorLogging with RequiresMessageQueue[UnboundedMessageQueueSemantics] { - import InternalClusterAction._ // Important - don't use Cluster(context.system) in constructor because that would // cause deadlock. The Cluster extension is currently being created and is waiting @@ -274,8 +265,6 @@ private[cluster] final class ClusterCoreSupervisor extends Actor with ActorLoggi */ @InternalApi private[cluster] object ClusterCoreDaemon { - def vclockName(node: UniqueAddress): String = s"${node.address}-${node.longUid}" - val NumberOfGossipsBeforeShutdownWhenLeaderExits = 5 val MaxGossipsBeforeShuttingDownMyself = 5 @@ -300,7 +289,7 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef) extends Actor with protected def selfUniqueAddress = cluster.selfUniqueAddress - val vclockNode = VectorClock.Node(vclockName(selfUniqueAddress)) + val vclockNode = VectorClock.Node(Gossip.vclockName(selfUniqueAddress)) val gossipTargetSelector = new GossipTargetSelector( ReduceGossipDifferentViewProbability, cluster.settings.MultiDataCenter.CrossDcGossipProbability) @@ -737,8 +726,6 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef) extends Actor with def downing(address: Address): Unit = { val localGossip = latestGossip val localMembers = localGossip.members - val localOverview = localGossip.overview - val localSeen = localOverview.seen val localReachability = membershipState.dcReachability // check if the node to DOWN is in the `members` set @@ -847,14 +834,14 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef) extends Actor with val prunedLocalGossip = localGossip.members.foldLeft(localGossip) { (g, m) ⇒ if (removeUnreachableWithMemberStatus(m.status) && !remoteGossip.members.contains(m)) { log.debug("Cluster Node [{}] - Pruned conflicting local gossip: {}", selfAddress, m) - g.prune(VectorClock.Node(vclockName(m.uniqueAddress))) + g.prune(VectorClock.Node(Gossip.vclockName(m.uniqueAddress))) } else g } val prunedRemoteGossip = remoteGossip.members.foldLeft(remoteGossip) { (g, m) ⇒ if (removeUnreachableWithMemberStatus(m.status) && !localGossip.members.contains(m)) { log.debug("Cluster Node [{}] - Pruned conflicting remote gossip: {}", selfAddress, m) - g.prune(VectorClock.Node(vclockName(m.uniqueAddress))) + g.prune(VectorClock.Node(Gossip.vclockName(m.uniqueAddress))) } else g } diff --git a/akka-cluster/src/main/scala/akka/cluster/Gossip.scala b/akka-cluster/src/main/scala/akka/cluster/Gossip.scala index 9a05003004..cc42764524 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Gossip.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Gossip.scala @@ -22,6 +22,8 @@ private[cluster] object Gossip { def apply(members: immutable.SortedSet[Member]) = if (members.isEmpty) empty else empty.copy(members = members) + def vclockName(node: UniqueAddress): String = s"${node.address}-${node.longUid}" + } /** @@ -150,11 +152,14 @@ private[cluster] final case class Gossip( */ def merge(that: Gossip): Gossip = { - // 1. merge vector clocks - val mergedVClock = this.version merge that.version - - // 2. merge sets of tombstones + // 1. 
merge sets of tombstones val mergedTombstones = tombstones ++ that.tombstones + val newTombstonedNodes = mergedTombstones.keySet diff that.tombstones.keySet + + // 2. merge vector clocks (but remove entries for tombstoned nodes) + val mergedVClock = newTombstonedNodes.foldLeft(this.version merge that.version) { (vclock, node) ⇒ + vclock.prune(VectorClock.Node(Gossip.vclockName(node))) + } // 2. merge members by selecting the single Member with highest MemberStatus out of the Member groups val mergedMembers = Gossip.emptyMembers union Member.pickHighestPriority(this.members, that.members, mergedTombstones) @@ -228,7 +233,7 @@ private[cluster] final case class Gossip( // and will propagate as is if there are no other changes on other nodes. // If other concurrent changes on other nodes (e.g. join) the pruning is also // taken care of when receiving gossips. - val newVersion = version.prune(VectorClock.Node(ClusterCoreDaemon.vclockName(node))) + val newVersion = version.prune(VectorClock.Node(Gossip.vclockName(node))) val newMembers = members.filterNot(_.uniqueAddress == node) val newTombstones = tombstones + (node → removalTimestamp) copy(version = newVersion, members = newMembers, overview = newOverview, tombstones = newTombstones) diff --git a/akka-cluster/src/test/scala/akka/cluster/GossipSpec.scala b/akka-cluster/src/test/scala/akka/cluster/GossipSpec.scala index f4c29f2115..5ee3d0aea3 100644 --- a/akka-cluster/src/test/scala/akka/cluster/GossipSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/GossipSpec.scala @@ -7,7 +7,7 @@ package akka.cluster import org.scalatest.WordSpec import org.scalatest.Matchers import akka.actor.Address -import akka.cluster.ClusterSettings.DataCenter +import akka.cluster.Gossip.vclockName import akka.cluster.ClusterSettings.DefaultDataCenter import scala.collection.immutable.SortedSet @@ -354,9 +354,13 @@ class GossipSpec extends WordSpec with Matchers { "not reintroduce members from out-of data center gossip when merging" in { // dc1 does not know about any unreachability nor that the node has been downed val gdc1 = Gossip(members = SortedSet(dc1a1, dc1b1, dc2c1, dc2d1)) + .seen(dc1b1.uniqueAddress) + .seen(dc2c1.uniqueAddress) + .:+(VectorClock.Node(vclockName(dc2d1.uniqueAddress))) // just to make sure these are also pruned // dc2 has downed the dc2d1 node, seen it as unreachable and removed it val gdc2 = Gossip(members = SortedSet(dc1a1, dc1b1, dc2c1, dc2d1)) + .seen(dc1a1.uniqueAddress) .remove(dc2d1.uniqueAddress, System.currentTimeMillis()) gdc2.tombstones.keys should contain(dc2d1.uniqueAddress) @@ -372,7 +376,7 @@ class GossipSpec extends WordSpec with Matchers { merged1.members should not contain (dc2d1) merged1.overview.reachability.records.filter(r ⇒ r.subject == dc2d1.uniqueAddress || r.observer == dc2d1.uniqueAddress) should be(empty) merged1.overview.reachability.versions.keys should not contain (dc2d1.uniqueAddress) - + merged1.version.versions.keys should not contain (VectorClock.Node(vclockName(dc2d1.uniqueAddress))) } "prune old tombstones" in { From be5a0207bb0734e77924c269e82a88e96723b57a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johan=20Andr=C3=A9n?= Date: Tue, 11 Jul 2017 16:29:32 +0100 Subject: [PATCH 21/34] Prune version clocks based on merged tombstones when merging #23318 --- .../src/main/scala/akka/cluster/Gossip.scala | 3 +- .../test/scala/akka/cluster/GossipSpec.scala | 46 +++++++++++++++++-- 2 files changed, 43 insertions(+), 6 deletions(-) diff --git a/akka-cluster/src/main/scala/akka/cluster/Gossip.scala 
b/akka-cluster/src/main/scala/akka/cluster/Gossip.scala index cc42764524..82c777c019 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Gossip.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Gossip.scala @@ -154,10 +154,9 @@ private[cluster] final case class Gossip( // 1. merge sets of tombstones val mergedTombstones = tombstones ++ that.tombstones - val newTombstonedNodes = mergedTombstones.keySet diff that.tombstones.keySet // 2. merge vector clocks (but remove entries for tombstoned nodes) - val mergedVClock = newTombstonedNodes.foldLeft(this.version merge that.version) { (vclock, node) ⇒ + val mergedVClock = mergedTombstones.keys.foldLeft(this.version merge that.version) { (vclock, node) ⇒ vclock.prune(VectorClock.Node(Gossip.vclockName(node))) } diff --git a/akka-cluster/src/test/scala/akka/cluster/GossipSpec.scala b/akka-cluster/src/test/scala/akka/cluster/GossipSpec.scala index 5ee3d0aea3..68b379ed71 100644 --- a/akka-cluster/src/test/scala/akka/cluster/GossipSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/GossipSpec.scala @@ -339,16 +339,25 @@ class GossipSpec extends WordSpec with Matchers { // TODO test coverage for when leaderOf returns None - I have not been able to figure it out "clear out a bunch of stuff when removing a node" in { - val g = Gossip(members = SortedSet(dc1a1, dc1b1, dc2d2)) + val g = Gossip( + members = SortedSet(dc1a1, dc1b1, dc2d2), + overview = GossipOverview(reachability = + Reachability.empty + .unreachable(dc1b1.uniqueAddress, dc2d2.uniqueAddress) + .unreachable(dc2d2.uniqueAddress, dc1b1.uniqueAddress) + )) + .:+(VectorClock.Node(Gossip.vclockName(dc1b1.uniqueAddress))) + .:+(VectorClock.Node(Gossip.vclockName(dc2d2.uniqueAddress))) .remove(dc1b1.uniqueAddress, System.currentTimeMillis()) g.seenBy should not contain (dc1b1.uniqueAddress) - g.overview.reachability.records.exists(_.observer == dc1b1.uniqueAddress) should be(false) - g.overview.reachability.records.exists(_.subject == dc1b1.uniqueAddress) should be(false) - g.version.versions should have size (0) + g.overview.reachability.records.map(_.observer) should not contain (dc1b1.uniqueAddress) + g.overview.reachability.records.map(_.subject) should not contain (dc1b1.uniqueAddress) // sort order should be kept g.members.toList should ===(List(dc1a1, dc2d2)) + g.version.versions.keySet should not contain (VectorClock.Node(Gossip.vclockName(dc1b1.uniqueAddress))) + g.version.versions.keySet should contain(VectorClock.Node(Gossip.vclockName(dc2d2.uniqueAddress))) } "not reintroduce members from out-of data center gossip when merging" in { @@ -379,6 +388,35 @@ class GossipSpec extends WordSpec with Matchers { merged1.version.versions.keys should not contain (VectorClock.Node(vclockName(dc2d1.uniqueAddress))) } + "correctly prune vector clocks based on tombstones when merging" in { + val gdc1 = Gossip(members = SortedSet(dc1a1, dc1b1, dc2c1, dc2d1)) + .:+(VectorClock.Node(vclockName(dc1a1.uniqueAddress))) + .:+(VectorClock.Node(vclockName(dc1b1.uniqueAddress))) + .:+(VectorClock.Node(vclockName(dc2c1.uniqueAddress))) + .:+(VectorClock.Node(vclockName(dc2d1.uniqueAddress))) + .remove(dc1b1.uniqueAddress, System.currentTimeMillis()) + + gdc1.version.versions.keySet should not contain (VectorClock.Node(vclockName(dc1b1.uniqueAddress))) + + val gdc2 = Gossip(members = SortedSet(dc1a1, dc1b1, dc2c1, dc2d1)) + .seen(dc1a1.uniqueAddress) + .:+(VectorClock.Node(vclockName(dc1a1.uniqueAddress))) + .:+(VectorClock.Node(vclockName(dc1b1.uniqueAddress))) + 
.:+(VectorClock.Node(vclockName(dc2c1.uniqueAddress))) + .:+(VectorClock.Node(vclockName(dc2d1.uniqueAddress))) + .remove(dc2c1.uniqueAddress, System.currentTimeMillis()) + + gdc2.version.versions.keySet should not contain (VectorClock.Node(vclockName(dc2c1.uniqueAddress))) + + // when we merge the two, the nodes should not be reintroduced + val merged1 = gdc2 merge gdc1 + merged1.members should ===(SortedSet(dc1a1, dc2d1)) + + merged1.version.versions.keySet should ===(Set( + VectorClock.Node(vclockName(dc1a1.uniqueAddress)), + VectorClock.Node(vclockName(dc2d1.uniqueAddress)))) + } + "prune old tombstones" in { val timestamp = 352684800 val g = Gossip(members = SortedSet(dc1a1, dc1b1)) From 9c7e8d027a0512a967481ecdc7da8cf458fb6714 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johan=20Andr=C3=A9n?= Date: Wed, 12 Jul 2017 11:47:32 +0100 Subject: [PATCH 22/34] Renamed/moved the self data center setting #23312 (#23344) --- .../cluster/sharding/ClusterSharding.scala | 2 +- .../akka/cluster/sharding/ShardRegion.scala | 2 +- .../sharding/MultiDcClusterShardingSpec.scala | 4 ++-- .../singleton/ClusterSingletonManager.scala | 2 +- .../singleton/ClusterSingletonProxy.scala | 2 +- .../MultiDcSingletonManagerSpec.scala | 10 +++++----- akka-cluster/src/main/resources/reference.conf | 16 +++++++++------- .../src/main/scala/akka/cluster/Cluster.scala | 18 +++++++++--------- .../scala/akka/cluster/ClusterDaemon.scala | 4 ++-- .../main/scala/akka/cluster/ClusterEvent.scala | 4 ++-- .../scala/akka/cluster/ClusterSettings.scala | 6 +++--- .../akka/cluster/MultiDcClusterSpec.scala | 8 ++++---- .../akka/cluster/MultiDcSplitBrainSpec.scala | 4 ++-- .../akka/cluster/MultiDcSunnyWeatherSpec.scala | 4 ++-- .../akka/cluster/MultiNodeClusterSpec.scala | 2 +- .../scala/akka/cluster/ClusterConfigSpec.scala | 6 +++--- 16 files changed, 48 insertions(+), 46 deletions(-) diff --git a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ClusterSharding.scala b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ClusterSharding.scala index 5f9678d799..2c2e4f0cec 100644 --- a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ClusterSharding.scala +++ b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ClusterSharding.scala @@ -520,7 +520,7 @@ private[akka] class ClusterShardingGuardian extends Actor { case None ⇒ "replicator" } // Use members within the data center and with the given role (if any) - val replicatorRoles = Set(ClusterSettings.DcRolePrefix + cluster.settings.DataCenter) ++ settings.role + val replicatorRoles = Set(ClusterSettings.DcRolePrefix + cluster.settings.SelfDataCenter) ++ settings.role val ref = context.actorOf(Replicator.props(replicatorSettings.withRoles(replicatorRoles)), name) replicatorByRole = replicatorByRole.updated(settings.role, ref) ref diff --git a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardRegion.scala b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardRegion.scala index 380f6d8c56..f9d8aad224 100644 --- a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardRegion.scala +++ b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardRegion.scala @@ -426,7 +426,7 @@ private[akka] class ShardRegion( // when using proxy the data center can be different from the own data center private val targetDcRole = dataCenter match { case Some(t) ⇒ ClusterSettings.DcRolePrefix + t - case None ⇒ ClusterSettings.DcRolePrefix + cluster.settings.DataCenter + case None ⇒ ClusterSettings.DcRolePrefix + 
cluster.settings.SelfDataCenter } def matchingRole(member: Member): Boolean = diff --git a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/MultiDcClusterShardingSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/MultiDcClusterShardingSpec.scala index 54df6e3ebf..7a45b08500 100644 --- a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/MultiDcClusterShardingSpec.scala +++ b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/MultiDcClusterShardingSpec.scala @@ -62,11 +62,11 @@ object MultiDcClusterShardingSpecConfig extends MultiNodeConfig { """)) nodeConfig(first, second) { - ConfigFactory.parseString("akka.cluster.data-center = DC1") + ConfigFactory.parseString("akka.cluster.multi-data-center.self-data-center = DC1") } nodeConfig(third, fourth) { - ConfigFactory.parseString("akka.cluster.data-center = DC2") + ConfigFactory.parseString("akka.cluster.multi-data-center.self-data-center = DC2") } } diff --git a/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonManager.scala b/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonManager.scala index 535a929636..cd4f790a4e 100644 --- a/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonManager.scala +++ b/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonManager.scala @@ -257,7 +257,7 @@ object ClusterSingletonManager { } override def postStop(): Unit = cluster.unsubscribe(self) - private val selfDc = ClusterSettings.DcRolePrefix + cluster.settings.DataCenter + private val selfDc = ClusterSettings.DcRolePrefix + cluster.settings.SelfDataCenter def matchingRole(member: Member): Boolean = member.hasRole(selfDc) && role.forall(member.hasRole) diff --git a/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonProxy.scala b/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonProxy.scala index 57dd041218..54e3fd0552 100644 --- a/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonProxy.scala +++ b/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonProxy.scala @@ -179,7 +179,7 @@ final class ClusterSingletonProxy(singletonManagerPath: String, settings: Cluste private val targetDcRole = settings.dataCenter match { case Some(t) ⇒ ClusterSettings.DcRolePrefix + t - case None ⇒ ClusterSettings.DcRolePrefix + cluster.settings.DataCenter + case None ⇒ ClusterSettings.DcRolePrefix + cluster.settings.SelfDataCenter } def matchingRole(member: Member): Boolean = diff --git a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/MultiDcSingletonManagerSpec.scala b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/MultiDcSingletonManagerSpec.scala index 0a37bc1749..67a32a37a8 100644 --- a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/MultiDcSingletonManagerSpec.scala +++ b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/MultiDcSingletonManagerSpec.scala @@ -28,18 +28,18 @@ object MultiDcSingletonManagerSpec extends MultiNodeConfig { nodeConfig(controller) { ConfigFactory.parseString(""" - akka.cluster.data-center = one + akka.cluster.multi-data-center.self-data-center = one akka.cluster.roles = []""") } nodeConfig(first) { ConfigFactory.parseString(""" - akka.cluster.data-center = one + akka.cluster.multi-data-center.self-data-center = one akka.cluster.roles = [ worker ]""") } nodeConfig(second, third) { ConfigFactory.parseString(""" - akka.cluster.data-center = two + 
akka.cluster.multi-data-center.self-data-center = two akka.cluster.roles = [ worker ]""") } } @@ -56,7 +56,7 @@ class MultiDcSingleton extends Actor with ActorLogging { override def receive: Receive = { case Ping ⇒ - sender() ! Pong(cluster.settings.DataCenter, cluster.selfAddress, cluster.selfRoles) + sender() ! Pong(cluster.settings.SelfDataCenter, cluster.selfAddress, cluster.selfRoles) } } object MultiDcSingleton { @@ -98,7 +98,7 @@ abstract class MultiDcSingletonManagerSpec extends MultiNodeSpec(MultiDcSingleto enterBarrier("pongs-received") - pong.fromDc should equal(Cluster(system).settings.DataCenter) + pong.fromDc should equal(Cluster(system).settings.SelfDataCenter) pong.roles should contain(worker) runOn(controller, first) { pong.roles should contain(ClusterSettings.DcRolePrefix + "one") diff --git a/akka-cluster/src/main/resources/reference.conf b/akka-cluster/src/main/resources/reference.conf index ef0a32fd1b..c061fd63a1 100644 --- a/akka-cluster/src/main/resources/reference.conf +++ b/akka-cluster/src/main/resources/reference.conf @@ -65,18 +65,13 @@ akka { # move 'WeaklyUp' members to 'Up' status once convergence has been reached. allow-weakly-up-members = on - # Defines which data center this node belongs to. It is typically used to make islands of the - # cluster that are colocated. This can be used to make the cluster aware that it is running - # across multiple availability zones or regions. It can also be used for other logical - # grouping of nodes. - data-center = "default" - # The roles of this member. List of strings, e.g. roles = ["A", "B"]. # The roles are part of the membership information and can be used by # routers or other services to distribute work to certain member types, # e.g. front-end and back-end nodes. # Roles are not allowed to start with "dc-" as that is reserved for the - # special role assigned from the data-center a node belongs to (see above) + # special role assigned from the data-center a node belongs to (see the + # multi-data-center section below) roles = [] # Run the coordinated shutdown from phase 'cluster-shutdown' when the cluster @@ -211,6 +206,13 @@ akka { # if your cluster nodes are configured with at-least 2 different `akka.cluster.data-center` values. multi-data-center { + # Defines which data center this node belongs to. It is typically used to make islands of the + # cluster that are colocated. This can be used to make the cluster aware that it is running + # across multiple availability zones or regions. It can also be used for other logical + # grouping of nodes. + self-data-center = "default" + + # Try to limit the number of connections between data centers. Used for gossip and heartbeating. # This will not limit connections created for the messaging of the application. # If the cluster does not span multiple data centers, this value has no effect. 
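For illustration only (not part of any patch in this series), a minimal sketch of how the relocated setting is picked up, mirroring the pattern used in the ClusterConfigSpec change further down: the configured value is exposed as SelfDataCenter and also surfaces as a role with the reserved "dc-" prefix. The object name is invented for this sketch.

    import akka.cluster.ClusterSettings
    import com.typesafe.config.ConfigFactory

    object SelfDataCenterSettingSketch extends App {
      // akka-cluster's reference.conf must be on the classpath so the fallback resolves all other keys
      val config = ConfigFactory.parseString(
        """
        akka.cluster.roles = [ "worker" ]
        akka.cluster.multi-data-center.self-data-center = "west"
        """).withFallback(ConfigFactory.load())

      val settings = new ClusterSettings(config, "ClusterSystem")

      println(settings.SelfDataCenter) // "west"
      println(settings.Roles)          // contains "worker" plus the derived "dc-west" role
    }

The derived "dc-west" role is added by ClusterSettings itself, which is why user-defined roles are not allowed to start with "dc-".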
diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index dde1cf19ba..f0b47a99c2 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -79,7 +79,7 @@ class Cluster(val system: ExtendedActorSystem) extends Extension { def selfAddress: Address = selfUniqueAddress.address /** Data center to which this node belongs to (defaults to "default" if not configured explicitly) */ - def selfDataCenter: DataCenter = settings.DataCenter + def selfDataCenter: DataCenter = settings.SelfDataCenter /** * roles that this member has @@ -434,31 +434,31 @@ class Cluster(val system: ExtendedActorSystem) extends Extension { def logInfo(message: String): Unit = if (LogInfo) - if (settings.DataCenter == ClusterSettings.DefaultDataCenter) + if (settings.SelfDataCenter == ClusterSettings.DefaultDataCenter) log.info("Cluster Node [{}] - {}", selfAddress, message) else - log.info("Cluster Node [{}] dc [{}] - {}", selfAddress, settings.DataCenter, message) + log.info("Cluster Node [{}] dc [{}] - {}", selfAddress, settings.SelfDataCenter, message) def logInfo(template: String, arg1: Any): Unit = if (LogInfo) - if (settings.DataCenter == ClusterSettings.DefaultDataCenter) + if (settings.SelfDataCenter == ClusterSettings.DefaultDataCenter) log.info("Cluster Node [{}] - " + template, selfAddress, arg1) else - log.info("Cluster Node [{}] dc [{}] - " + template, selfAddress, settings.DataCenter, arg1) + log.info("Cluster Node [{}] dc [{}] - " + template, selfAddress, settings.SelfDataCenter, arg1) def logInfo(template: String, arg1: Any, arg2: Any): Unit = if (LogInfo) - if (settings.DataCenter == ClusterSettings.DefaultDataCenter) + if (settings.SelfDataCenter == ClusterSettings.DefaultDataCenter) log.info("Cluster Node [{}] - " + template, selfAddress, arg1, arg2) else - log.info("Cluster Node [{}] dc [{}] - " + template, selfAddress, settings.DataCenter, arg1, arg2) + log.info("Cluster Node [{}] dc [{}] - " + template, selfAddress, settings.SelfDataCenter, arg1, arg2) def logInfo(template: String, arg1: Any, arg2: Any, arg3: Any): Unit = if (LogInfo) - if (settings.DataCenter == ClusterSettings.DefaultDataCenter) + if (settings.SelfDataCenter == ClusterSettings.DefaultDataCenter) log.info("Cluster Node [{}] - " + template, selfAddress, arg1, arg2, arg3) else - log.info("Cluster Node [{}] dc [" + settings.DataCenter + "] - " + template, selfAddress, arg1, arg2, arg3) + log.info("Cluster Node [{}] dc [" + settings.SelfDataCenter + "] - " + template, selfAddress, arg1, arg2, arg3) } } diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterDaemon.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterDaemon.scala index 0b81eed8eb..b16fe4328e 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterDaemon.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterDaemon.scala @@ -299,7 +299,7 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef) extends Actor with var membershipState = MembershipState( Gossip.empty, cluster.selfUniqueAddress, - cluster.settings.DataCenter, + cluster.settings.SelfDataCenter, cluster.settings.MultiDataCenter.CrossDcConnections) def latestGossip: Gossip = membershipState.latestGossip @@ -1230,7 +1230,7 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef) extends Actor with def publishMembershipState(): Unit = { if (cluster.settings.Debug.VerboseGossipLogging) - log.debug("Cluster Node [{}] dc [{}] - New gossip 
published [{}]", selfAddress, cluster.settings.DataCenter, membershipState.latestGossip) + log.debug("Cluster Node [{}] dc [{}] - New gossip published [{}]", selfAddress, cluster.settings.SelfDataCenter, membershipState.latestGossip) publisher ! PublishChanges(membershipState) if (PublishStatsInterval == Duration.Zero) publishInternalStats() diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterEvent.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterEvent.scala index 91b6859f75..a296ac8c2c 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterEvent.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterEvent.scala @@ -378,10 +378,10 @@ private[cluster] final class ClusterDomainEventPublisher extends Actor with Acto val emptyMembershipState = MembershipState( Gossip.empty, cluster.selfUniqueAddress, - cluster.settings.DataCenter, + cluster.settings.SelfDataCenter, cluster.settings.MultiDataCenter.CrossDcConnections) var membershipState: MembershipState = emptyMembershipState - def selfDc = cluster.settings.DataCenter + def selfDc = cluster.settings.SelfDataCenter override def preRestart(reason: Throwable, message: Option[Any]) { // don't postStop when restarted, no children to stop diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala index 63a4cec36f..29a6c8a884 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala @@ -137,14 +137,14 @@ final class ClusterSettings(val config: Config, val systemName: String) { val AllowWeaklyUpMembers: Boolean = cc.getBoolean("allow-weakly-up-members") - val DataCenter: DataCenter = cc.getString("data-center") + val SelfDataCenter: DataCenter = cc.getString("multi-data-center.self-data-center") val Roles: Set[String] = { val configuredRoles = (immutableSeq(cc.getStringList("roles")).toSet) requiring ( _.forall(!_.startsWith(DcRolePrefix)), - s"Roles must not start with '${DcRolePrefix}' as that is reserved for the cluster data-center setting") + s"Roles must not start with '${DcRolePrefix}' as that is reserved for the cluster self-data-center setting") - configuredRoles + s"$DcRolePrefix$DataCenter" + configuredRoles + s"$DcRolePrefix$SelfDataCenter" } val MinNrOfMembers: Int = { diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcClusterSpec.scala index 91ae496fe0..fbfabc3846 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcClusterSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcClusterSpec.scala @@ -25,12 +25,12 @@ class MultiDcSpecConfig(crossDcConnections: Int = 5) extends MultiNodeConfig { nodeConfig(first, second)(ConfigFactory.parseString( """ - akka.cluster.data-center = "dc1" + akka.cluster.multi-data-center.self-data-center = "dc1" """)) nodeConfig(third, fourth, fifth)(ConfigFactory.parseString( """ - akka.cluster.data-center = "dc2" + akka.cluster.multi-data-center.self-data-center = "dc2" """)) testTransport(on = true) @@ -80,13 +80,13 @@ abstract class MultiDcSpec(config: MultiDcSpecConfig) "have a leader per data center" in { runOn(first, second) { - cluster.settings.DataCenter should ===("dc1") + cluster.settings.SelfDataCenter should ===("dc1") clusterView.leader shouldBe defined val dc1 = Set(address(first), address(second)) dc1 should contain(clusterView.leader.get) } runOn(third, fourth) { - 
cluster.settings.DataCenter should ===("dc2") + cluster.settings.SelfDataCenter should ===("dc2") clusterView.leader shouldBe defined val dc2 = Set(address(third), address(fourth)) dc2 should contain(clusterView.leader.get) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcSplitBrainSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcSplitBrainSpec.scala index d41c0d7608..458d5ab03d 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcSplitBrainSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcSplitBrainSpec.scala @@ -24,12 +24,12 @@ object MultiDcSplitBrainMultiJvmSpec extends MultiNodeConfig { nodeConfig(first, second)(ConfigFactory.parseString( """ - akka.cluster.data-center = "dc1" + akka.cluster.multi-data-center.self-data-center = "dc1" """)) nodeConfig(third, fourth)(ConfigFactory.parseString( """ - akka.cluster.data-center = "dc2" + akka.cluster.multi-data-center.self-data-center = "dc2" """)) testTransport(on = true) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcSunnyWeatherSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcSunnyWeatherSpec.scala index 7a0fff7130..9c738291a8 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcSunnyWeatherSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcSunnyWeatherSpec.scala @@ -23,14 +23,14 @@ object MultiDcSunnyWeatherMultiJvmSpec extends MultiNodeConfig { nodeConfig(first, second, third)(ConfigFactory.parseString( """ akka { - cluster.data-center = alpha + cluster.multi-data-center.self-data-center = alpha } """)) nodeConfig(fourth, fifth)(ConfigFactory.parseString( """ akka { - cluster.data-center = beta + cluster.multi-data-center.self-data-center = beta } """)) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala index cb7f0e666b..fa461bc769 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala @@ -306,7 +306,7 @@ trait MultiNodeClusterSpec extends Suite with STMultiNodeSpec with WatchedByCoro awaitAssert(clusterView.members.map(_.status) should ===(Set(MemberStatus.Up))) // clusterView.leader is updated by LeaderChanged, await that to be updated also val expectedLeader = clusterView.members.collectFirst { - case m if m.dataCenter == cluster.settings.DataCenter ⇒ m.address + case m if m.dataCenter == cluster.settings.SelfDataCenter ⇒ m.address } awaitAssert(clusterView.leader should ===(expectedLeader)) } diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala index 5bcec3aa07..97cf9df6a7 100644 --- a/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala @@ -45,7 +45,7 @@ class ClusterConfigSpec extends AkkaSpec { DownRemovalMargin should ===(Duration.Zero) MinNrOfMembers should ===(1) MinNrOfMembersOfRole should ===(Map.empty[String, Int]) - DataCenter should ===("default") + SelfDataCenter should ===("default") Roles should ===(Set(ClusterSettings.DcRolePrefix + "default")) JmxEnabled should ===(true) UseDispatcher should ===(Dispatchers.DefaultDispatcherId) @@ -61,13 +61,13 @@ class ClusterConfigSpec extends AkkaSpec { |akka { | cluster { | roles = [ "hamlet" ] - | data-center = "blue" + | multi-data-center.self-data-center = 
"blue" | } |} """.stripMargin).withFallback(ConfigFactory.load()), system.name) import settings._ Roles should ===(Set("hamlet", ClusterSettings.DcRolePrefix + "blue")) - DataCenter should ===("blue") + SelfDataCenter should ===("blue") } } } From 73d3c5db5d31a07d0a3d2c2f1315801023fb78ff Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Martynas=20Mickevi=C4=8Dius?= Date: Wed, 12 Jul 2017 15:48:15 +0300 Subject: [PATCH 23/34] DC reachability events #23245 --- .../scala/akka/cluster/ClusterEvent.scala | 53 +++++++++++++++++ .../scala/akka/cluster/MembershipState.scala | 10 +++- .../scala/akka/cluster/Reachability.scala | 3 + .../ClusterDomainEventPublisherSpec.scala | 21 ++++++- .../akka/cluster/ClusterDomainEventSpec.scala | 59 +++++++++++++++++++ akka-docs/.history | 1 - 6 files changed, 143 insertions(+), 4 deletions(-) delete mode 100644 akka-docs/.history diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterEvent.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterEvent.scala index a296ac8c2c..181ee76774 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterEvent.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterEvent.scala @@ -244,6 +244,22 @@ object ClusterEvent { */ final case class ReachableMember(member: Member) extends ReachabilityEvent + /** + * Marker interface to facilitate subscription of + * both [[UnreachableDataCenter]] and [[ReachableDataCenter]]. + */ + sealed trait DataCenterReachabilityEvent extends ClusterDomainEvent + + /** + * A data center is considered as unreachable when any members from the data center are unreachable + */ + final case class UnreachableDataCenter(dataCenter: DataCenter) extends DataCenterReachabilityEvent + + /** + * A data center is considered reachable when all members from the data center are reachable + */ + final case class ReachableDataCenter(dataCenter: DataCenter) extends DataCenterReachabilityEvent + /** * INTERNAL API * The nodes that have seen current version of the Gossip. 
@@ -289,6 +305,41 @@ object ClusterEvent { }(collection.breakOut) } + private def isReachable(state: MembershipState, oldUnreachableNodes: Set[UniqueAddress])(otherDc: DataCenter): Boolean = { + val unrelatedDcNodes = state.latestGossip.members.collect { + case m if m.dataCenter != otherDc && m.dataCenter != state.selfDc ⇒ m.uniqueAddress + } + + val reachabilityForOtherDc = state.dcReachabilityWithoutObservationsWithin.remove(unrelatedDcNodes) + reachabilityForOtherDc.allUnreachable.filterNot(oldUnreachableNodes).isEmpty + } + + /** + * INTERNAL API + */ + private[cluster] def diffUnreachableDataCenter(oldState: MembershipState, newState: MembershipState): immutable.Seq[UnreachableDataCenter] = { + if (newState eq oldState) Nil + else { + val otherDcs = (oldState.latestGossip.allDataCenters union newState.latestGossip.allDataCenters) - newState.selfDc + otherDcs.filterNot(isReachable(newState, oldState.dcReachability.allUnreachableOrTerminated)).map(UnreachableDataCenter)(collection.breakOut) + } + } + + /** + * INTERNAL API + */ + private[cluster] def diffReachableDataCenter(oldState: MembershipState, newState: MembershipState): immutable.Seq[ReachableDataCenter] = { + if (newState eq oldState) Nil + else { + val otherDcs = (oldState.latestGossip.allDataCenters union newState.latestGossip.allDataCenters) - newState.selfDc + + val oldUnreachableDcs = otherDcs.filterNot(isReachable(oldState, Set())) + val currentUnreachableDcs = otherDcs.filterNot(isReachable(newState, Set())) + + (oldUnreachableDcs diff currentUnreachableDcs).map(ReachableDataCenter)(collection.breakOut) + } + } + /** * INTERNAL API. */ @@ -457,6 +508,8 @@ private[cluster] final class ClusterDomainEventPublisher extends Actor with Acto diffMemberEvents(oldState, newState) foreach pub diffUnreachable(oldState, newState) foreach pub diffReachable(oldState, newState) foreach pub + diffUnreachableDataCenter(oldState, newState) foreach pub + diffReachableDataCenter(oldState, newState) foreach pub diffLeader(oldState, newState) foreach pub diffRolesLeader(oldState, newState) foreach pub // publish internal SeenState for testing purposes diff --git a/akka-cluster/src/main/scala/akka/cluster/MembershipState.scala b/akka-cluster/src/main/scala/akka/cluster/MembershipState.scala index 0c893f4e55..1cf99419c4 100644 --- a/akka-cluster/src/main/scala/akka/cluster/MembershipState.scala +++ b/akka-cluster/src/main/scala/akka/cluster/MembershipState.scala @@ -79,8 +79,14 @@ import scala.util.Random * nodes outside of the data center */ lazy val dcReachability: Reachability = - overview.reachability.removeObservers( - members.collect { case m if m.dataCenter != selfDc ⇒ m.uniqueAddress }) + overview.reachability.removeObservers(members.collect { case m if m.dataCenter != selfDc ⇒ m.uniqueAddress }) + + /** + * @return Reachability excluding observations from nodes outside of the data center and observations within self data center, + * but including observed unreachable nodes outside of the data center + */ + lazy val dcReachabilityWithoutObservationsWithin: Reachability = + dcReachability.filterRecords { r ⇒ latestGossip.member(r.subject).dataCenter != selfDc } /** * @return reachability for data center nodes, with observations from outside the data center or from downed nodes filtered out diff --git a/akka-cluster/src/main/scala/akka/cluster/Reachability.scala b/akka-cluster/src/main/scala/akka/cluster/Reachability.scala index 3afe0bc8ab..2bc50cd44b 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Reachability.scala +++ 
b/akka-cluster/src/main/scala/akka/cluster/Reachability.scala @@ -209,6 +209,9 @@ private[cluster] class Reachability private ( Reachability(newRecords, newVersions) } + def filterRecords(f: Record ⇒ Boolean) = + Reachability(records.filter(f), versions) + def status(observer: UniqueAddress, subject: UniqueAddress): ReachabilityStatus = observerRows(observer) match { case None ⇒ Reachable diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterDomainEventPublisherSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterDomainEventPublisherSpec.scala index e86fe2a1b5..ce21fa37be 100644 --- a/akka-cluster/src/test/scala/akka/cluster/ClusterDomainEventPublisherSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/ClusterDomainEventPublisherSpec.scala @@ -38,6 +38,8 @@ class ClusterDomainEventPublisherSpec extends AkkaSpec(ClusterDomainEventPublish var publisher: ActorRef = _ + final val OtherDataCenter = "dc2" + val aUp = TestMember(Address(protocol, "sys", "a", 2552), Up) val aLeaving = aUp.copy(status = Leaving) val aExiting = aLeaving.copy(status = Exiting) @@ -49,6 +51,7 @@ class ClusterDomainEventPublisherSpec extends AkkaSpec(ClusterDomainEventPublish val cRemoved = cUp.copy(status = Removed) val a51Up = TestMember(Address(protocol, "sys", "a", 2551), Up) val dUp = TestMember(Address(protocol, "sys", "d", 2552), Up, Set("GRP")) + val eUp = TestMember(Address(protocol, "sys", "e", 2552), Up, Set("GRP"), OtherDataCenter) private def state(gossip: Gossip, self: UniqueAddress, dc: DataCenter) = MembershipState(gossip, self, DefaultDataCenter, crossDcConnections = 5) @@ -74,6 +77,12 @@ class ClusterDomainEventPublisherSpec extends AkkaSpec(ClusterDomainEventPublish val g8 = Gossip(members = SortedSet(aUp, bExiting, cUp, dUp), overview = GossipOverview(reachability = Reachability.empty.unreachable(aUp.uniqueAddress, dUp.uniqueAddress))).seen(aUp.uniqueAddress) val state8 = state(g8, aUp.uniqueAddress, DefaultDataCenter) + val g9 = Gossip(members = SortedSet(aUp, bExiting, cUp, dUp, eUp), overview = GossipOverview(reachability = + Reachability.empty.unreachable(aUp.uniqueAddress, eUp.uniqueAddress))) + val state9 = state(g9, aUp.uniqueAddress, DefaultDataCenter) + val g10 = Gossip(members = SortedSet(aUp, bExiting, cUp, dUp, eUp), overview = GossipOverview(reachability = + Reachability.empty)) + val state10 = state(g10, aUp.uniqueAddress, DefaultDataCenter) // created in beforeEach var memberSubscriber: TestProbe = _ @@ -160,7 +169,6 @@ class ClusterDomainEventPublisherSpec extends AkkaSpec(ClusterDomainEventPublish subscriber.expectMsgType[CurrentClusterState] // but only to the new subscriber memberSubscriber.expectNoMsg(500 millis) - } "send events corresponding to current state when subscribe" in { @@ -172,6 +180,17 @@ class ClusterDomainEventPublisherSpec extends AkkaSpec(ClusterDomainEventPublish subscriber.expectNoMsg(500 millis) } + "send datacenter reachability events" in { + val subscriber = TestProbe() + publisher ! PublishChanges(state9) + publisher ! Subscribe(subscriber.ref, InitialStateAsEvents, Set(classOf[DataCenterReachabilityEvent])) + subscriber.expectMsg(UnreachableDataCenter(OtherDataCenter)) + subscriber.expectNoMsg(500 millis) + publisher ! PublishChanges(state10) + subscriber.expectMsg(ReachableDataCenter(OtherDataCenter)) + subscriber.expectNoMsg(500 millis) + } + "support unsubscribe" in { val subscriber = TestProbe() publisher ! 
Subscribe(subscriber.ref, InitialStateAsSnapshot, Set(classOf[MemberEvent])) diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterDomainEventSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterDomainEventSpec.scala index 0ad9e55da9..505d52e7a3 100644 --- a/akka-cluster/src/test/scala/akka/cluster/ClusterDomainEventSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/ClusterDomainEventSpec.scala @@ -89,6 +89,65 @@ class ClusterDomainEventSpec extends WordSpec with Matchers { diffSeen(state(g1), state(g2)) should ===(Seq.empty) } + "be produced for reachability observations between data centers" in { + val dc2AMemberUp = TestMember(Address("akka.tcp", "sys", "dc2A", 2552), Up, Set.empty, "dc2") + val dc2AMemberDown = TestMember(Address("akka.tcp", "sys", "dc2A", 2552), Down, Set.empty, "dc2") + val dc2BMemberUp = TestMember(Address("akka.tcp", "sys", "dc2B", 2552), Up, Set.empty, "dc2") + + val dc3AMemberUp = TestMember(Address("akka.tcp", "sys", "dc3A", 2552), Up, Set.empty, "dc3") + val dc3BMemberUp = TestMember(Address("akka.tcp", "sys", "dc3B", 2552), Up, Set.empty, "dc3") + + val reachability1 = Reachability.empty + val g1 = Gossip(members = SortedSet(aUp, bUp, dc2AMemberUp, dc2BMemberUp, dc3AMemberUp, dc3BMemberUp), overview = GossipOverview(reachability = reachability1)) + + val reachability2 = reachability1 + .unreachable(aUp.uniqueAddress, dc2AMemberDown.uniqueAddress) + .unreachable(dc2BMemberUp.uniqueAddress, dc2AMemberDown.uniqueAddress) + val g2 = Gossip(members = SortedSet(aUp, bUp, dc2AMemberDown, dc2BMemberUp, dc3AMemberUp, dc3BMemberUp), overview = GossipOverview(reachability = reachability2)) + + Set(aUp, bUp, dc2AMemberUp, dc2BMemberUp, dc3AMemberUp, dc3BMemberUp).foreach { member ⇒ + val otherDc = + if (member.dataCenter == ClusterSettings.DefaultDataCenter) Seq("dc2") + else Seq() + + diffUnreachableDataCenter( + MembershipState(g1, member.uniqueAddress, member.dataCenter, crossDcConnections = 5), + MembershipState(g2, member.uniqueAddress, member.dataCenter, crossDcConnections = 5)) should ===(otherDc.map(UnreachableDataCenter)) + + diffReachableDataCenter( + MembershipState(g2, member.uniqueAddress, member.dataCenter, crossDcConnections = 5), + MembershipState(g1, member.uniqueAddress, member.dataCenter, crossDcConnections = 5)) should ===(otherDc.map(ReachableDataCenter)) + } + } + + "not be produced for same reachability observations between data centers" in { + val dc2AMemberUp = TestMember(Address("akka.tcp", "sys", "dc2A", 2552), Up, Set.empty, "dc2") + val dc2AMemberDown = TestMember(Address("akka.tcp", "sys", "dc2A", 2552), Down, Set.empty, "dc2") + + val reachability1 = Reachability.empty + val g1 = Gossip(members = SortedSet(aUp, dc2AMemberUp), overview = GossipOverview(reachability = reachability1)) + + val reachability2 = reachability1 + .unreachable(aUp.uniqueAddress, dc2AMemberDown.uniqueAddress) + val g2 = Gossip(members = SortedSet(aUp, dc2AMemberDown), overview = GossipOverview(reachability = reachability2)) + + diffUnreachableDataCenter( + MembershipState(g1, aUp.uniqueAddress, aUp.dataCenter, crossDcConnections = 5), + MembershipState(g1, aUp.uniqueAddress, aUp.dataCenter, crossDcConnections = 5)) should ===(Seq()) + + diffUnreachableDataCenter( + MembershipState(g2, aUp.uniqueAddress, aUp.dataCenter, crossDcConnections = 5), + MembershipState(g2, aUp.uniqueAddress, aUp.dataCenter, crossDcConnections = 5)) should ===(Seq()) + + diffReachableDataCenter( + MembershipState(g1, aUp.uniqueAddress, aUp.dataCenter, crossDcConnections = 
5), + MembershipState(g1, aUp.uniqueAddress, aUp.dataCenter, crossDcConnections = 5)) should ===(Seq()) + + diffReachableDataCenter( + MembershipState(g2, aUp.uniqueAddress, aUp.dataCenter, crossDcConnections = 5), + MembershipState(g2, aUp.uniqueAddress, aUp.dataCenter, crossDcConnections = 5)) should ===(Seq()) + } + "be produced for members becoming reachable after unreachable" in { val reachability1 = Reachability.empty. unreachable(aUp.uniqueAddress, cUp.uniqueAddress).reachable(aUp.uniqueAddress, cUp.uniqueAddress). diff --git a/akka-docs/.history b/akka-docs/.history deleted file mode 100644 index a3abe50906..0000000000 --- a/akka-docs/.history +++ /dev/null @@ -1 +0,0 @@ -exit From eb24033cc0cb0671225007452d236314d25db621 Mon Sep 17 00:00:00 2001 From: Konrad `ktoso` Malawski Date: Thu, 13 Jul 2017 12:50:28 +0200 Subject: [PATCH 24/34] =clu,dc #23340 additional test to see a node take over monitoring of remote DC (#23342) --- .../cluster/CrossDcClusterHeartbeat.scala | 2 +- .../MultiDcHeartbeatTakingOverSpec.scala | 194 ++++++++++++++++++ .../akka/cluster/MultiNodeClusterSpec.scala | 48 ++++- 3 files changed, 239 insertions(+), 5 deletions(-) create mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcHeartbeatTakingOverSpec.scala diff --git a/akka-cluster/src/main/scala/akka/cluster/CrossDcClusterHeartbeat.scala b/akka-cluster/src/main/scala/akka/cluster/CrossDcClusterHeartbeat.scala index 2a5c9585b3..6f187be058 100644 --- a/akka-cluster/src/main/scala/akka/cluster/CrossDcClusterHeartbeat.scala +++ b/akka-cluster/src/main/scala/akka/cluster/CrossDcClusterHeartbeat.scala @@ -179,7 +179,7 @@ private[cluster] final class CrossDcHeartbeatSender extends Actor with ActorLogg /** Idempotent, become active if this node is n-th oldest and should monitor other nodes */ private def becomeActiveIfResponsibleForHeartbeat(): Unit = { if (!activelyMonitoring && selfIsResponsibleForCrossDcHeartbeat()) { - if (verboseHeartbeat) log.debug("Becoming ACTIVE (for DC: {}), monitoring other DCs oldest nodes", selfDataCenter) + log.info("Cross DC heartbeat becoming ACTIVE on this node (for DC: {}), monitoring other DCs oldest nodes", selfDataCenter) activelyMonitoring = true context.become(active orElse introspecting) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcHeartbeatTakingOverSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcHeartbeatTakingOverSpec.scala new file mode 100644 index 0000000000..71fab2632c --- /dev/null +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcHeartbeatTakingOverSpec.scala @@ -0,0 +1,194 @@ +/** + * Copyright (C) 2009-2017 Lightbend Inc. 
+ */ +package akka.cluster + +import akka.actor.ActorSelection +import akka.annotation.InternalApi +import akka.remote.testconductor.RoleName +import akka.remote.testkit.{ MultiNodeConfig, MultiNodeSpec } +import akka.testkit._ +import com.typesafe.config.ConfigFactory + +import scala.collection.immutable +import scala.collection.immutable.SortedSet +import scala.concurrent.duration._ + +object MultiDcHeartbeatTakingOverSpecMultiJvmSpec extends MultiNodeConfig { + val first = role("first") // alpha + val second = role("second") // alpha + val third = role("third") // alpha + + val fourth = role("fourth") // beta + val fifth = role("fifth") // beta + + nodeConfig(first, second, third)(ConfigFactory.parseString( + """ + akka { + cluster.multi-data-center.self-data-center = alpha + } + """)) + + nodeConfig(fourth, fifth)(ConfigFactory.parseString( + """ + akka { + cluster.multi-data-center.self-data-center = beta + } + """)) + + commonConfig(ConfigFactory.parseString( + """ + akka { + actor.provider = cluster + + loggers = ["akka.testkit.TestEventListener"] + loglevel = INFO + + remote.log-remote-lifecycle-events = off + + cluster { + debug.verbose-heartbeat-logging = off + + multi-data-center { + cross-data-center-connections = 2 + } + } + } + """)) + +} + +class MultiDcHeartbeatTakingOverSpecMultiJvmNode1 extends MultiDcHeartbeatTakingOverSpec +class MultiDcHeartbeatTakingOverSpecMultiJvmNode2 extends MultiDcHeartbeatTakingOverSpec +class MultiDcHeartbeatTakingOverSpecMultiJvmNode3 extends MultiDcHeartbeatTakingOverSpec +class MultiDcHeartbeatTakingOverSpecMultiJvmNode4 extends MultiDcHeartbeatTakingOverSpec +class MultiDcHeartbeatTakingOverSpecMultiJvmNode5 extends MultiDcHeartbeatTakingOverSpec + +abstract class MultiDcHeartbeatTakingOverSpec extends MultiNodeSpec(MultiDcHeartbeatTakingOverSpecMultiJvmSpec) + with MultiNodeClusterSpec { + + "A 2-dc cluster" must { + + val observer: TestProbe = TestProbe("alpha-observer") + + val crossDcHeartbeatSenderPath = "/system/cluster/core/daemon/crossDcHeartbeatSender" + val selectCrossDcHeartbeatSender: ActorSelection = system.actorSelection(crossDcHeartbeatSenderPath) + + // these will be filled in during the initial phase of the test ----------- + var expectedAlphaHeartbeaterNodes: SortedSet[Member] = SortedSet.empty + var expectedAlphaHeartbeaterRoles: SortedSet[RoleName] = SortedSet.empty + + var expectedBetaHeartbeaterNodes: SortedSet[Member] = SortedSet.empty + var expectedBetaHeartbeaterRoles: SortedSet[RoleName] = SortedSet.empty + + var expectedNoActiveHeartbeatSenderRoles: Set[RoleName] = Set.empty + // end of these will be filled in during the initial phase of the test ----------- + + def refreshOldestMemberHeartbeatStatuses() = { + expectedAlphaHeartbeaterNodes = takeNOldestMembers(_.dataCenter == "alpha", 2) + expectedAlphaHeartbeaterRoles = membersAsRoles(expectedAlphaHeartbeaterNodes) + + expectedBetaHeartbeaterNodes = takeNOldestMembers(_.dataCenter == "beta", 2) + expectedBetaHeartbeaterRoles = membersAsRoles(expectedBetaHeartbeaterNodes) + + expectedNoActiveHeartbeatSenderRoles = roles.toSet -- (expectedAlphaHeartbeaterRoles union expectedBetaHeartbeaterRoles) + } + + "collect information on oldest nodes" taggedAs LongRunningTest in { + // allow all nodes to join: + awaitClusterUp(roles: _*) + + refreshOldestMemberHeartbeatStatuses() + info(s"expectedAlphaHeartbeaterNodes = ${expectedAlphaHeartbeaterNodes.map(_.address.port.get)}") + info(s"expectedBetaHeartbeaterNodes = ${expectedBetaHeartbeaterNodes.map(_.address.port.get)}") + 
info(s"expectedNoActiveHeartbeatSenderRoles = ${expectedNoActiveHeartbeatSenderRoles.map(_.port.get)}") + + expectedAlphaHeartbeaterRoles.size should ===(2) + expectedBetaHeartbeaterRoles.size should ===(2) + + enterBarrier("found-expectations") + } + + "be healthy" taggedAs LongRunningTest in { + implicit val sender = observer.ref + runOn(expectedAlphaHeartbeaterRoles.toList: _*) { + selectCrossDcHeartbeatSender ! CrossDcHeartbeatSender.ReportStatus() + observer.expectMsgType[CrossDcHeartbeatSender.MonitoringActive](5.seconds) + } + runOn(expectedBetaHeartbeaterRoles.toList: _*) { + selectCrossDcHeartbeatSender ! CrossDcHeartbeatSender.ReportStatus() + observer.expectMsgType[CrossDcHeartbeatSender.MonitoringActive](5.seconds) + } + runOn(expectedNoActiveHeartbeatSenderRoles.toList: _*) { + selectCrossDcHeartbeatSender ! CrossDcHeartbeatSender.ReportStatus() + observer.expectMsgType[CrossDcHeartbeatSender.MonitoringDormant](5.seconds) + } + + enterBarrier("sunny-weather-done") + } + + "other node must become oldest when current DC-oldest Leaves" taggedAs LongRunningTest in { + val observer = TestProbe("alpha-observer-prime") + + // we leave one of the current oldest nodes of the `alpha` DC, + // since it has 3 members the "not yet oldest" one becomes oldest and should start monitoring across datacenter + val preLeaveOldestAlphaRole = expectedAlphaHeartbeaterRoles.head + val preLeaveOldestAlphaAddress = expectedAlphaHeartbeaterNodes.find(_.address.port.get == preLeaveOldestAlphaRole.port.get).get.address + runOn(preLeaveOldestAlphaRole) { + info(s"Leaving: ${preLeaveOldestAlphaAddress}") + cluster.leave(cluster.selfAddress) + } + + awaitMemberRemoved(preLeaveOldestAlphaAddress) + enterBarrier("wat") + + // refresh our view about who is currently monitoring things in alpha: + refreshOldestMemberHeartbeatStatuses() + + enterBarrier("after-alpha-monitoring-node-left") + + implicit val sender = observer.ref + val expectedAlphaMonitoringNodesAfterLeaving = (takeNOldestMembers(_.dataCenter == "alpha", 3).filterNot(_.status == MemberStatus.Exiting)) + runOn(membersAsRoles(expectedAlphaMonitoringNodesAfterLeaving).toList: _*) { + awaitAssert({ + + selectCrossDcHeartbeatSender ! CrossDcHeartbeatSender.ReportStatus() + + try { + observer.expectMsgType[CrossDcHeartbeatSender.MonitoringActive](5.seconds) + info(s"Got confirmation from ${observer.lastSender} that it is actively monitoring now") + } catch { + case ex: Throwable ⇒ + throw new AssertionError(s"Monitoring was Dormant on ${cluster.selfAddress}, where we expected it to be active!", ex) + } + }, 20.seconds) + } + enterBarrier("confirmed-heartbeating-take-over") + } + } + + /** + * INTERNAL API + * Returns `Up` (or in "later" status, like Leaving etc, but never `Joining` or `WeaklyUp`) members, + * sorted by Member.ageOrdering (from oldest to youngest). This restriction on status is needed to + * strongly guaratnee the order of "oldest" members, as they're linearized by the order in which they become Up + * (since marking that transition is a Leader action). 
+ */ + private def membersByAge(): immutable.SortedSet[Member] = + SortedSet.empty(Member.ageOrdering) + .union(cluster.state.members.filter(m ⇒ m.status != MemberStatus.WeaklyUp && m.status != MemberStatus.WeaklyUp)) + + /** INTERNAL API */ + @InternalApi + private[cluster] def takeNOldestMembers(memberFilter: Member ⇒ Boolean, n: Int): immutable.SortedSet[Member] = + membersByAge() + .filter(m ⇒ m.status != MemberStatus.Joining && m.status != MemberStatus.WeaklyUp) + .filter(memberFilter) + .take(n) + + private def membersAsRoles(ms: SortedSet[Member]): SortedSet[RoleName] = { + val res = ms.flatMap(m ⇒ roleName(m.address)) + require(res.size == ms.size, s"Not all members were converted to roles! Got: ${ms}, found ${res}") + res + } +} diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala index fa461bc769..d83c159d58 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala @@ -14,7 +14,7 @@ import akka.remote.testconductor.RoleName import akka.remote.testkit.{ FlightRecordingSupport, MultiNodeSpec, STMultiNodeSpec } import akka.testkit._ import akka.testkit.TestEvent._ -import akka.actor.{ ActorSystem, Address } +import akka.actor.{ Actor, ActorRef, ActorSystem, Address, Deploy, PoisonPill, Props, RootActorPath } import akka.event.Logging.ErrorLevel import scala.concurrent.duration._ @@ -22,9 +22,9 @@ import scala.collection.immutable import java.util.concurrent.ConcurrentHashMap import akka.remote.DefaultFailureDetectorRegistry -import akka.actor.ActorRef -import akka.actor.Actor -import akka.actor.RootActorPath +import akka.cluster.ClusterEvent.{ CurrentClusterState, MemberEvent, MemberExited, MemberRemoved } + +import scala.concurrent.Await object MultiNodeClusterSpec { @@ -312,6 +312,46 @@ trait MultiNodeClusterSpec extends Suite with STMultiNodeSpec with WatchedByCoro } } + def awaitMemberRemoved(toBeRemovedAddress: Address, timeout: FiniteDuration = 25.seconds): Unit = within(timeout) { + if (toBeRemovedAddress == cluster.selfAddress) { + enterBarrier("registered-listener") + + cluster.leave(toBeRemovedAddress) + enterBarrier("member-left") + + awaitCond(cluster.isTerminated, remaining) + enterBarrier("member-shutdown") + } else { + val exitingLatch = TestLatch() + + val awaiter = system.actorOf(Props(new Actor { + def receive = { + case MemberRemoved(m, _) if m.address == toBeRemovedAddress ⇒ + exitingLatch.countDown() + case _ ⇒ + // ignore + } + }).withDeploy(Deploy.local)) + cluster.subscribe(awaiter, classOf[MemberEvent]) + enterBarrier("registered-listener") + + // in the meantime member issues leave + enterBarrier("member-left") + + // verify that the member is EXITING + try Await.result(exitingLatch, timeout) catch { + case cause: Exception ⇒ + throw new AssertionError(s"Member ${toBeRemovedAddress} was not removed within ${timeout}!", cause) + } + awaiter ! 
PoisonPill // you've done your job, now die + + enterBarrier("member-shutdown") + markNodeAsUnavailable(toBeRemovedAddress) + } + + enterBarrier("member-totally-shutdown") + } + def awaitAllReachable(): Unit = awaitAssert(clusterView.unreachableMembers should ===(Set.empty)) From c728098b3df77e27c2f0a6bee68c6844419a4ed1 Mon Sep 17 00:00:00 2001 From: Konrad `ktoso` Malawski Date: Thu, 13 Jul 2017 19:46:32 +0900 Subject: [PATCH 25/34] =clu,dc #23354 do not heartbeat to yourself (cross-dc) --- .../cluster/CrossDcClusterHeartbeat.scala | 29 +++++++++++++++---- .../cluster/MultiDcSunnyWeatherSpec.scala | 29 +++++++++++++++++++ 2 files changed, 53 insertions(+), 5 deletions(-) diff --git a/akka-cluster/src/main/scala/akka/cluster/CrossDcClusterHeartbeat.scala b/akka-cluster/src/main/scala/akka/cluster/CrossDcClusterHeartbeat.scala index 6f187be058..3e8f3f318d 100644 --- a/akka-cluster/src/main/scala/akka/cluster/CrossDcClusterHeartbeat.scala +++ b/akka-cluster/src/main/scala/akka/cluster/CrossDcClusterHeartbeat.scala @@ -55,6 +55,7 @@ private[cluster] final class CrossDcHeartbeatSender extends Actor with ActorLogg val selfHeartbeat = ClusterHeartbeatSender.Heartbeat(selfAddress) var dataCentersState: CrossDcHeartbeatingState = CrossDcHeartbeatingState.init( + selfDataCenter, crossDcFailureDetector, crossDcSettings.NrOfMonitoringActors, SortedSet.empty @@ -123,7 +124,7 @@ private[cluster] final class CrossDcHeartbeatSender extends Actor with ActorLogg // nr of monitored nodes is the same as the number of monitoring nodes (`n` oldest in one DC watch `n` oldest in other) val nodes = snapshot.members val nrOfMonitoredNodes = crossDcSettings.NrOfMonitoringActors - dataCentersState = CrossDcHeartbeatingState.init(crossDcFailureDetector, nrOfMonitoredNodes, nodes) + dataCentersState = CrossDcHeartbeatingState.init(selfDataCenter, crossDcFailureDetector, nrOfMonitoredNodes, nodes) } def addMember(m: Member): Unit = @@ -207,6 +208,7 @@ private[akka] object CrossDcHeartbeatSender { /** INTERNAL API */ @InternalApi private[cluster] final case class CrossDcHeartbeatingState( + selfDataCenter: DataCenter, failureDetector: FailureDetectorRegistry[Address], nrOfMonitoredNodesPerDc: Int, state: Map[ClusterSettings.DataCenter, SortedSet[Member]]) { @@ -265,11 +267,26 @@ private[cluster] final case class CrossDcHeartbeatingState( } } - val activeReceivers: Set[UniqueAddress] = - dataCenters.flatMap(k ⇒ state(k).take(nrOfMonitoredNodesPerDc).map(_.uniqueAddress)(breakOut)) + /** Lists addresses that this node should send heartbeats to */ + val activeReceivers: Set[UniqueAddress] = { + val otherDcs = state.filter(_._1 != selfDataCenter) + val allOtherNodes = otherDcs.values + allOtherNodes.flatMap( + _.take(nrOfMonitoredNodesPerDc) + .map(_.uniqueAddress)(breakOut) + ).toSet + } + + /** Lists addresses in diven DataCenter that this node should send heartbeats to */ private def activeReceiversIn(dc: DataCenter): Set[UniqueAddress] = - state.getOrElse(dc, emptyMembersSortedSet).take(nrOfMonitoredNodesPerDc).map(_.uniqueAddress)(breakOut) + if (dc == selfDataCenter) Set.empty // CrossDcHeartbeatSender is not supposed to send within its own Dc + else { + val otherNodes = state.getOrElse(dc, emptyMembersSortedSet) + otherNodes + .take(nrOfMonitoredNodesPerDc) + .map(_.uniqueAddress)(breakOut) + } def allMembers: Iterable[Member] = state.values.flatMap(ConstantFun.scalaIdentityFunction) @@ -294,10 +311,12 @@ private[cluster] object CrossDcHeartbeatingState { private def emptyMembersSortedSet: SortedSet[Member] = 
SortedSet.empty[Member](Member.ageOrdering) def init( + selfDataCenter: DataCenter, crossDcFailureDetector: FailureDetectorRegistry[Address], nrOfMonitoredNodesPerDc: Int, members: SortedSet[Member]): CrossDcHeartbeatingState = { - CrossDcHeartbeatingState( + new CrossDcHeartbeatingState( + selfDataCenter, crossDcFailureDetector, nrOfMonitoredNodesPerDc, state = { diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcSunnyWeatherSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcSunnyWeatherSpec.scala index 9c738291a8..fec90f9720 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcSunnyWeatherSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcSunnyWeatherSpec.scala @@ -109,6 +109,35 @@ abstract class MultiDcSunnyWeatherSpec extends MultiNodeSpec(MultiDcSunnyWeather enterBarrier("done") } + + "never heartbeat to itself or members of same its own data center" taggedAs LongRunningTest in { + + val observer = TestProbe("alpha-observer") + + val crossDcHeartbeatSenderPath = "/system/cluster/core/daemon/crossDcHeartbeatSender" + val selectCrossDcHeartbeatSender = system.actorSelection(crossDcHeartbeatSenderPath) + + enterBarrier("checking-activeReceivers") + + implicit val sender = observer.ref + selectCrossDcHeartbeatSender ! CrossDcHeartbeatSender.ReportStatus() + observer.expectMsgType[CrossDcHeartbeatSender.MonitoringStateReport](5.seconds) match { + case CrossDcHeartbeatSender.MonitoringDormant() ⇒ // ok ... + case CrossDcHeartbeatSender.MonitoringActive(state) ⇒ + + // must not heartbeat myself + state.activeReceivers should not contain cluster.selfUniqueAddress + + // not any of the members in the same datacenter; it's "cross-dc" after all + val myDataCenterMembers = state.state.getOrElse(cluster.selfDataCenter, Set.empty) + myDataCenterMembers foreach { myDcMember ⇒ + state.activeReceivers should not contain myDcMember.uniqueAddress + } + + } + + enterBarrier("done-checking-activeReceivers") + } } /** From b86b10c477d6076890fca70ef306de0353714f63 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johan=20Andr=C3=A9n?= Date: Wed, 19 Jul 2017 04:48:27 +0200 Subject: [PATCH 26/34] Elminate race in MultiDcHeartbeatTakingOverSpec #23371 (#23373) --- .../MultiDcHeartbeatTakingOverSpec.scala | 20 ++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcHeartbeatTakingOverSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcHeartbeatTakingOverSpec.scala index 71fab2632c..60241fb62d 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcHeartbeatTakingOverSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcHeartbeatTakingOverSpec.scala @@ -109,19 +109,25 @@ abstract class MultiDcHeartbeatTakingOverSpec extends MultiNodeSpec(MultiDcHeart enterBarrier("found-expectations") } - "be healthy" taggedAs LongRunningTest in { + "be healthy" taggedAs LongRunningTest in within(5.seconds) { implicit val sender = observer.ref runOn(expectedAlphaHeartbeaterRoles.toList: _*) { - selectCrossDcHeartbeatSender ! CrossDcHeartbeatSender.ReportStatus() - observer.expectMsgType[CrossDcHeartbeatSender.MonitoringActive](5.seconds) + awaitAssert { + selectCrossDcHeartbeatSender ! CrossDcHeartbeatSender.ReportStatus() + observer.expectMsgType[CrossDcHeartbeatSender.MonitoringActive] + } } runOn(expectedBetaHeartbeaterRoles.toList: _*) { - selectCrossDcHeartbeatSender ! 
CrossDcHeartbeatSender.ReportStatus() - observer.expectMsgType[CrossDcHeartbeatSender.MonitoringActive](5.seconds) + awaitAssert { + selectCrossDcHeartbeatSender ! CrossDcHeartbeatSender.ReportStatus() + observer.expectMsgType[CrossDcHeartbeatSender.MonitoringActive] + } } runOn(expectedNoActiveHeartbeatSenderRoles.toList: _*) { - selectCrossDcHeartbeatSender ! CrossDcHeartbeatSender.ReportStatus() - observer.expectMsgType[CrossDcHeartbeatSender.MonitoringDormant](5.seconds) + awaitAssert { + selectCrossDcHeartbeatSender ! CrossDcHeartbeatSender.ReportStatus() + observer.expectMsgType[CrossDcHeartbeatSender.MonitoringDormant] + } } enterBarrier("sunny-weather-done") From 699c78f9591a574e4d35b1af218d81bc75401e79 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Tue, 15 Aug 2017 16:05:34 +0200 Subject: [PATCH 27/34] fix NodeChurnSpec tombstones, #23369 * the gossip was growing because we introduced tombstones * in this test it should be safe to have a short removal period of the tombstones --- .../src/multi-jvm/scala/akka/cluster/NodeChurnSpec.scala | 1 + 1 file changed, 1 insertion(+) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeChurnSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeChurnSpec.scala index 951493bad9..43d784e846 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeChurnSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeChurnSpec.scala @@ -23,6 +23,7 @@ object NodeChurnMultiJvmSpec extends MultiNodeConfig { commonConfig(debugConfig(on = false). withFallback(ConfigFactory.parseString(""" akka.cluster.auto-down-unreachable-after = 1s + akka.cluster.prune-gossip-tombstones-after = 1s akka.remote.log-frame-size-exceeding = 1200b akka.remote.artery.advanced { idle-cpu-level = 1 From cff43a16f749a287468b776512039d7bd97062fa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johan=20Andr=C3=A9n?= Date: Tue, 22 Aug 2017 13:04:39 +0200 Subject: [PATCH 28/34] Data center reachability in cluster state (#23359) * Manual case-declassing of CurrentClusterState #23347 * Unreachable data centers set in CurrentClusterState #23347 --- .../scala/akka/cluster/ClusterEvent.scala | 107 ++++++++++++++++-- .../scala/akka/cluster/ClusterReadView.scala | 6 + 2 files changed, 103 insertions(+), 10 deletions(-) diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterEvent.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterEvent.scala index 181ee76774..18ad6853c6 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterEvent.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterEvent.scala @@ -5,7 +5,7 @@ package akka.cluster import language.postfixOps import scala.collection.immutable -import scala.collection.immutable.VectorBuilder +import scala.collection.immutable.{ SortedSet, VectorBuilder } import akka.actor.{ Actor, ActorLogging, ActorRef, Address } import akka.cluster.ClusterSettings.DataCenter import akka.cluster.ClusterEvent._ @@ -16,6 +16,7 @@ import akka.actor.DeadLetterSuppression import akka.annotation.InternalApi import scala.collection.breakOut +import scala.runtime.AbstractFunction5 /** * Domain events published to the event bus. 
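The reachability information introduced by this patch is meant to be consumed from application code as well as from the test probes later in this series. The following is a minimal, illustrative sketch only, not part of the patch: the actor name DcReachabilityListener is made up, while the APIs used (DataCenterReachabilityEvent, UnreachableDataCenter, ReachableDataCenter and CurrentClusterState.unreachableDataCenters) are the ones added or exercised in this patch series.

import akka.actor.{ Actor, ActorLogging, Props }
import akka.cluster.Cluster
import akka.cluster.ClusterEvent._

class DcReachabilityListener extends Actor with ActorLogging {
  private val cluster = Cluster(context.system)

  // subscribing to the event class delivers a CurrentClusterState snapshot first,
  // followed by UnreachableDataCenter / ReachableDataCenter events
  override def preStart(): Unit =
    cluster.subscribe(self, classOf[DataCenterReachabilityEvent])

  override def postStop(): Unit =
    cluster.unsubscribe(self)

  def receive = {
    case state: CurrentClusterState ⇒
      // the snapshot now also carries the set of unreachable data centers
      log.info("Currently unreachable data centers: {}", state.unreachableDataCenters)
    case r: UnreachableDataCenter ⇒
      log.warning("Data center [{}] unreachable from [{}]", r.dataCenter, cluster.selfDataCenter)
    case r: ReachableDataCenter ⇒
      log.info("Data center [{}] reachable again", r.dataCenter)
  }
}

object DcReachabilityListener {
  def props: Props = Props(new DcReachabilityListener)
}

Started with system.actorOf(DcReachabilityListener.props), such a listener sees the same sequence that the TestProbe in MultiDcSplitBrainSpec (patch 30) asserts on: the CurrentClusterState snapshot, then the data-center reachability events.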
@@ -55,17 +56,52 @@ object ClusterEvent { */ sealed trait ClusterDomainEvent extends DeadLetterSuppression + // for binary compatibility (used to be a case class) + object CurrentClusterState extends AbstractFunction5[immutable.SortedSet[Member], Set[Member], Set[Address], Option[Address], Map[String, Option[Address]], CurrentClusterState] { + + def apply( + members: immutable.SortedSet[Member] = immutable.SortedSet.empty, + unreachable: Set[Member] = Set.empty, + seenBy: Set[Address] = Set.empty, + leader: Option[Address] = None, + roleLeaderMap: Map[String, Option[Address]] = Map.empty): CurrentClusterState = + new CurrentClusterState(members, unreachable, seenBy, leader, roleLeaderMap) + + def unapply(cs: CurrentClusterState): Option[(immutable.SortedSet[Member], Set[Member], Set[Address], Option[Address], Map[String, Option[Address]])] = + Some(( + cs.members, + cs.unreachable, + cs.seenBy, + cs.leader, + cs.roleLeaderMap + )) + + } + /** * Current snapshot state of the cluster. Sent to new subscriber. * * @param leader leader of the data center of this node */ - final case class CurrentClusterState( - members: immutable.SortedSet[Member] = immutable.SortedSet.empty, - unreachable: Set[Member] = Set.empty, - seenBy: Set[Address] = Set.empty, - leader: Option[Address] = None, - roleLeaderMap: Map[String, Option[Address]] = Map.empty) { + @SerialVersionUID(2) + final class CurrentClusterState( + val members: immutable.SortedSet[Member], + val unreachable: Set[Member], + val seenBy: Set[Address], + val leader: Option[Address], + val roleLeaderMap: Map[String, Option[Address]], + val unreachableDataCenters: Set[DataCenter]) + extends Product5[immutable.SortedSet[Member], Set[Member], Set[Address], Option[Address], Map[String, Option[Address]]] + with Serializable { + + // for binary compatibility + def this( + members: immutable.SortedSet[Member] = immutable.SortedSet.empty, + unreachable: Set[Member] = Set.empty, + seenBy: Set[Address] = Set.empty, + leader: Option[Address] = None, + roleLeaderMap: Map[String, Option[Address]] = Map.empty) = + this(members, unreachable, seenBy, leader, roleLeaderMap, Set.empty) /** * Java API: get current member list. 
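The companion object and secondary constructor above hand-write the machinery the compiler used to generate for the case class, so binaries linked against the old CurrentClusterState keep working and old source keeps compiling while the extra unreachableDataCenters field is added. A toy sketch of the same pattern, assuming a made-up two-field class Sample that grows a third field c (none of these names are from the patch):

import scala.runtime.AbstractFunction2

// Former `case class Sample(a: Int, b: String)`, now grown a third field `c`
// without breaking binary or source compatibility.
object Sample extends AbstractFunction2[Int, String, Sample] {
  // factory with the old arity, as the generated case class companion provided
  def apply(a: Int, b: String): Sample = new Sample(a, b, c = 0L)
  // extractor with the old shape, so existing pattern matches still compile
  def unapply(s: Sample): Option[(Int, String)] = Some((s.a, s.b))
}

final class Sample(val a: Int, val b: String, val c: Long)
  extends Product2[Int, String] with Serializable {

  // secondary constructor with the old signature, for binary compatibility
  def this(a: Int, b: String) = this(a, b, c = 0L)

  // old copy signature retained; the new field is carried along unchanged
  def copy(a: Int = this.a, b: String = this.b): Sample = new Sample(a, b, this.c)

  // update method for the new field, mirroring withUnreachableDataCenters above
  def withC(value: Long): Sample = new Sample(a, b, value)

  // Product2 members; equals and hashCode deliberately ignore `c`,
  // since the original fields continue to define identity
  def _1: Int = a
  def _2: String = b
  def canEqual(that: Any): Boolean = that.isInstanceOf[Sample]
  override def equals(other: Any): Boolean = other match {
    case that: Sample ⇒ a == that.a && b == that.b
    case _            ⇒ false
  }
  override def hashCode(): Int = 31 * a.hashCode + b.hashCode
  override def toString = s"Sample($a, $b)"
}

Keeping the companion as an AbstractFunction2 matters because the compiler-generated companion of the old case class extended that same class, so dropping it would change the companion's type and break binary compatibility.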
@@ -125,6 +161,47 @@ object ClusterEvent { def getAllDataCenters: java.util.Set[String] = scala.collection.JavaConverters.setAsJavaSetConverter(allDataCenters).asJava + /** + * Replace the set of unreachable datacenters with the given set + */ + def withUnreachableDataCenters(unreachableDataCenters: Set[DataCenter]): CurrentClusterState = + new CurrentClusterState(members, unreachable, seenBy, leader, roleLeaderMap, unreachableDataCenters) + + // for binary compatibility (used to be a case class) + def copy( + members: immutable.SortedSet[Member] = this.members, + unreachable: Set[Member] = this.unreachable, + seenBy: Set[Address] = this.seenBy, + leader: Option[Address] = this.leader, + roleLeaderMap: Map[String, Option[Address]] = this.roleLeaderMap) = + new CurrentClusterState(members, unreachable, seenBy, leader, roleLeaderMap, unreachableDataCenters) + + override def equals(other: Any): Boolean = other match { + case that: CurrentClusterState ⇒ + (this eq that) || ( + members == that.members && + unreachable == that.unreachable && + seenBy == that.seenBy && + leader == that.leader && + roleLeaderMap == that.roleLeaderMap) + case _ ⇒ false + } + + override def hashCode(): Int = { + val state = Seq(members, unreachable, seenBy, leader, roleLeaderMap) + state.map(_.hashCode()).foldLeft(0)((a, b) ⇒ 31 * a + b) + } + + // Product5 + override def productPrefix = "CurrentClusterState" + def _1: SortedSet[Member] = members + def _2: Set[Member] = unreachable + def _3: Set[Address] = seenBy + def _4: Option[Address] = leader + def _5: Map[String, Option[Address]] = roleLeaderMap + def canEqual(that: Any): Boolean = that.isInstanceOf[CurrentClusterState] + + override def toString = s"CurrentClusterState($members, $unreachable, $seenBy, $leader, $roleLeaderMap)" } /** @@ -305,7 +382,10 @@ object ClusterEvent { }(collection.breakOut) } - private def isReachable(state: MembershipState, oldUnreachableNodes: Set[UniqueAddress])(otherDc: DataCenter): Boolean = { + /** + * Internal API + */ + private[cluster] def isReachable(state: MembershipState, oldUnreachableNodes: Set[UniqueAddress])(otherDc: DataCenter): Boolean = { val unrelatedDcNodes = state.latestGossip.members.collect { case m if m.dataCenter != otherDc && m.dataCenter != state.selfDc ⇒ m.uniqueAddress } @@ -464,13 +544,20 @@ private[cluster] final class ClusterDomainEventPublisher extends Actor with Acto membershipState.dcReachabilityNoOutsideNodes.allUnreachableOrTerminated.collect { case node if node != selfUniqueAddress ⇒ membershipState.latestGossip.member(node) } - val state = CurrentClusterState( + + val unreachableDataCenters: Set[DataCenter] = + if (!membershipState.latestGossip.isMultiDc) Set.empty + else membershipState.latestGossip.allDataCenters.filterNot(isReachable(membershipState, Set.empty)) + + val state = new CurrentClusterState( members = membershipState.latestGossip.members, unreachable = unreachable, seenBy = membershipState.latestGossip.seenBy.map(_.address), leader = membershipState.leader.map(_.address), roleLeaderMap = membershipState.latestGossip.allRoles.map(r ⇒ - r → membershipState.roleLeader(r).map(_.address))(collection.breakOut)) + r → membershipState.roleLeader(r).map(_.address))(collection.breakOut), + unreachableDataCenters + ) receiver ! 
state } diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterReadView.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterReadView.scala index 20799cfb11..40b32777de 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterReadView.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterReadView.scala @@ -70,6 +70,12 @@ private[akka] class ClusterReadView(cluster: Cluster) extends Closeable { _state = _state.copy(roleLeaderMap = _state.roleLeaderMap + (role → leader)) case stats: CurrentInternalStats ⇒ _latestStats = stats case ClusterShuttingDown ⇒ + + case r: ReachableDataCenter ⇒ + _state = _state.withUnreachableDataCenters(_state.unreachableDataCenters - r.dataCenter) + case r: UnreachableDataCenter ⇒ + _state = _state.withUnreachableDataCenters(_state.unreachableDataCenters + r.dataCenter) + } case s: CurrentClusterState ⇒ _state = s } From 659b28e4eb5ce38918f5a95b20dba0bc5af65f8a Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Tue, 15 Aug 2017 15:31:52 +0200 Subject: [PATCH 29/34] Missing become after CurrentClusterState in CrossDcHeartbeatSender, #23371 * and a few other small things * one can see in the failed test log that there is no ACTIVE log line on the failing node --- .../cluster/CrossDcClusterHeartbeat.scala | 33 ++++++++++--------- .../MultiDcHeartbeatTakingOverSpec.scala | 15 ++++----- .../cluster/MultiDcSunnyWeatherSpec.scala | 11 +++---- 3 files changed, 29 insertions(+), 30 deletions(-) diff --git a/akka-cluster/src/main/scala/akka/cluster/CrossDcClusterHeartbeat.scala b/akka-cluster/src/main/scala/akka/cluster/CrossDcClusterHeartbeat.scala index 3e8f3f318d..2d6b1e56f1 100644 --- a/akka-cluster/src/main/scala/akka/cluster/CrossDcClusterHeartbeat.scala +++ b/akka-cluster/src/main/scala/akka/cluster/CrossDcClusterHeartbeat.scala @@ -58,8 +58,7 @@ private[cluster] final class CrossDcHeartbeatSender extends Actor with ActorLogg selfDataCenter, crossDcFailureDetector, crossDcSettings.NrOfMonitoringActors, - SortedSet.empty - ) + SortedSet.empty) // start periodic heartbeat to other nodes in cluster val heartbeatTask = scheduler.schedule( @@ -125,10 +124,11 @@ private[cluster] final class CrossDcHeartbeatSender extends Actor with ActorLogg val nodes = snapshot.members val nrOfMonitoredNodes = crossDcSettings.NrOfMonitoringActors dataCentersState = CrossDcHeartbeatingState.init(selfDataCenter, crossDcFailureDetector, nrOfMonitoredNodes, nodes) + becomeActiveIfResponsibleForHeartbeat() } def addMember(m: Member): Unit = - if (m.status != MemberStatus.Joining && m.status != MemberStatus.WeaklyUp) { + if (CrossDcHeartbeatingState.atLeastInUpState(m)) { // since we only monitor nodes in Up or later states, due to the n-th oldest requirement dataCentersState = dataCentersState.addMember(m) if (verboseHeartbeat && m.dataCenter != selfDataCenter) @@ -194,7 +194,7 @@ private[cluster] final class CrossDcHeartbeatSender extends Actor with ActorLogg @InternalApi private[akka] object CrossDcHeartbeatSender { - // -- messages intended only for local messaging during testing -- + // -- messages intended only for local messaging during testing -- sealed trait InspectionCommand extends NoSerializationVerificationNeeded final case class ReportStatus() @@ -202,7 +202,7 @@ private[akka] object CrossDcHeartbeatSender { sealed trait MonitoringStateReport extends StatusReport final case class MonitoringActive(state: CrossDcHeartbeatingState) extends MonitoringStateReport final case class MonitoringDormant() extends MonitoringStateReport - // -- end of messages intended 
only for local messaging during testing -- + // -- end of messages intended only for local messaging during testing -- } /** INTERNAL API */ @@ -219,9 +219,7 @@ private[cluster] final case class CrossDcHeartbeatingState( * Only the `nrOfMonitoredNodesPerDc`-oldest nodes in each DC fulfil this role. */ def shouldActivelyMonitorNodes(selfDc: ClusterSettings.DataCenter, selfAddress: UniqueAddress): Boolean = { - /** Since we need ordering of oldests guaranteed, we must only look at Up (or Leaving, Exiting...) nodes */ - def atLeastInUpState(m: Member): Boolean = - m.status != MemberStatus.WeaklyUp && m.status != MemberStatus.Joining + // Since we need ordering of oldests guaranteed, we must only look at Up (or Leaving, Exiting...) nodes val selfDcNeighbours: SortedSet[Member] = state.getOrElse(selfDc, emptyMembersSortedSet) val selfDcOldOnes = selfDcNeighbours.filter(atLeastInUpState).take(nrOfMonitoredNodesPerDc) @@ -244,12 +242,12 @@ private[cluster] final case class CrossDcHeartbeatingState( val updatedState = this.copy(state = state.updated(dc, updatedMembers)) // guarding against the case of two members having the same upNumber, in which case the activeReceivers - // which are based on the ageOrdering could actually have changed by adding a node. In practice this - // should happen rarely, since upNumbers are assigned sequentially, and we only ever compare nodes - // in the same DC. If it happens though, we need to remove the previously monitored node from the failure + // which are based on the ageOrdering could actually have changed by adding a node. In practice this + // should happen rarely, since upNumbers are assigned sequentially, and we only ever compare nodes + // in the same DC. If it happens though, we need to remove the previously monitored node from the failure // detector, to prevent both a resource leak and that node actually appearing as unreachable in the gossip (!) val stoppedMonitoringReceivers = updatedState.activeReceiversIn(dc) diff this.activeReceiversIn(dc) - stoppedMonitoringReceivers.foreach(m ⇒ failureDetector.remove(m.address)) // at most one element difference + stoppedMonitoringReceivers.foreach(m ⇒ failureDetector.remove(m.address)) // at most one element difference updatedState } @@ -263,7 +261,7 @@ private[cluster] final case class CrossDcHeartbeatingState( failureDetector.remove(m.address) copy(state = state.updated(dc, updatedMembers)) case None ⇒ - this // no change needed, was certainly not present (not even its DC was) + this // no change needed, was certainly not present (not even its DC was) } } @@ -274,8 +272,7 @@ private[cluster] final case class CrossDcHeartbeatingState( allOtherNodes.flatMap( _.take(nrOfMonitoredNodesPerDc) - .map(_.uniqueAddress)(breakOut) - ).toSet + .map(_.uniqueAddress)(breakOut)).toSet } /** Lists addresses in diven DataCenter that this node should send heartbeats to */ @@ -310,6 +307,10 @@ private[cluster] object CrossDcHeartbeatingState { /** Sorted by age */ private def emptyMembersSortedSet: SortedSet[Member] = SortedSet.empty[Member](Member.ageOrdering) + // Since we need ordering of oldests guaranteed, we must only look at Up (or Leaving, Exiting...) 
nodes + def atLeastInUpState(m: Member): Boolean = + m.status != MemberStatus.WeaklyUp && m.status != MemberStatus.Joining + def init( selfDataCenter: DataCenter, crossDcFailureDetector: FailureDetectorRegistry[Address], @@ -321,7 +322,7 @@ private[cluster] object CrossDcHeartbeatingState { nrOfMonitoredNodesPerDc, state = { // TODO unduplicate this with the logic in MembershipState.ageSortedTopOldestMembersPerDc - val groupedByDc = members.groupBy(_.dataCenter) + val groupedByDc = members.filter(atLeastInUpState).groupBy(_.dataCenter) if (members.ordering == Member.ageOrdering) { // we already have the right ordering diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcHeartbeatTakingOverSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcHeartbeatTakingOverSpec.scala index 60241fb62d..5a3c312da4 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcHeartbeatTakingOverSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcHeartbeatTakingOverSpec.scala @@ -16,7 +16,7 @@ import scala.concurrent.duration._ object MultiDcHeartbeatTakingOverSpecMultiJvmSpec extends MultiNodeConfig { val first = role("first") // alpha - val second = role("second") // alpha + val second = role("second") // alpha val third = role("third") // alpha val fourth = role("fourth") // beta @@ -40,15 +40,15 @@ object MultiDcHeartbeatTakingOverSpecMultiJvmSpec extends MultiNodeConfig { """ akka { actor.provider = cluster - + loggers = ["akka.testkit.TestEventListener"] loglevel = INFO - + remote.log-remote-lifecycle-events = off - + cluster { debug.verbose-heartbeat-logging = off - + multi-data-center { cross-data-center-connections = 2 } @@ -136,7 +136,7 @@ abstract class MultiDcHeartbeatTakingOverSpec extends MultiNodeSpec(MultiDcHeart "other node must become oldest when current DC-oldest Leaves" taggedAs LongRunningTest in { val observer = TestProbe("alpha-observer-prime") - // we leave one of the current oldest nodes of the `alpha` DC, + // we leave one of the current oldest nodes of the `alpha` DC, // since it has 3 members the "not yet oldest" one becomes oldest and should start monitoring across datacenter val preLeaveOldestAlphaRole = expectedAlphaHeartbeaterRoles.head val preLeaveOldestAlphaAddress = expectedAlphaHeartbeaterNodes.find(_.address.port.get == preLeaveOldestAlphaRole.port.get).get.address @@ -182,13 +182,12 @@ abstract class MultiDcHeartbeatTakingOverSpec extends MultiNodeSpec(MultiDcHeart */ private def membersByAge(): immutable.SortedSet[Member] = SortedSet.empty(Member.ageOrdering) - .union(cluster.state.members.filter(m ⇒ m.status != MemberStatus.WeaklyUp && m.status != MemberStatus.WeaklyUp)) + .union(cluster.state.members.filter(m ⇒ m.status != MemberStatus.Joining && m.status != MemberStatus.WeaklyUp)) /** INTERNAL API */ @InternalApi private[cluster] def takeNOldestMembers(memberFilter: Member ⇒ Boolean, n: Int): immutable.SortedSet[Member] = membersByAge() - .filter(m ⇒ m.status != MemberStatus.Joining && m.status != MemberStatus.WeaklyUp) .filter(memberFilter) .take(n) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcSunnyWeatherSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcSunnyWeatherSpec.scala index fec90f9720..e13f2bc691 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcSunnyWeatherSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcSunnyWeatherSpec.scala @@ -38,15 +38,15 @@ object MultiDcSunnyWeatherMultiJvmSpec extends MultiNodeConfig { """ akka { actor.provider 
= cluster - + loggers = ["akka.testkit.TestEventListener"] loglevel = INFO - + remote.log-remote-lifecycle-events = off - + cluster { debug.verbose-heartbeat-logging = off - + multi-data-center { cross-data-center-connections = 2 } @@ -149,13 +149,12 @@ abstract class MultiDcSunnyWeatherSpec extends MultiNodeSpec(MultiDcSunnyWeather */ private def membersByAge(): immutable.SortedSet[Member] = SortedSet.empty(Member.ageOrdering) - .union(cluster.state.members.filter(m ⇒ m.status != MemberStatus.WeaklyUp && m.status != MemberStatus.WeaklyUp)) + .union(cluster.state.members.filter(m ⇒ m.status != MemberStatus.Joining && m.status != MemberStatus.WeaklyUp)) /** INTERNAL API */ @InternalApi private[cluster] def takeNOldestMembers(memberFilter: Member ⇒ Boolean, n: Int): immutable.SortedSet[Member] = membersByAge() - .filter(m ⇒ m.status != MemberStatus.Joining && m.status != MemberStatus.WeaklyUp) .filter(memberFilter) .take(n) From e3aada5016dd029fc7da47860ee9cbc42e2e93c7 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Tue, 22 Aug 2017 15:02:27 +0200 Subject: [PATCH 30/34] Connect the dots for cross-dc reachability, #23377 * the crossDcFailureDetector was not connected to the reachability table * additional test by listen for {Reachable/Unreachable}DataCenter events in split spec * missing Java API for getUnreachableDataCenters in CurrentClusterState --- .../scala/akka/cluster/ClusterDaemon.scala | 18 +++- .../scala/akka/cluster/ClusterEvent.scala | 12 ++- .../akka/cluster/MultiDcSplitBrainSpec.scala | 88 +++++++++++++++---- 3 files changed, 91 insertions(+), 27 deletions(-) diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterDaemon.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterDaemon.scala index b16fe4328e..dd6fc1e34d 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterDaemon.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterDaemon.scala @@ -281,7 +281,7 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef) extends Actor with import MembershipState._ val cluster = Cluster(context.system) - import cluster.{ selfAddress, selfRoles, scheduler, failureDetector } + import cluster.{ selfAddress, selfRoles, scheduler, failureDetector, crossDcFailureDetector } import cluster.settings._ import cluster.InfoLogger._ @@ -606,6 +606,7 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef) extends Actor with case None ⇒ // remove the node from the failure detector failureDetector.remove(joiningNode.address) + crossDcFailureDetector.remove(joiningNode.address) // add joining node as Joining // add self in case someone else joins before self has joined (Set discards duplicates) @@ -859,7 +860,11 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef) extends Actor with // for all new joining nodes we remove them from the failure detector latestGossip.members foreach { - node ⇒ if (node.status == Joining && !localGossip.members(node)) failureDetector.remove(node.address) + node ⇒ + if (node.status == Joining && !localGossip.members(node)) { + failureDetector.remove(node.address) + crossDcFailureDetector.remove(node.address) + } } log.debug("Cluster Node [{}] - Receiving gossip from [{}]", selfAddress, from) @@ -1133,15 +1138,20 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef) extends Actor with val localOverview = localGossip.overview val localMembers = localGossip.members + def isAvailable(member: Member): Boolean = { + if (member.dataCenter == SelfDataCenter) failureDetector.isAvailable(member.address) + else 
crossDcFailureDetector.isAvailable(member.address) + } + val newlyDetectedUnreachableMembers = localMembers filterNot { member ⇒ member.uniqueAddress == selfUniqueAddress || localOverview.reachability.status(selfUniqueAddress, member.uniqueAddress) == Reachability.Unreachable || localOverview.reachability.status(selfUniqueAddress, member.uniqueAddress) == Reachability.Terminated || - failureDetector.isAvailable(member.address) + isAvailable(member) } val newlyDetectedReachableMembers = localOverview.reachability.allUnreachableFrom(selfUniqueAddress) collect { - case node if node != selfUniqueAddress && failureDetector.isAvailable(node.address) ⇒ + case node if node != selfUniqueAddress && isAvailable(localGossip.member(node)) ⇒ localGossip.member(node) } diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterEvent.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterEvent.scala index 18ad6853c6..e4e769c79c 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterEvent.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterEvent.scala @@ -73,8 +73,7 @@ object ClusterEvent { cs.unreachable, cs.seenBy, cs.leader, - cs.roleLeaderMap - )) + cs.roleLeaderMap)) } @@ -117,6 +116,12 @@ object ClusterEvent { def getUnreachable: java.util.Set[Member] = scala.collection.JavaConverters.setAsJavaSetConverter(unreachable).asJava + /** + * Java API: All data centers in the cluster + */ + def getUnreachableDataCenters: java.util.Set[String] = + scala.collection.JavaConverters.setAsJavaSetConverter(unreachableDataCenters).asJava + /** * Java API: get current “seen-by” set. */ @@ -556,8 +561,7 @@ private[cluster] final class ClusterDomainEventPublisher extends Actor with Acto leader = membershipState.leader.map(_.address), roleLeaderMap = membershipState.latestGossip.allRoles.map(r ⇒ r → membershipState.roleLeader(r).map(_.address))(collection.breakOut), - unreachableDataCenters - ) + unreachableDataCenters) receiver ! 
state } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcSplitBrainSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcSplitBrainSpec.scala index 458d5ab03d..036d997209 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcSplitBrainSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcSplitBrainSpec.scala @@ -3,9 +3,11 @@ */ package akka.cluster +import akka.cluster.ClusterEvent.{ CurrentClusterState, DataCenterReachabilityEvent, ReachableDataCenter, UnreachableDataCenter } import akka.remote.testconductor.RoleName import akka.remote.testkit.{ MultiNodeConfig, MultiNodeSpec } import akka.remote.transport.ThrottlerTransportAdapter.Direction +import akka.testkit.TestProbe import com.typesafe.config.ConfigFactory import scala.concurrent.duration._ @@ -19,7 +21,12 @@ object MultiDcSplitBrainMultiJvmSpec extends MultiNodeConfig { commonConfig(ConfigFactory.parseString( """ akka.loglevel = INFO - akka.cluster.run-coordinated-shutdown-when-down = off + akka.cluster.multi-data-center { + failure-detector { + acceptable-heartbeat-pause = 4s + heartbeat-interval = 1s + } + } """).withFallback(MultiNodeClusterSpec.clusterConfig)) nodeConfig(first, second)(ConfigFactory.parseString( @@ -48,28 +55,75 @@ abstract class MultiDcSplitBrainSpec val dc1 = List(first, second) val dc2 = List(third, fourth) + var barrierCounter = 0 + + def splitDataCenters(notMembers: Set[RoleName]): Unit = { + val memberNodes = (dc1 ++ dc2).filterNot(notMembers) + val probe = TestProbe() + runOn(memberNodes: _*) { + cluster.subscribe(probe.ref, classOf[DataCenterReachabilityEvent]) + probe.expectMsgType[CurrentClusterState] + } + enterBarrier(s"split-$barrierCounter") + barrierCounter += 1 - def splitDataCenters(dc1: Seq[RoleName], dc2: Seq[RoleName]): Unit = { runOn(first) { - for { - dc1Node ← dc1 - dc2Node ← dc2 - } { + for (dc1Node ← dc1; dc2Node ← dc2) { testConductor.blackhole(dc1Node, dc2Node, Direction.Both).await } } + + enterBarrier(s"after-split-$barrierCounter") + barrierCounter += 1 + + runOn(memberNodes: _*) { + probe.expectMsgType[UnreachableDataCenter](15.seconds) + cluster.unsubscribe(probe.ref) + runOn(dc1: _*) { + awaitAssert { + cluster.state.unreachableDataCenters should ===(Set("dc2")) + } + } + runOn(dc2: _*) { + awaitAssert { + cluster.state.unreachableDataCenters should ===(Set("dc1")) + } + } + cluster.state.unreachable should ===(Set.empty) + } + enterBarrier(s"after-split-verified-$barrierCounter") + barrierCounter += 1 } - def unsplitDataCenters(dc1: Seq[RoleName], dc2: Seq[RoleName]): Unit = { + def unsplitDataCenters(notMembers: Set[RoleName]): Unit = { + val memberNodes = (dc1 ++ dc2).filterNot(notMembers) + val probe = TestProbe() + runOn(memberNodes: _*) { + cluster.subscribe(probe.ref, classOf[DataCenterReachabilityEvent]) + probe.expectMsgType[CurrentClusterState] + } + enterBarrier(s"unsplit-$barrierCounter") + barrierCounter += 1 + runOn(first) { - for { - dc1Node ← dc1 - dc2Node ← dc2 - } { + for (dc1Node ← dc1; dc2Node ← dc2) { testConductor.passThrough(dc1Node, dc2Node, Direction.Both).await } } + enterBarrier(s"after-unsplit-$barrierCounter") + barrierCounter += 1 + + runOn(memberNodes: _*) { + probe.expectMsgType[ReachableDataCenter](15.seconds) + cluster.unsubscribe(probe.ref) + awaitAssert { + cluster.state.unreachableDataCenters should ===(Set.empty) + } + } + enterBarrier(s"after-unsplit-verified-$barrierCounter") + barrierCounter += 1 + } "A cluster with multiple data centers" must { @@ -79,8 +133,7 @@ abstract class 
MultiDcSplitBrainSpec "be able to have a data center member join while there is inter data center split" in within(20.seconds) { // introduce a split between data centers - splitDataCenters(dc1 = List(first, second), dc2 = List(third)) - enterBarrier("data-center-split-1") + splitDataCenters(notMembers = Set(fourth)) runOn(fourth) { cluster.join(third) @@ -96,8 +149,7 @@ abstract class MultiDcSplitBrainSpec } enterBarrier("dc2-join-completed") - unsplitDataCenters(dc1 = List(first, second), dc2 = List(third)) - enterBarrier("data-center-unsplit-1") + unsplitDataCenters(notMembers = Set.empty) runOn(dc1: _*) { awaitAssert(clusterView.members.collect { @@ -109,8 +161,7 @@ abstract class MultiDcSplitBrainSpec } "be able to have data center member leave while there is inter data center split" in within(20.seconds) { - splitDataCenters(dc1, dc2) - enterBarrier("data-center-split-2") + splitDataCenters(notMembers = Set.empty) runOn(fourth) { cluster.leave(fourth) @@ -121,8 +172,7 @@ abstract class MultiDcSplitBrainSpec } enterBarrier("node-4-left") - unsplitDataCenters(dc1, List(third)) - enterBarrier("data-center-unsplit-2") + unsplitDataCenters(notMembers = Set(fourth)) runOn(first, second) { awaitAssert(clusterView.members.filter(_.address == address(fourth)) should ===(Set.empty)) From 6bfb7c92626210ae6dbe95464f6bccba3fa62ec7 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Mon, 28 Aug 2017 14:40:56 +0200 Subject: [PATCH 31/34] increase timeout in MultiDcSplitBrainSpec * due to handshake timeout reduce handshake timeout fourth might generate UnreachableDataCenter in unsplit MultiDcClusterSharding --- .../cluster/sharding/MultiDcClusterShardingSpec.scala | 4 +++- .../scala/akka/cluster/MultiDcSplitBrainSpec.scala | 9 ++++++--- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/MultiDcClusterShardingSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/MultiDcClusterShardingSpec.scala index 7a45b08500..65d8c30282 100644 --- a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/MultiDcClusterShardingSpec.scala +++ b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/MultiDcClusterShardingSpec.scala @@ -55,7 +55,9 @@ object MultiDcClusterShardingSpecConfig extends MultiNodeConfig { val fourth = role("fourth") commonConfig(ConfigFactory.parseString(s""" - akka.loglevel = INFO + # DEBUG because of failing test, issue #23582 + akka.loglevel = DEBUG + akka.cluster.debug.verbose-heartbeat-logging = on akka.actor.provider = "cluster" akka.remote.log-remote-lifecycle-events = off akka.cluster.auto-down-unreachable-after = 0s diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcSplitBrainSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcSplitBrainSpec.scala index 036d997209..c02d0f0aa3 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcSplitBrainSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcSplitBrainSpec.scala @@ -20,7 +20,10 @@ object MultiDcSplitBrainMultiJvmSpec extends MultiNodeConfig { commonConfig(ConfigFactory.parseString( """ - akka.loglevel = INFO + akka.loglevel = DEBUG + akka.cluster.debug.verbose-heartbeat-logging = on + akka.remote.netty.tcp.connection-timeout = 5 s # speedup in case of connection issue + akka.remote.retry-gate-closed-for = 1 s akka.cluster.multi-data-center { failure-detector { acceptable-heartbeat-pause = 4s @@ -99,7 +102,7 @@ abstract class MultiDcSplitBrainSpec val memberNodes 
= (dc1 ++ dc2).filterNot(notMembers) val probe = TestProbe() runOn(memberNodes: _*) { - cluster.subscribe(probe.ref, classOf[DataCenterReachabilityEvent]) + cluster.subscribe(probe.ref, classOf[ReachableDataCenter]) probe.expectMsgType[CurrentClusterState] } enterBarrier(s"unsplit-$barrierCounter") @@ -115,7 +118,7 @@ abstract class MultiDcSplitBrainSpec barrierCounter += 1 runOn(memberNodes: _*) { - probe.expectMsgType[ReachableDataCenter](15.seconds) + probe.expectMsgType[ReachableDataCenter](25.seconds) cluster.unsubscribe(probe.ref) awaitAssert { cluster.state.unreachableDataCenters should ===(Set.empty) From a10391829a6b8904b501b07c65ad0ed5896496e9 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Thu, 31 Aug 2017 10:43:42 +0200 Subject: [PATCH 32/34] remove mima changes to avoid merge conflict --- project/MiMa.scala | 27 --------------------------- 1 file changed, 27 deletions(-) diff --git a/project/MiMa.scala b/project/MiMa.scala index 92e229c589..6c15c4a27c 100644 --- a/project/MiMa.scala +++ b/project/MiMa.scala @@ -1230,33 +1230,6 @@ object MiMa extends AutoPlugin { // #23023 added a new overload with implementation to trait, so old transport implementations compiled against // older versions will be missing the method. We accept that incompatibility for now. ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.remote.transport.AssociationHandle.disassociate") - ), - "2.5.3" -> Seq( - // #23231 multi-DC Sharding - ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.cluster.ddata.Replicator.leader"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ddata.Replicator.receiveLeaderChanged"), - ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.cluster.ddata.Replicator.leader_="), - FilterAnyProblemStartingWith("akka.cluster.sharding.ClusterShardingGuardian"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.sharding.ShardRegion.proxyProps"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.sharding.ShardRegion.this"), - - // #23228 single leader per cluster data center - FilterAnyProblemStartingWith("akka.cluster.Gossip"), - FilterAnyProblemStartingWith("akka.cluster.ClusterCoreDaemon"), - FilterAnyProblemStartingWith("akka.cluster.ClusterDomainEventPublisher"), - FilterAnyProblemStartingWith("akka.cluster.InternalClusterAction"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ClusterEvent.diffReachable"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ClusterEvent.diffLeader"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ClusterEvent.diffRolesLeader"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ClusterEvent.diffSeen"), - ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.cluster.ClusterEvent.diffReachability"), - ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ClusterEvent.diffUnreachable"), - ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.cluster.ClusterEvent.diffMemberEvents"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.cluster.protobuf.msg.ClusterMessages#GossipOrBuilder.getTombstonesCount"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.cluster.protobuf.msg.ClusterMessages#GossipOrBuilder.getTombstones"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.cluster.protobuf.msg.ClusterMessages#GossipOrBuilder.getTombstonesList"), - 
ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.cluster.protobuf.msg.ClusterMessages#GossipOrBuilder.getTombstonesOrBuilderList"), - ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.cluster.protobuf.msg.ClusterMessages#GossipOrBuilder.getTombstonesOrBuilder") ) ) From 0ed5bc183587f8c194eb2fd3bca1a3678b4112a8 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Thu, 31 Aug 2017 11:29:49 +0200 Subject: [PATCH 33/34] add mima filters --- .../mima-filters/2.5.4.backwards.excludes | 4 ++++ .../mima-filters/2.5.4.backwards.excludes | 20 ++++++++++++++++++- .../mima-filters/2.5.4.backwards.excludes | 4 ++++ 3 files changed, 27 insertions(+), 1 deletion(-) create mode 100644 akka-cluster-sharding/src/main/mima-filters/2.5.4.backwards.excludes create mode 100644 akka-distributed-data/src/main/mima-filters/2.5.4.backwards.excludes diff --git a/akka-cluster-sharding/src/main/mima-filters/2.5.4.backwards.excludes b/akka-cluster-sharding/src/main/mima-filters/2.5.4.backwards.excludes new file mode 100644 index 0000000000..4e29c3851f --- /dev/null +++ b/akka-cluster-sharding/src/main/mima-filters/2.5.4.backwards.excludes @@ -0,0 +1,4 @@ +# #23231 multi-DC Sharding +ProblemFilters.exclude[Problem]("akka.cluster.sharding.ClusterShardingGuardian*") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.sharding.ShardRegion.proxyProps") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.sharding.ShardRegion.this") diff --git a/akka-cluster/src/main/mima-filters/2.5.4.backwards.excludes b/akka-cluster/src/main/mima-filters/2.5.4.backwards.excludes index 51edebc818..16bf45f3b5 100644 --- a/akka-cluster/src/main/mima-filters/2.5.4.backwards.excludes +++ b/akka-cluster/src/main/mima-filters/2.5.4.backwards.excludes @@ -4,4 +4,22 @@ ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.cluster.protobuf.msg. 
ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.cluster.protobuf.msg.ClusterMessages#ClusterRouterPoolSettingsOrBuilder.getUseRolesCount") ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.cluster.protobuf.msg.ClusterMessages#ClusterRouterPoolSettingsOrBuilder.getUseRolesList") ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.routing.ClusterRouterSettingsBase.useRole") -ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.cluster.routing.ClusterRouterSettingsBase.useRoles") \ No newline at end of file +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.cluster.routing.ClusterRouterSettingsBase.useRoles") + +# #23228 single leader per cluster data center +ProblemFilters.exclude[Problem]("akka.cluster.Gossip*") +ProblemFilters.exclude[Problem]("akka.cluster.ClusterCoreDaemon*") +ProblemFilters.exclude[Problem]("akka.cluster.ClusterDomainEventPublisher*") +ProblemFilters.exclude[Problem]("akka.cluster.InternalClusterAction*") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ClusterEvent.diffReachable") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ClusterEvent.diffLeader") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ClusterEvent.diffRolesLeader") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ClusterEvent.diffSeen") +ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.cluster.ClusterEvent.diffReachability") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ClusterEvent.diffUnreachable") +ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.cluster.ClusterEvent.diffMemberEvents") +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.cluster.protobuf.msg.ClusterMessages#GossipOrBuilder.getTombstonesCount") +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.cluster.protobuf.msg.ClusterMessages#GossipOrBuilder.getTombstones") +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.cluster.protobuf.msg.ClusterMessages#GossipOrBuilder.getTombstonesList") +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.cluster.protobuf.msg.ClusterMessages#GossipOrBuilder.getTombstonesOrBuilderList") +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.cluster.protobuf.msg.ClusterMessages#GossipOrBuilder.getTombstonesOrBuilder") diff --git a/akka-distributed-data/src/main/mima-filters/2.5.4.backwards.excludes b/akka-distributed-data/src/main/mima-filters/2.5.4.backwards.excludes new file mode 100644 index 0000000000..5154d81574 --- /dev/null +++ b/akka-distributed-data/src/main/mima-filters/2.5.4.backwards.excludes @@ -0,0 +1,4 @@ +# #23231 multi-DC Sharding +ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.cluster.ddata.Replicator.leader") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.cluster.ddata.Replicator.receiveLeaderChanged") +ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.cluster.ddata.Replicator.leader_=") From 5212661705c8a580836cc2247e06aba8e3629854 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Thu, 31 Aug 2017 13:08:25 +0200 Subject: [PATCH 34/34] fix snip of ClusterShardingTest, moved --- akka-docs/src/main/paradox/scala/cluster-dc.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/akka-docs/src/main/paradox/scala/cluster-dc.md b/akka-docs/src/main/paradox/scala/cluster-dc.md index 6461ef802e..4eec74de19 100644 --- a/akka-docs/src/main/paradox/scala/cluster-dc.md +++ b/akka-docs/src/main/paradox/scala/cluster-dc.md @@ -181,7 +181,7 @@ Scala : @@snip 
[ClusterShardingSpec.scala]($akka$/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSpec.scala) { #proxy-dc } Java -: @@snip [ClusterShardingTest.java]($akka$/akka-cluster-sharding/src/test/java/akka/cluster/sharding/ClusterShardingTest.java) { #proxy-dc } +: @@snip [ClusterShardingTest.java]($code$/java/jdocs/sharding/ClusterShardingTest.java) { #proxy-dc } Another way to manage global entities is to make sure that certain entity ids are located in only one data center by routing the messages to the right region. For example, the routing function