Merge paradox/scala/cluster-singleton.md and java/cluster-singleton.md (#23184)

Richard Imaoka 2017-06-20 16:27:08 +09:00 committed by Arnout Engelen
parent e93ae613d1
commit ce96053dcd
6 changed files with 162 additions and 165 deletions

View file

@@ -49,16 +49,20 @@ object ClusterSingletonManagerSpec extends MultiNodeConfig {
  nodeConfig(first, second, third, fourth, fifth, sixth)(
    ConfigFactory.parseString("akka.cluster.roles =[worker]"))
  //#singleton-message-classes
  object PointToPointChannel {
    case object UnregistrationOk
    //#singleton-message-classes
    case object RegisterConsumer
    case object UnregisterConsumer
    case object RegistrationOk
    case object UnexpectedRegistration
    case object UnregistrationOk
    case object UnexpectedUnregistration
    case object Reset
    case object ResetOk
    //#singleton-message-classes
  }
  //#singleton-message-classes
  /**
   * This channel is extremely strict with regards to
@@ -105,12 +109,14 @@ object ClusterSingletonManagerSpec extends MultiNodeConfig {
}
}
  //#singleton-message-classes
  object Consumer {
    case object End
    case object GetCurrent
    case object Ping
    case object Pong
  }
  //#singleton-message-classes
  /**
   * The Singleton actor
@@ -136,8 +142,8 @@ object ClusterSingletonManagerSpec extends MultiNodeConfig {
      case n: Int ⇒
        current = n
        delegateTo ! n
      case x @ (RegistrationOk | UnexpectedRegistration) ⇒
        delegateTo ! x
      case message @ (RegistrationOk | UnexpectedRegistration) ⇒
        delegateTo ! message
      case GetCurrent ⇒
        sender() ! current
    //#consumer-end

View file

@@ -19,22 +19,21 @@ public class ClusterSingletonManagerTest {
    //#create-singleton-manager
    final ClusterSingletonManagerSettings settings =
        ClusterSingletonManagerSettings.create(system).withRole("worker");
    system.actorOf(ClusterSingletonManager.props(
        Props.create(Consumer.class, queue, testActor),
        new End(), settings), "consumer");
    system.actorOf(
        ClusterSingletonManager.props(
            Props.create(Consumer.class, () -> new Consumer(queue, testActor)),
            TestSingletonMessages.end(),
            settings),
        "consumer");
    //#create-singleton-manager
    //#create-singleton-proxy
    ClusterSingletonProxySettings proxySettings =
        ClusterSingletonProxySettings.create(system).withRole("worker");
    system.actorOf(ClusterSingletonProxy.props("/user/consumer", proxySettings),
    system.actorOf(ClusterSingletonProxy.props("/user/consumer", proxySettings),
        "consumerProxy");
    //#create-singleton-proxy
  }
  public static class End {
  }
  public static class Consumer {
  }
}

View file

@@ -0,0 +1,73 @@
/**
 * Copyright (C) 2009-2017 Lightbend Inc. <http://www.lightbend.com>
 */
package akka.cluster.singleton;

import akka.actor.AbstractActor;
import akka.actor.ActorRef;
import akka.event.Logging;
import akka.event.LoggingAdapter;
import akka.cluster.singleton.TestSingletonMessages.*;

public class Consumer extends AbstractActor {

  private final LoggingAdapter log = Logging.getLogger(getContext().getSystem(), this);

  ActorRef queue;
  ActorRef delegateTo;
  int current = 0;
  boolean stoppedBeforeUnregistration = true;

  public Consumer(ActorRef _queue, ActorRef _delegateTo) {
    queue = _queue;
    delegateTo = _delegateTo;
  }

  @Override
  public void preStart() {
    queue.tell(TestSingletonMessages.registerConsumer(), getSelf());
  }

  @Override
  public void postStop() {
    if (stoppedBeforeUnregistration)
      log.warning("Stopped before unregistration");
  }

  @Override
  public Receive createReceive() {
    return receiveBuilder()
      .match(Integer.class, n -> {
        if (n <= current)
          getContext().stop(getSelf());
        else {
          current = n;
          delegateTo.tell(n, getSelf());
        }
      })
      .match(RegistrationOk.class, message ->
        delegateTo.tell(message, getSelf())
      )
      .match(UnexpectedRegistration.class, message ->
        delegateTo.tell(message, getSelf())
      )
      .match(GetCurrent.class, message ->
        getSender().tell(current, getSelf())
      )
      //#consumer-end
      .match(End.class, message ->
        queue.tell(TestSingletonMessages.unregisterConsumer(), getSelf())
      )
      .match(UnregistrationOk.class, message -> {
        stoppedBeforeUnregistration = false;
        getContext().stop(getSelf());
      })
      .match(Ping.class, message ->
        getSender().tell(TestSingletonMessages.pong(), getSelf())
      )
      //#consumer-end
      .build();
  }
}

View file

@@ -0,0 +1,40 @@
/**
 * Copyright (C) 2009-2017 Lightbend Inc. <http://www.lightbend.com>
 */
package akka.cluster.singleton;

//#singleton-message-classes
public class TestSingletonMessages {
  public static class UnregistrationOk {}
  public static class End {}
  public static class Ping {}
  public static class Pong {}
  public static class GetCurrent {}
  //#singleton-message-classes

  public static class RegisterConsumer {}
  public static class UnregisterConsumer {}
  public static class RegistrationOk {}
  public static class UnexpectedRegistration {}
  public static class UnexpectedUnregistration {}
  public static class Reset {}
  public static class ResetOk {}

  public static RegisterConsumer registerConsumer() { return new RegisterConsumer(); }
  public static UnregisterConsumer unregisterConsumer() { return new UnregisterConsumer(); }
  public static RegistrationOk registrationOk() { return new RegistrationOk(); }
  public static UnexpectedRegistration unexpectedRegistration() { return new UnexpectedRegistration(); }
  public static UnexpectedUnregistration unexpectedUnregistration() { return new UnexpectedUnregistration(); }
  public static Reset reset() { return new Reset(); }
  public static ResetOk resetOk() { return new ResetOk(); }

  //#singleton-message-classes
  public static UnregistrationOk unregistrationOk() { return new UnregistrationOk(); }
  public static End end() { return new End(); }
  public static Ping ping() { return new Ping(); }
  public static Pong pong() { return new Pong(); }
  public static GetCurrent getCurrent() { return new GetCurrent(); }
}
//#singleton-message-classes

View file

@@ -1,144 +0,0 @@
# Cluster Singleton
For some use cases it is convenient and sometimes also mandatory to ensure that
you have exactly one actor of a certain type running somewhere in the cluster.
Some examples:
* single point of responsibility for certain cluster-wide consistent decisions, or
coordination of actions across the cluster system
* single entry point to an external system
* single master, many workers
* centralized naming service, or routing logic
Using a singleton should not be the first design choice. It has several drawbacks,
such as creating a single point of bottleneck. A single point of failure is also a relevant concern,
but for some cases this feature takes care of that by making sure that another singleton
instance will eventually be started.
The cluster singleton pattern is implemented by `akka.cluster.singleton.ClusterSingletonManager`.
It manages one singleton actor instance among all cluster nodes or a group of nodes tagged with
a specific role. `ClusterSingletonManager` is an actor that is supposed to be started on
all nodes, or all nodes with the specified role, in the cluster. The actual singleton actor is
started by the `ClusterSingletonManager` on the oldest node by creating a child actor from
the supplied `Props`. `ClusterSingletonManager` makes sure that at most one singleton instance
is running at any point in time.
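As a minimal sketch of the Java side of this (the `MySingletonActor` class and the actor name are made up for illustration; `PoisonPill` is a reasonable termination message when no cleanup is needed):

```java
import akka.actor.ActorSystem;
import akka.actor.PoisonPill;
import akka.actor.Props;
import akka.cluster.singleton.ClusterSingletonManager;
import akka.cluster.singleton.ClusterSingletonManagerSettings;

public class SingletonSetup {
  public static void main(String[] args) {
    ActorSystem system = ActorSystem.create("ClusterSystem");

    // Start the manager on every node; only the oldest node
    // actually creates the singleton child from the given Props.
    ClusterSingletonManagerSettings settings =
        ClusterSingletonManagerSettings.create(system).withRole("worker");

    system.actorOf(
        ClusterSingletonManager.props(
            Props.create(MySingletonActor.class), // hypothetical singleton actor
            PoisonPill.getInstance(),             // terminationMessage
            settings),
        "mySingleton");
  }
}
```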
The singleton actor is always running on the oldest member with the specified role.
The oldest member is determined by `akka.cluster.Member#isOlderThan`.
This can change when that member is removed from the cluster. Be aware that there is a short time
period when there is no active singleton during the hand-over process.
The cluster failure detector will notice when the oldest node becomes unreachable due to
things like JVM crash, hard shutdown, or network failure. A new oldest node will then
take over and a new singleton actor will be created. For these failure scenarios there will
not be a graceful hand-over, but more than one active singleton is prevented by all
reasonable means. Some corner cases are eventually resolved by configurable timeouts.
You can access the singleton actor by using the provided `akka.cluster.singleton.ClusterSingletonProxy`,
which will route all messages to the current instance of the singleton. The proxy will keep track of
the oldest node in the cluster and resolve the singleton's `ActorRef` by explicitly sending the
singleton's `actorSelection` the `akka.actor.Identify` message and waiting for it to reply.
This is performed periodically if the singleton doesn't reply within a certain (configurable) time.
Given the implementation, there might be periods of time during which the `ActorRef` is unavailable,
e.g., when a node leaves the cluster. In these cases, the proxy will buffer the messages sent to the
singleton and then deliver them when the singleton is finally available. If the buffer is full,
the `ClusterSingletonProxy` will drop old messages when new messages are sent via the proxy.
The size of the buffer is configurable and buffering can be disabled by using a buffer size of 0.
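A minimal sketch of such a proxy, continuing the `system` from the sketch above (the manager path `/user/mySingleton`, the buffer size, and the actor name are illustrative):

```java
import akka.actor.ActorRef;
import akka.cluster.singleton.ClusterSingletonProxy;
import akka.cluster.singleton.ClusterSingletonProxySettings;

// The proxy keeps track of the oldest node and the current singleton.
ClusterSingletonProxySettings proxySettings =
    ClusterSingletonProxySettings.create(system)
        .withRole("worker")
        .withBufferSize(1000); // a buffer size of 0 disables buffering

ActorRef proxy = system.actorOf(
    ClusterSingletonProxy.props("/user/mySingleton", proxySettings),
    "mySingletonProxy");

// Messages sent while the singleton is unavailable (e.g. during
// hand-over) are buffered and delivered once it is resolved again.
proxy.tell("hello", ActorRef.noSender());
```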
It's worth noting that messages can always be lost because of the distributed nature of these actors.
As always, additional logic should be implemented in the singleton (acknowledgement) and in the
client (retry) actors to ensure at-least-once message delivery.
The singleton instance will not run on members with status @ref:[WeaklyUp](cluster-usage.md#weakly-up).
## Potential problems to be aware of
This pattern may seem very tempting to use at first, but it has several drawbacks, some of which are listed below:
* the cluster singleton may quickly become a *performance bottleneck*,
* you can not rely on the cluster singleton to be *non-stop* available — e.g. when the node on which the singleton has
been running dies, it will take a few seconds for this to be noticed and the singleton be migrated to another node,
* in the case of a *network partition* appearing in a Cluster that is using Automatic Downing (see docs for
@ref:[Downing](cluster-usage.md#automatic-vs-manual-downing)),
it may happen that the isolated clusters each decide to spin up their own singleton, meaning that there might be multiple
singletons running in the system, yet the Clusters have no way of finding out about them (because of the partition).
The last point in particular is something you should be aware of: in general, when using the Cluster Singleton pattern
you should take care of downing nodes yourself and not rely on the timing-based auto-down feature.
@@@ warning
**Don't use Cluster Singleton together with Automatic Downing**,
since it allows the cluster to split up into two separate clusters, which in turn will result
in *multiple Singletons* being started, one in each separate cluster!
@@@
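For illustration, a sketch of making that explicit at system startup (automatic downing is already off by default; the setting shown is the classic `akka.cluster.auto-down-unreachable-after`):

```java
import akka.actor.ActorSystem;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;

// Keep automatic downing disabled when running cluster singletons.
Config config = ConfigFactory
    .parseString("akka.cluster.auto-down-unreachable-after = off")
    .withFallback(ConfigFactory.load());
ActorSystem system = ActorSystem.create("ClusterSystem", config);
```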
## An Example
Assume that we need one single entry point to an external system: an actor that
receives messages from a JMS queue, with the strict requirement that only one
JMS consumer must exist to make sure that the messages are processed in order.
That is perhaps not how one would like to design things, but it is a typical real-world
scenario when integrating with external systems.
On each node in the cluster you need to start the `ClusterSingletonManager` and
supply the `Props` of the singleton actor, in this case the JMS queue consumer.
@@snip [ClusterSingletonManagerTest.java]($akka$/akka-cluster-tools/src/test/java/akka/cluster/singleton/ClusterSingletonManagerTest.java) { #create-singleton-manager }
Here we limit the singleton to nodes tagged with the `"worker"` role, but all nodes, independent of
role, can be used by not specifying `withRole`.
Here we use an application specific `terminationMessage` to be able to close the
resources before actually stopping the singleton actor. Note that `PoisonPill` is a
perfectly fine `terminationMessage` if you only need to stop the actor.
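As a sketch, a singleton actor could handle such a `terminationMessage` like this (the `End` message class and the cleanup step are placeholders):

```java
import akka.actor.AbstractActor;

public class MySingletonActor extends AbstractActor {
  // Hypothetical application-specific terminationMessage
  public static class End {}

  @Override
  public Receive createReceive() {
    return receiveBuilder()
        .match(End.class, message -> {
          // close connections, flush state, release resources here...
          getContext().stop(getSelf());
        })
        .build();
  }
}
```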
With the names given above, access to the singleton can be obtained from any cluster node using a properly
configured proxy.
@@snip [ClusterSingletonManagerTest.java]($akka$/akka-cluster-tools/src/test/java/akka/cluster/singleton/ClusterSingletonManagerTest.java) { #create-singleton-proxy }
A more comprehensive sample is available in the tutorial named [Distributed workers with Akka and Java!](https://github.com/typesafehub/activator-akka-distributed-workers-java).
## Dependencies
To use the Cluster Singleton you must add the following dependency to your project.
sbt
: @@@vars
```
"com.typesafe.akka" %% "akka-cluster-tools" % "$akka.version$"
```
@@@
Maven
: @@@vars
```
    <dependency>
      <groupId>com.typesafe.akka</groupId>
      <artifactId>akka-cluster-tools_$scala.binary_version$</artifactId>
      <version>$akka.version$</version>
    </dependency>
```
@@@
## Configuration
The following configuration properties are read by the `ClusterSingletonManagerSettings`
when created with an `ActorSystem` parameter. It is also possible to amend the `ClusterSingletonManagerSettings`
or create it from another config section with the same layout as below. `ClusterSingletonManagerSettings` is
a parameter to the `ClusterSingletonManager.props` factory method, i.e. each singleton can be configured
with different settings if needed.
@@snip [reference.conf]($akka$/akka-cluster-tools/src/main/resources/reference.conf) { #singleton-config }
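For example, amending the settings or reading them from a custom section could look like this sketch, continuing the `system` from earlier (the section name `my-app.my-singleton` is made up and must have the same layout as `akka.cluster.singleton`):

```java
import akka.cluster.singleton.ClusterSingletonManagerSettings;
import com.typesafe.config.Config;

// Default: read akka.cluster.singleton, then amend programmatically.
ClusterSingletonManagerSettings settings =
    ClusterSingletonManagerSettings.create(system).withRole("worker");

// Alternative: read from a custom config section with the same layout.
Config section = system.settings().config().getConfig("my-app.my-singleton");
ClusterSingletonManagerSettings fromSection =
    ClusterSingletonManagerSettings.create(section);
```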
The following configuration properties are read by the `ClusterSingletonProxySettings`
when created with an `ActorSystem` parameter. It is also possible to amend the `ClusterSingletonProxySettings`
or create it from another config section with the same layout as below. `ClusterSingletonProxySettings` is
a parameter to the `ClusterSingletonProxy.props` factory method, i.e. each singleton proxy can be configured
with different settings if needed.
@@snip [reference.conf]($akka$/akka-cluster-tools/src/main/resources/reference.conf) { #singleton-proxy-config }
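And the corresponding sketch for a proxy (again, the custom section name is made up and must mirror the layout of `akka.cluster.singleton-proxy`):

```java
import akka.cluster.singleton.ClusterSingletonProxySettings;
import com.typesafe.config.Config;

// Per-proxy settings from a custom config section.
Config proxySection =
    system.settings().config().getConfig("my-app.my-singleton-proxy");
ClusterSingletonProxySettings customProxySettings =
    ClusterSingletonProxySettings.create(proxySection);
```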

View file

@@ -0,0 +1 @@
../scala/cluster-singleton.md

View file

@@ -59,8 +59,8 @@ This pattern may seem to be very tempting to use at first, but it has several dr
* the cluster singleton may quickly become a *performance bottleneck*,
* you can not rely on the cluster singleton to be *non-stop* available — e.g. when the node on which the singleton has
been running dies, it will take a few seconds for this to be noticed and the singleton be migrated to another node,
* in the case of a *network partition* appearing in a Cluster that is using Automatic Downing (see Auto Downing docs for
@ref:[Downing](cluster-usage.md#automatic-vs-manual-downing)),
* in the case of a *network partition* appearing in a Cluster that is using Automatic Downing (see docs for
@ref:[Auto Downing](cluster-usage.md#automatic-vs-manual-downing)),
it may happen that the isolated clusters each decide to spin up their own singleton, meaning that there might be multiple
singletons running in the system, yet the Clusters have no way of finding out about them (because of the partition).
@@ -79,32 +79,54 @@ in *multiple Singletons* being started, one in each separate cluster!
Assume that we need one single entry point to an external system. An actor that
receives messages from a JMS queue with the strict requirement that only one
JMS consumer must exist to be make sure that the messages are processed in order.
JMS consumer must exist to make sure that the messages are processed in order.
That is perhaps not how one would like to design things, but a typical real-world
scenario when integrating with external systems.
Before explaining how to create a cluster singleton actor, let's define message classes @java[and their corresponding factory methods]
which will be used by the singleton.
Scala
: @@snip [ClusterSingletonManagerSpec.scala]($akka$/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerSpec.scala) { #singleton-message-classes }
Java
: @@snip [ClusterSingletonManagerTest.java]($akka$/akka-cluster-tools/src/test/java/akka/cluster/singleton/TestSingletonMessages.java) { #singleton-message-classes }
On each node in the cluster you need to start the `ClusterSingletonManager` and
supply the `Props` of the singleton actor, in this case the JMS queue consumer.
@@snip [ClusterSingletonManagerSpec.scala]($akka$/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerSpec.scala) { #create-singleton-manager }
Scala
: @@snip [ClusterSingletonManagerSpec.scala]($akka$/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerSpec.scala) { #create-singleton-manager }
Java
: @@snip [ClusterSingletonManagerTest.java]($akka$/akka-cluster-tools/src/test/java/akka/cluster/singleton/ClusterSingletonManagerTest.java) { #create-singleton-manager }
Here we limit the singleton to nodes tagged with the `"worker"` role, but all nodes, independent of
role, can be used by not specifying `withRole`.
Here we use an application specific `terminationMessage` to be able to close the
We use an application specific `terminationMessage` @java[(i.e. `TestSingletonMessages.end()` message)] to be able to close the
resources before actually stopping the singleton actor. Note that `PoisonPill` is a
perfectly fine `terminationMessage` if you only need to stop the actor.
Here is how the singleton actor handles the `terminationMessage` in this example.
@@snip [ClusterSingletonManagerSpec.scala]($akka$/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerSpec.scala) { #consumer-end }
Scala
: @@snip [ClusterSingletonManagerSpec.scala]($akka$/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerSpec.scala) { #consumer-end }
Java
: @@snip [ClusterSingletonManagerTest.java]($akka$/akka-cluster-tools/src/test/java/akka/cluster/singleton/Consumer.java) { #consumer-end }
With the names given above, access to the singleton can be obtained from any cluster node using a properly
configured proxy.
@@snip [ClusterSingletonManagerSpec.scala]($akka$/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerSpec.scala) { #create-singleton-proxy }
Scala
: @@snip [ClusterSingletonManagerSpec.scala]($akka$/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerSpec.scala) { #create-singleton-proxy }
A more comprehensive sample is available in the tutorial named [Distributed workers with Akka and Scala!](https://github.com/typesafehub/activator-akka-distributed-workers).
Java
: @@snip [ClusterSingletonManagerTest.java]($akka$/akka-cluster-tools/src/test/java/akka/cluster/singleton/ClusterSingletonManagerTest.java) { #create-singleton-proxy }
A more comprehensive sample is available in the tutorial named
@scala[[Distributed workers with Akka and Scala!](https://github.com/typesafehub/activator-akka-distributed-workers)]@java[[Distributed workers with Akka and Java!](https://github.com/typesafehub/activator-akka-distributed-workers-java)].
## Dependencies