diff --git a/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/stats/StatsMessages.scala b/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsMessages.scala similarity index 81% rename from akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/stats/StatsMessages.scala rename to akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsMessages.scala index 4617a15d3b..929809b50c 100644 --- a/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/stats/StatsMessages.scala +++ b/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsMessages.scala @@ -1,4 +1,4 @@ -package sample.cluster.stats +package akka.cluster.metrics.sample //#messages final case class StatsJob(text: String) diff --git a/akka-samples/akka-sample-cluster-scala/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSpec.scala b/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsSampleSpec.scala similarity index 89% rename from akka-samples/akka-sample-cluster-scala/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSpec.scala rename to akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsSampleSpec.scala index 8b958268ea..d53d24d7c0 100644 --- a/akka-samples/akka-sample-cluster-scala/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSpec.scala +++ b/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsSampleSpec.scala @@ -1,15 +1,11 @@ -package sample.cluster.stats - -import language.postfixOps -import scala.concurrent.duration._ +package akka.cluster.metrics.sample import akka.actor.Props -import akka.actor.RootActorPath import akka.cluster.Cluster -import akka.cluster.Member -import akka.cluster.MemberStatus -import akka.cluster.ClusterEvent.CurrentClusterState -import akka.cluster.ClusterEvent.MemberUp +import akka.cluster.ClusterEvent.{ CurrentClusterState, MemberUp } + +import scala.concurrent.duration._ +import 
scala.language.postfixOps //#MultiNodeConfig import akka.remote.testkit.MultiNodeConfig @@ -24,7 +20,7 @@ object StatsSampleSpecConfig extends MultiNodeConfig { def nodeList = Seq(first, second, third) // Extract individual sigar library for every node. - nodeList foreach { role => + nodeList foreach { role ⇒ nodeConfig(role) { ConfigFactory.parseString(s""" # Enable metrics extension in akka-cluster-metrics. @@ -67,11 +63,9 @@ class StatsSampleSpecMultiJvmNode3 extends StatsSampleSpec //#concrete-tests //#abstract-test -import org.scalatest.BeforeAndAfterAll -import org.scalatest.WordSpecLike -import org.scalatest.Matchers import akka.remote.testkit.MultiNodeSpec import akka.testkit.ImplicitSender +import org.scalatest.{ BeforeAndAfterAll, Matchers, WordSpecLike } abstract class StatsSampleSpec extends MultiNodeSpec(StatsSampleSpecConfig) with WordSpecLike with Matchers with BeforeAndAfterAll @@ -107,7 +101,7 @@ abstract class StatsSampleSpec extends MultiNodeSpec(StatsSampleSpecConfig) system.actorOf(Props[StatsWorker], "statsWorker") system.actorOf(Props[StatsService], "statsService") - receiveN(3).collect { case MemberUp(m) => m.address }.toSet should be( + receiveN(3).collect { case MemberUp(m) ⇒ m.address }.toSet should be( Set(firstAddress, secondAddress, thirdAddress)) Cluster(system).unsubscribe(testActor) diff --git a/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsService.scala b/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsService.scala new file mode 100644 index 0000000000..6fd82f4e07 --- /dev/null +++ b/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsService.scala @@ -0,0 +1,77 @@ +package akka.cluster.metrics.sample + +import akka.actor.{ Actor, ActorRef, Props, ReceiveTimeout } +import akka.routing.ConsistentHashingRouter.ConsistentHashableEnvelope +import akka.routing.FromConfig + +import scala.concurrent.duration._ + +//#service +class StatsService extends Actor { + 
// This router is used both with lookup and deploy of routees. If you + // have a router with only lookup of routees you can use Props.empty + // instead of Props[StatsWorker.class]. + val workerRouter = context.actorOf( + FromConfig.props(Props[StatsWorker]), + name = "workerRouter") + + def receive = { + case StatsJob(text) if text != "" ⇒ + val words = text.split(" ") + val replyTo = sender() // important to not close over sender() + // create actor that collects replies from workers + val aggregator = context.actorOf(Props( + classOf[StatsAggregator], words.size, replyTo)) + words foreach { word ⇒ + workerRouter.tell( + ConsistentHashableEnvelope(word, word), aggregator) + } + } +} + +class StatsAggregator(expectedResults: Int, replyTo: ActorRef) extends Actor { + var results = IndexedSeq.empty[Int] + context.setReceiveTimeout(3.seconds) + + def receive = { + case wordCount: Int ⇒ + results = results :+ wordCount + if (results.size == expectedResults) { + val meanWordLength = results.sum.toDouble / results.size + replyTo ! StatsResult(meanWordLength) + context.stop(self) + } + case ReceiveTimeout ⇒ + replyTo ! 
JobFailed("Service unavailable, try again later") + context.stop(self) + } +} +//#service + +// not used, only for documentation +abstract class StatsService2 extends Actor { + //#router-lookup-in-code + import akka.cluster.routing.{ ClusterRouterGroup, ClusterRouterGroupSettings } + import akka.routing.ConsistentHashingGroup + + val workerRouter = context.actorOf( + ClusterRouterGroup(ConsistentHashingGroup(Nil), ClusterRouterGroupSettings( + totalInstances = 100, routeesPaths = List("/user/statsWorker"), + allowLocalRoutees = true, useRole = Some("compute"))).props(), + name = "workerRouter2") + //#router-lookup-in-code +} + +// not used, only for documentation +abstract class StatsService3 extends Actor { + //#router-deploy-in-code + import akka.cluster.routing.{ ClusterRouterPool, ClusterRouterPoolSettings } + import akka.routing.ConsistentHashingPool + + val workerRouter = context.actorOf( + ClusterRouterPool(ConsistentHashingPool(0), ClusterRouterPoolSettings( + totalInstances = 100, maxInstancesPerNode = 3, + allowLocalRoutees = false, useRole = None)).props(Props[StatsWorker]), + name = "workerRouter3") + //#router-deploy-in-code +} diff --git a/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/stats/StatsWorker.scala b/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsWorker.scala similarity index 61% rename from akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/stats/StatsWorker.scala rename to akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsWorker.scala index b60993f1df..0ab49cb0ed 100644 --- a/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/stats/StatsWorker.scala +++ b/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsWorker.scala @@ -1,4 +1,4 @@ -package sample.cluster.stats +package akka.cluster.metrics.sample import akka.actor.Actor @@ -6,16 +6,16 @@ import akka.actor.Actor class StatsWorker extends Actor { var cache = 
Map.empty[String, Int] def receive = { - case word: String => + case word: String ⇒ val length = cache.get(word) match { - case Some(x) => x - case None => + case Some(x) ⇒ x + case None ⇒ val x = word.length - cache += (word -> x) + cache += (word → x) x } sender() ! length } } -//#worker \ No newline at end of file +//#worker diff --git a/akka-docs/rst/additional/osgi.rst b/akka-docs/rst/additional/osgi.rst index a9c4e536fe..221ac9a4e3 100644 --- a/akka-docs/rst/additional/osgi.rst +++ b/akka-docs/rst/additional/osgi.rst @@ -136,5 +136,4 @@ Sample A complete sample project is provided in `akka-sample-osgi-dining-hakkers`_ -.. _akka-sample-osgi-dining-hakkers: @github@/akka-samples/akka-sample-osgi-dining-hakkers - +.. _akka-sample-osgi-dining-hakkers: @samples@/tree/master/akka-sample-osgi-dining-hakkers diff --git a/akka-docs/rst/dev/multi-jvm-testing.rst b/akka-docs/rst/dev/multi-jvm-testing.rst index ae0f692dd8..57da935176 100644 --- a/akka-docs/rst/dev/multi-jvm-testing.rst +++ b/akka-docs/rst/dev/multi-jvm-testing.rst @@ -11,7 +11,7 @@ Useful for integration testing where multiple systems communicate with each othe Setup ===== -The multi-JVM testing is an sbt plugin that you can find at ``_. +The multi-JVM testing is an sbt plugin that you can find at ``_. You can add it as a plugin by adding the following to your project/plugins.sbt: @@ -21,14 +21,11 @@ You can then add multi-JVM testing to ``build.sbt`` or ``project/Build.scala`` b settings and config. Please note that MultiJvm test sources are located in ``src/multi-jvm/...``, and not in ``src/test/...``. -Here is an example ``build.sbt`` file for sbt 0.13 that uses the MultiJvm plugin: - -.. includecode:: ../../../akka-samples/akka-sample-multi-node-scala/build.sbt - You can specify JVM options for the forked JVMs:: jvmOptions in MultiJvm := Seq("-Xmx256M") +Here is an example of a `sample project`_ that uses the ``sbt-multi-jvm`` plugin. 
Running tests ============= @@ -206,3 +203,5 @@ Multi Node Additions There has also been some additions made to the ``SbtMultiJvm`` plugin to accommodate the :ref:`experimental ` module :ref:`multi node testing `, described in that section. + +.. _sample project: @samples@/tree/master/akka-sample-multi-node-scala \ No newline at end of file diff --git a/akka-docs/rst/dev/multi-node-testing.rst b/akka-docs/rst/dev/multi-node-testing.rst index 84572b7ab3..d1ee52f272 100644 --- a/akka-docs/rst/dev/multi-node-testing.rst +++ b/akka-docs/rst/dev/multi-node-testing.rst @@ -181,18 +181,18 @@ A Multi Node Testing Example First we need some scaffolding to hook up the ``MultiNodeSpec`` with your favorite test framework. Lets define a trait ``STMultiNodeSpec`` that uses ScalaTest to start and stop ``MultiNodeSpec``. -.. includecode:: ../../../akka-samples/akka-sample-multi-node-scala/src/test/scala/sample/multinode/STMultiNodeSpec.scala#example +.. includecode:: ../../../akka-remote-tests/src/test/scala/akka/remote/testkit/STMultiNodeSpec.scala#example Then we need to define a configuration. Lets use two nodes ``"node1`` and ``"node2"`` and call it ``MultiNodeSampleConfig``. -.. includecode:: ../../../akka-samples/akka-sample-multi-node-scala/src/multi-jvm/scala/sample/multinode/MultiNodeSample.scala +.. includecode:: ../../../akka-remote-tests/src/multi-jvm/scala/akka/remote/sample/MultiNodeSample.scala :include: package,config And then finally to the node test code. That starts the two nodes, and demonstrates a barrier, and a remote actor message send/receive. -.. includecode:: ../../../akka-samples/akka-sample-multi-node-scala/src/multi-jvm/scala/sample/multinode/MultiNodeSample.scala +.. 
includecode:: ../../../akka-remote-tests/src/multi-jvm/scala/akka/remote/sample/MultiNodeSample.scala :include: package,spec The easiest way to run this example yourself is to download `Lightbend Activator `_ diff --git a/akka-docs/rst/intro/deployment-scenarios.rst b/akka-docs/rst/intro/deployment-scenarios.rst index 03a7686c35..2921935247 100644 --- a/akka-docs/rst/intro/deployment-scenarios.rst +++ b/akka-docs/rst/intro/deployment-scenarios.rst @@ -12,52 +12,32 @@ Akka can be used in different ways: - As a library: used as a regular JAR on the classpath and/or in a web app, to be put into ``WEB-INF/lib`` -- Package with `sbt-native-packager `_ +- As an application packaged with `sbt-native-packager `_ -- Package and deploy using `Lightbend ConductR `_. +- As an application packaged and deployed using `Lightbend ConductR `_. Native Packager =============== `sbt-native-packager `_ is a tool for creating -distributions of any type of application, including an Akka applications. +distributions of any type of application, including Akka applications. Define sbt version in ``project/build.properties`` file: .. code-block:: none - sbt.version=0.13.7 + sbt.version=0.13.13 Add `sbt-native-packager `_ in ``project/plugins.sbt`` file: .. code-block:: none - addSbtPlugin("com.typesafe.sbt" % "sbt-native-packager" % "1.0.0-RC1") + addSbtPlugin("com.typesafe.sbt" % "sbt-native-packager" % "1.1.5") -Use the package settings and optionally specify the mainClass in ``build.sbt`` file: +Follow the instructions for the ``JavaAppPackaging`` in the `sbt-native-packager plugin documentation`_. -.. includecode:: ../../../akka-samples/akka-sample-main-scala/build.sbt - - -.. note:: Use the ``JavaServerAppPackaging``. Don't use the deprecated ``AkkaAppPackaging`` (previously named - ``packageArchetype.akka_application``), since it doesn't have the same flexibility and quality - as the ``JavaServerAppPackaging``. - -Use sbt task ``dist`` package the application. 
- -To start the application (on a unix-based system): - -.. code-block:: none - - cd target/universal/ - unzip akka-sample-main-scala-2.5-SNAPSHOT.zip - chmod u+x akka-sample-main-scala-2.5-SNAPSHOT/bin/akka-sample-main-scala - akka-sample-main-scala-2.5-SNAPSHOT/bin/akka-sample-main-scala sample.hello.Main - -Use ``Ctrl-C`` to interrupt and exit the application. - -On a Windows machine you can also use the ``bin\akka-sample-main-scala.bat`` script. +.. _sbt-native-packager plugin documentation: http://sbt-native-packager.readthedocs.io/en/latest/archetypes/java_app/index.html In a Docker container diff --git a/akka-docs/rst/java/cluster-metrics.rst b/akka-docs/rst/java/cluster-metrics.rst index 1eb88ec1c2..7fa1476ad4 100644 --- a/akka-docs/rst/java/cluster-metrics.rst +++ b/akka-docs/rst/java/cluster-metrics.rst @@ -125,25 +125,44 @@ Let's take a look at this router in action. What can be more demanding than calc The backend worker that performs the factorial calculation: -.. includecode:: ../../../akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/factorial/FactorialBackend.java#backend +.. includecode:: code/docs/cluster/FactorialBackend.java#backend The frontend that receives user jobs and delegates to the backends via the router: -.. includecode:: ../../../akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/factorial/FactorialFrontend.java#frontend +.. includecode:: code/docs/cluster/FactorialFrontend.java#frontend As you can see, the router is defined in the same way as other routers, and in this case it is configured as follows: -.. includecode:: ../../../akka-samples/akka-sample-cluster-java/src/main/resources/factorial.conf#adaptive-router +:: + + akka.actor.deployment { + /factorialFrontend/factorialBackendRouter = { + # Router type provided by metrics extension. + router = cluster-metrics-adaptive-group + # Router parameter specific for metrics extension. 
+ # metrics-selector = heap + # metrics-selector = load + # metrics-selector = cpu + metrics-selector = mix + # + routees.paths = ["/user/factorialBackend"] + cluster { + enabled = on + use-role = backend + allow-local-routees = off + } + } + } It is only ``router`` type and the ``metrics-selector`` parameter that is specific to this router, other things work in the same way as other routers. The same type of router could also have been defined in code: -.. includecode:: ../../../akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/factorial/Extra.java#router-lookup-in-code +.. includecode:: code/docs/cluster/FactorialFrontend.java#router-lookup-in-code -.. includecode:: ../../../akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/factorial/Extra.java#router-deploy-in-code +.. includecode:: code/docs/cluster/FactorialFrontend.java#router-deploy-in-code The `Lightbend Activator `_ tutorial named `Akka Cluster Samples with Java `_. @@ -154,7 +173,7 @@ Subscribe to Metrics Events It is possible to subscribe to the metrics events directly to implement other functionality. -.. includecode:: ../../../akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/factorial/MetricsListener.java#metrics-listener +.. includecode:: code/docs/cluster/MetricsListener.java#metrics-listener Custom Metrics Collector ------------------------ diff --git a/akka-docs/rst/java/cluster-usage.rst b/akka-docs/rst/java/cluster-usage.rst index 96ddda0b24..14fcaa6d5b 100644 --- a/akka-docs/rst/java/cluster-usage.rst +++ b/akka-docs/rst/java/cluster-usage.rst @@ -28,7 +28,41 @@ It joins the cluster and an actor subscribes to cluster membership events and lo The ``application.conf`` configuration looks like this: -.. 
includecode:: ../../../akka-samples/akka-sample-cluster-java/src/main/resources/application.conf#snippet +:: + + akka { + actor { + provider = "cluster" + } + remote { + log-remote-lifecycle-events = off + netty.tcp { + hostname = "127.0.0.1" + port = 0 + } + } + + cluster { + seed-nodes = [ + "akka.tcp://ClusterSystem@127.0.0.1:2551", + "akka.tcp://ClusterSystem@127.0.0.1:2552"] + + # auto downing is NOT safe for production deployments. + # you may want to use it during development, read more about it in the docs. + # + # auto-down-unreachable-after = 10s + } + } + + # Disable legacy metrics in akka-cluster. + akka.cluster.metrics.enabled=off + + # Enable metrics extension in akka-cluster-metrics. + akka.extensions=["akka.cluster.metrics.ClusterMetricsExtension"] + + # Sigar native library extract location during tests. + # Note: use per-jvm-instance folder when running multiple jvm on one host. + akka.cluster.metrics.native-library-extract-folder=${user.dir}/target/native To enable cluster capabilities in your Akka project you should, at a minimum, add the :ref:`remoting-java` settings, but with ``cluster``. @@ -45,13 +79,13 @@ ip-addresses or host names of the machines in ``application.conf`` instead of `` An actor that uses the cluster extension may look like this: -.. literalinclude:: ../../../akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/simple/SimpleClusterListener.java +.. literalinclude:: code/docs/cluster/SimpleClusterListener.java :language: java The actor registers itself as subscriber of certain cluster events. It receives events corresponding to the current state of the cluster when the subscription starts and then it receives events for changes that happen in the cluster. -The easiest way to run this example yourself is to download `Lightbend Activator `_ + The easiest way to run this example yourself is to download `Lightbend Activator `_ and open the tutorial named `Akka Cluster Samples with Java `_. 
It contains instructions of how to run the ``SimpleClusterApp``. @@ -140,7 +174,7 @@ It can also be performed programmatically with ``Cluster.get(system).down(addres A pre-packaged solution for the downing problem is provided by `Split Brain Resolver `_, -which is part of the `Lightbend Reactive Platform `_. +which is part of the `Lightbend Reactive Platform `_. If you don’t use RP, you should anyway carefully read the `documentation `_ of the Split Brain Resolver and make sure that the solution you are using handles the concerns described there. @@ -188,13 +222,13 @@ It can also be performed programmatically with: Note that this command can be issued to any member in the cluster, not necessarily the one that is leaving. -The :ref:`coordinated-shutdown-java` will automatically run when the cluster node sees itself as -``Exiting``, i.e. leaving from another node will trigger the shutdown process on the leaving node. -Tasks for graceful leaving of cluster including graceful shutdown of Cluster Singletons and -Cluster Sharding are added automatically when Akka Cluster is used, i.e. running the shutdown -process will also trigger the graceful leaving if it's not already in progress. +The :ref:`coordinated-shutdown-java` will automatically run when the cluster node sees itself as +``Exiting``, i.e. leaving from another node will trigger the shutdown process on the leaving node. +Tasks for graceful leaving of cluster including graceful shutdown of Cluster Singletons and +Cluster Sharding are added automatically when Akka Cluster is used, i.e. running the shutdown +process will also trigger the graceful leaving if it's not already in progress. -Normally this is handled automatically, but in case of network failures during this process it might still +Normally this is handled automatically, but in case of network failures during this process it might still be necessary to set the node’s status to ``Down`` in order to complete the removal. .. 
_weakly_up_java: @@ -206,7 +240,7 @@ If a node is ``unreachable`` then gossip convergence is not possible and therefo ``leader`` actions are also not possible. However, we still might want new nodes to join the cluster in this scenario. -``Joining`` members will be promoted to ``WeaklyUp`` and become part of the cluster if +``Joining`` members will be promoted to ``WeaklyUp`` and become part of the cluster if convergence can't be reached. Once gossip convergence is reached, the leader will move ``WeaklyUp`` members to ``Up``. @@ -227,7 +261,7 @@ Subscribe to Cluster Events You can subscribe to change notifications of the cluster membership by using ``Cluster.get(system).subscribe``. -.. includecode:: ../../../akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/simple/SimpleClusterListener2.java#subscribe +.. includecode:: code/docs/cluster/SimpleClusterListener2.java#subscribe A snapshot of the full state, ``akka.cluster.ClusterEvent.CurrentClusterState``, is sent to the subscriber as the first message, followed by events for incremental updates. @@ -244,7 +278,7 @@ the events corresponding to the current state to mimic what you would have seen listening to the events when they occurred in the past. Note that those initial events only correspond to the current state and it is not the full history of all changes that actually has occurred in the cluster. -.. includecode:: ../../../akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/simple/SimpleClusterListener.java#subscribe +.. includecode:: code/docs/cluster/SimpleClusterListener.java#subscribe The events to track the life-cycle of members are: @@ -280,11 +314,11 @@ added or removed to the cluster dynamically. Messages: -.. includecode:: ../../../akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/transformation/TransformationMessages.java#messages +.. 
includecode:: code/docs/cluster/TransformationMessages.java#messages The backend worker that performs the transformation job: -.. includecode:: ../../../akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/transformation/TransformationBackend.java#backend +.. includecode:: code/docs/cluster/TransformationBackend.java#backend Note that the ``TransformationBackend`` actor subscribes to cluster events to detect new, potential, frontend nodes, and send them a registration message so that they know @@ -292,7 +326,7 @@ that they can use the backend worker. The frontend that receives user jobs and delegates to one of the registered backend workers: -.. includecode:: ../../../akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/transformation/TransformationFrontend.java#frontend +.. includecode:: code/docs/cluster/TransformationFrontend.java#frontend Note that the ``TransformationFrontend`` actor watch the registered backend to be able to remove it from its list of available backend workers. @@ -301,8 +335,8 @@ network failures and JVM crashes, in addition to graceful termination of watched actor. Death watch generates the ``Terminated`` message to the watching actor when the unreachable cluster node has been downed and removed. -The `Lightbend Activator `_ tutorial named -`Akka Cluster Samples with Java `_. +The Akka sample named +`Akka Cluster Sample with Java `_. contains the full source code and instructions of how to run the **Worker Dial-in Example**. Node Roles @@ -326,20 +360,23 @@ A common use case is to start actors after the cluster has been initialized, members have joined, and the cluster has reached a certain size. With a configuration option you can define required number of members -before the leader changes member status of 'Joining' members to 'Up'. +before the leader changes member status of 'Joining' members to 'Up'.:: -.. 
includecode:: ../../../akka-samples/akka-sample-cluster-java/src/main/resources/factorial.conf#min-nr-of-members + akka.cluster.min-nr-of-members = 3 In a similar way you can define required number of members of a certain role -before the leader changes member status of 'Joining' members to 'Up'. +before the leader changes member status of 'Joining' members to 'Up'.:: -.. includecode:: ../../../akka-samples/akka-sample-cluster-java/src/main/resources/factorial.conf#role-min-nr-of-members + akka.cluster.role { + frontend.min-nr-of-members = 1 + backend.min-nr-of-members = 2 + } You can start the actors in a ``registerOnMemberUp`` callback, which will be invoked when the current member status is changed to 'Up', i.e. the cluster has at least the defined number of members. -.. includecode:: ../../../akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/factorial/FactorialFrontendMain.java#registerOnUp +.. includecode:: code/docs/cluster/FactorialFrontendMain.java#registerOnUp This callback can be used for other things than starting actors. @@ -508,9 +545,19 @@ Router with Group of Routees ---------------------------- When using a ``Group`` you must start the routee actors on the cluster member nodes. -That is not done by the router. The configuration for a group looks like this: +That is not done by the router. The configuration for a group looks like this::: -.. includecode:: ../../../akka-samples/akka-sample-cluster-java/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSpec.scala#router-lookup-config + akka.actor.deployment { + /statsService/workerRouter { + router = consistent-hashing-group + routees.paths = ["/user/statsWorker"] + cluster { + enabled = on + allow-local-routees = on + use-role = compute + } + } + } .. note:: The routee actors should be started as early as possible when starting the actor system, because @@ -527,7 +574,7 @@ Set it to a lower value if you want to limit total number of routees. 
The same type of router could also have been defined in code: -.. includecode:: ../../../akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/stats/Extra.java#router-lookup-in-code +.. includecode:: code/docs/cluster/StatsService.java#router-lookup-in-code See :ref:`cluster_configuration_java` section for further descriptions of the settings. @@ -545,40 +592,60 @@ the average number of characters per word when all results have been collected. Messages: -.. includecode:: ../../../akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/stats/StatsMessages.java#messages +.. includecode:: code/docs/cluster/StatsMessages.java#messages The worker that counts number of characters in each word: -.. includecode:: ../../../akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/stats/StatsWorker.java#worker +.. includecode:: code/docs/cluster/StatsWorker.java#worker The service that receives text from users and splits it up into words, delegates to workers and aggregates: -.. includecode:: ../../../akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/stats/StatsService.java#service +.. includecode:: code/docs/cluster/StatsService.java#service -.. includecode:: ../../../akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/stats/StatsAggregator.java#aggregator +.. includecode:: code/docs/cluster/StatsAggregator.java#aggregator Note, nothing cluster specific so far, just plain actors. All nodes start ``StatsService`` and ``StatsWorker`` actors. Remember, routees are the workers in this case. -The router is configured with ``routees.paths``: +The router is configured with ``routees.paths``::: -.. 
includecode:: ../../../akka-samples/akka-sample-cluster-java/src/main/resources/stats1.conf#config-router-lookup + akka.actor.deployment { + /statsService/workerRouter { + router = consistent-hashing-group + routees.paths = ["/user/statsWorker"] + cluster { + enabled = on + allow-local-routees = on + use-role = compute + } + } + } This means that user requests can be sent to ``StatsService`` on any node and it will use ``StatsWorker`` on all nodes. -The `Lightbend Activator `_ tutorial named -`Akka Cluster Samples with Java `_. +The Akka sample named +`Akka Cluster Sample with Java `_. contains the full source code and instructions of how to run the **Router Example with Group of Routees**. Router with Pool of Remote Deployed Routees ------------------------------------------- When using a ``Pool`` with routees created and deployed on the cluster member nodes -the configuration for a router looks like this: +the configuration for a router looks like this::: -.. includecode:: ../../../akka-samples/akka-sample-cluster-java/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSingleMasterSpec.scala#router-deploy-config + akka.actor.deployment { + /statsService/singleton/workerRouter { + router = consistent-hashing-pool + cluster { + enabled = on + max-nr-of-instances-per-node = 3 + allow-local-routees = on + use-role = compute + } + } + } It is possible to limit the deployment of routees to member nodes tagged with a certain role by specifying ``use-role``. @@ -590,7 +657,7 @@ Set it to a lower value if you want to limit total number of routees. The same type of router could also have been defined in code: -.. includecode:: ../../../akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/stats/Extra.java#router-deploy-in-code +.. includecode:: code/docs/cluster/StatsService.java#router-deploy-in-code See :ref:`cluster_configuration_java` section for further descriptions of the settings. 
@@ -601,22 +668,32 @@ Let's take a look at how to use a cluster aware router on single master node tha and deploys workers. To keep track of a single master we use the :ref:`cluster-singleton-java` in the cluster-tools module. The ``ClusterSingletonManager`` is started on each node. -.. includecode:: ../../../akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/stats/StatsSampleOneMasterMain.java#create-singleton-manager +.. includecode:: code/docs/cluster/StatsSampleOneMasterMain.java#create-singleton-manager We also need an actor on each node that keeps track of where current single master exists and delegates jobs to the ``StatsService``. That is provided by the ``ClusterSingletonProxy``. -.. includecode:: ../../../akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/stats/StatsSampleOneMasterMain.java#singleton-proxy +.. includecode:: code/docs/cluster/StatsSampleOneMasterMain.java#singleton-proxy The ``ClusterSingletonProxy`` receives text from users and delegates to the current ``StatsService``, the single master. It listens to cluster events to lookup the ``StatsService`` on the oldest node. -All nodes start ``ClusterSingletonProxy`` and the ``ClusterSingletonManager``. The router is now configured like this: +All nodes start ``ClusterSingletonProxy`` and the ``ClusterSingletonManager``. The router is now configured like this::: -.. includecode:: ../../../akka-samples/akka-sample-cluster-java/src/main/resources/stats2.conf#config-router-deploy + akka.actor.deployment { + /statsService/singleton/workerRouter { + router = consistent-hashing-pool + cluster { + enabled = on + max-nr-of-instances-per-node = 3 + allow-local-routees = on + use-role = compute + } + } + } -The `Lightbend Activator `_ tutorial named -`Akka Cluster Samples with Java `_. +The Akka sample named +`Akka Cluster Sample with Java `_. contains the full source code and instructions of how to run the **Router Example with Pool of Remote Deployed Routees**. 
Cluster Metrics @@ -634,7 +711,7 @@ Management HTTP ---- -Information and management of the cluster is available with a HTTP API. +Information and management of the cluster is available with a HTTP API. See documentation of `akka/akka-cluster-management `_. .. _cluster_jmx_java: @@ -662,7 +739,7 @@ Command Line ------------ .. warning:: - **Deprecation warning** - The command line script has been deprecated and is scheduled for removal + **Deprecation warning** - The command line script has been deprecated and is scheduled for removal in the next major version. Use the :ref:`cluster_http_java` API with `curl `_ or similar instead. diff --git a/akka-docs/rst/java/code/docs/actorlambda/fsm/FSMDocTest.java b/akka-docs/rst/java/code/docs/actorlambda/fsm/FSMDocTest.java index 423621a146..3815347e7d 100644 --- a/akka-docs/rst/java/code/docs/actorlambda/fsm/FSMDocTest.java +++ b/akka-docs/rst/java/code/docs/actorlambda/fsm/FSMDocTest.java @@ -174,7 +174,7 @@ public class FSMDocTest extends AbstractJavaTest { expectMsgEquals(Active); expectMsgEquals(Data.Foo); String msg = expectMsgClass(String.class); - assertThat(msg, CoreMatchers.startsWith("LogEntry(SomeState,Foo,Actor[akka://FSMDocTest/system/")); + assertTrue(msg.startsWith("LogEntry(SomeState,Foo,Actor[akka://FSMDocTest/system/")); }}; } } diff --git a/akka-docs/rst/java/code/docs/cluster/ClusterDocTest.java b/akka-docs/rst/java/code/docs/cluster/ClusterDocTest.java index 913c190442..3d27251e80 100644 --- a/akka-docs/rst/java/code/docs/cluster/ClusterDocTest.java +++ b/akka-docs/rst/java/code/docs/cluster/ClusterDocTest.java @@ -21,7 +21,7 @@ public class ClusterDocTest extends AbstractJavaTest { @BeforeClass public static void setup() { system = ActorSystem.create("ClusterDocTest", - ConfigFactory.parseString(ClusterDocSpec.config())); + ConfigFactory.parseString(scala.docs.cluster.ClusterDocSpec.config())); } @AfterClass diff --git a/akka-docs/rst/java/code/docs/cluster/FactorialBackend.java 
b/akka-docs/rst/java/code/docs/cluster/FactorialBackend.java new file mode 100644 index 0000000000..bdb15acb3a --- /dev/null +++ b/akka-docs/rst/java/code/docs/cluster/FactorialBackend.java @@ -0,0 +1,47 @@ +package docs.cluster; + +import java.math.BigInteger; +import java.util.concurrent.Callable; +import scala.concurrent.Future; +import akka.actor.UntypedActor; +import akka.dispatch.Mapper; +import static akka.dispatch.Futures.future; +import static akka.pattern.Patterns.pipe; + +//#backend +public class FactorialBackend extends UntypedActor { + + @Override + public void onReceive(Object message) { + if (message instanceof Integer) { + final Integer n = (Integer) message; + Future f = future(new Callable() { + public BigInteger call() { + return factorial(n); + } + }, getContext().dispatcher()); + + Future result = f.map( + new Mapper() { + public FactorialResult apply(BigInteger factorial) { + return new FactorialResult(n, factorial); + } + }, getContext().dispatcher()); + + pipe(result, getContext().dispatcher()).to(getSender()); + + } else { + unhandled(message); + } + } + + BigInteger factorial(int n) { + BigInteger acc = BigInteger.ONE; + for (int i = 1; i <= n; ++i) { + acc = acc.multiply(BigInteger.valueOf(i)); + } + return acc; + } +} +//#backend + diff --git a/akka-docs/rst/java/code/docs/cluster/FactorialFrontend.java b/akka-docs/rst/java/code/docs/cluster/FactorialFrontend.java new file mode 100644 index 0000000000..0d0ad7be4e --- /dev/null +++ b/akka-docs/rst/java/code/docs/cluster/FactorialFrontend.java @@ -0,0 +1,107 @@ +package docs.cluster; + +import java.util.Arrays; +import java.util.Collections; +import java.util.concurrent.TimeUnit; + +import akka.actor.Props; +import akka.cluster.metrics.AdaptiveLoadBalancingGroup; +import akka.cluster.metrics.AdaptiveLoadBalancingPool; +import akka.cluster.metrics.HeapMetricsSelector; +import akka.cluster.metrics.SystemLoadAverageMetricsSelector; +import akka.cluster.routing.ClusterRouterGroup; +import 
akka.cluster.routing.ClusterRouterGroupSettings; +import akka.cluster.routing.ClusterRouterPool; +import akka.cluster.routing.ClusterRouterPoolSettings; +import akka.routing.ConsistentHashingGroup; +import akka.routing.ConsistentHashingPool; +import scala.concurrent.duration.Duration; +import akka.actor.ActorRef; +import akka.actor.ReceiveTimeout; +import akka.actor.UntypedActor; +import akka.event.Logging; +import akka.event.LoggingAdapter; +import akka.routing.FromConfig; + +//#frontend +public class FactorialFrontend extends UntypedActor { + final int upToN; + final boolean repeat; + + LoggingAdapter log = Logging.getLogger(getContext().system(), this); + + ActorRef backend = getContext().actorOf(FromConfig.getInstance().props(), + "factorialBackendRouter"); + + public FactorialFrontend(int upToN, boolean repeat) { + this.upToN = upToN; + this.repeat = repeat; + } + + @Override + public void preStart() { + sendJobs(); + getContext().setReceiveTimeout(Duration.create(10, TimeUnit.SECONDS)); + } + + @Override + public void onReceive(Object message) { + if (message instanceof FactorialResult) { + FactorialResult result = (FactorialResult) message; + if (result.n == upToN) { + log.debug("{}! 
= {}", result.n, result.factorial); + if (repeat) + sendJobs(); + else + getContext().stop(getSelf()); + } + + } else if (message instanceof ReceiveTimeout) { + log.info("Timeout"); + sendJobs(); + + } else { + unhandled(message); + } + } + + void sendJobs() { + log.info("Starting batch of factorials up to [{}]", upToN); + for (int n = 1; n <= upToN; n++) { + backend.tell(n, getSelf()); + } + } + +} +//#frontend + +//not used, only for documentation +abstract class FactorialFrontend2 extends UntypedActor { + //#router-lookup-in-code + int totalInstances = 100; + Iterable routeesPaths = Arrays.asList("/user/factorialBackend", ""); + boolean allowLocalRoutees = true; + String useRole = "backend"; + ActorRef backend = getContext().actorOf( + new ClusterRouterGroup(new AdaptiveLoadBalancingGroup( + HeapMetricsSelector.getInstance(), Collections. emptyList()), + new ClusterRouterGroupSettings(totalInstances, routeesPaths, + allowLocalRoutees, useRole)).props(), "factorialBackendRouter2"); + //#router-lookup-in-code +} + +//not used, only for documentation +abstract class FactorialFrontend3 extends UntypedActor { + //#router-deploy-in-code + int totalInstances = 100; + int maxInstancesPerNode = 3; + boolean allowLocalRoutees = false; + String useRole = "backend"; + ActorRef backend = getContext().actorOf( + new ClusterRouterPool(new AdaptiveLoadBalancingPool( + SystemLoadAverageMetricsSelector.getInstance(), 0), + new ClusterRouterPoolSettings(totalInstances, maxInstancesPerNode, + allowLocalRoutees, useRole)).props(Props + .create(FactorialBackend.class)), "factorialBackendRouter3"); + //#router-deploy-in-code +} diff --git a/akka-docs/rst/java/code/docs/cluster/FactorialFrontendMain.java b/akka-docs/rst/java/code/docs/cluster/FactorialFrontendMain.java new file mode 100644 index 0000000000..891b200536 --- /dev/null +++ b/akka-docs/rst/java/code/docs/cluster/FactorialFrontendMain.java @@ -0,0 +1,72 @@ +package docs.cluster; + + +import 
java.util.concurrent.TimeoutException; +import java.util.concurrent.TimeUnit; +import scala.concurrent.Await; +import scala.concurrent.duration.Duration; +import com.typesafe.config.Config; +import com.typesafe.config.ConfigFactory; + +import akka.actor.ActorSystem; +import akka.actor.Props; +import akka.cluster.Cluster; + +public class FactorialFrontendMain { + + public static void main(String[] args) { + final int upToN = 200; + + final Config config = ConfigFactory.parseString( + "akka.cluster.roles = [frontend]").withFallback( + ConfigFactory.load("factorial")); + + final ActorSystem system = ActorSystem.create("ClusterSystem", config); + system.log().info( + "Factorials will start when 2 backend members in the cluster."); + //#registerOnUp + Cluster.get(system).registerOnMemberUp(new Runnable() { + @Override + public void run() { + system.actorOf(Props.create(FactorialFrontend.class, upToN, true), + "factorialFrontend"); + } + }); + //#registerOnUp + + //#registerOnRemoved + Cluster.get(system).registerOnMemberRemoved(new Runnable() { + @Override + public void run() { + // exit JVM when ActorSystem has been terminated + final Runnable exit = new Runnable() { + @Override public void run() { + System.exit(0); + } + }; + system.registerOnTermination(exit); + + // shut down ActorSystem + system.terminate(); + + // In case ActorSystem shutdown takes longer than 10 seconds, + // exit the JVM forcefully anyway. + // We must spawn a separate thread to not block current thread, + // since that would have blocked the shutdown of the ActorSystem. 
+ new Thread() { + @Override public void run(){ + try { + Await.ready(system.whenTerminated(), Duration.create(10, TimeUnit.SECONDS)); + } catch (Exception e) { + System.exit(-1); + } + + } + }.start(); + } + }); + //#registerOnRemoved + + } + +} diff --git a/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/factorial/FactorialResult.java b/akka-docs/rst/java/code/docs/cluster/FactorialResult.java similarity index 90% rename from akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/factorial/FactorialResult.java rename to akka-docs/rst/java/code/docs/cluster/FactorialResult.java index 7cd49ce988..38dbcfc065 100644 --- a/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/factorial/FactorialResult.java +++ b/akka-docs/rst/java/code/docs/cluster/FactorialResult.java @@ -1,4 +1,4 @@ -package sample.cluster.factorial; +package docs.cluster; import java.math.BigInteger; import java.io.Serializable; diff --git a/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/factorial/MetricsListener.java b/akka-docs/rst/java/code/docs/cluster/MetricsListener.java similarity index 70% rename from akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/factorial/MetricsListener.java rename to akka-docs/rst/java/code/docs/cluster/MetricsListener.java index d9d8c510fe..265ecf28f6 100644 --- a/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/factorial/MetricsListener.java +++ b/akka-docs/rst/java/code/docs/cluster/MetricsListener.java @@ -1,7 +1,7 @@ -package sample.cluster.factorial; +package docs.cluster; //#metrics-listener -import akka.actor.AbstractActor; +import akka.actor.UntypedActor; import akka.cluster.Cluster; import akka.cluster.ClusterEvent.CurrentClusterState; import akka.cluster.metrics.ClusterMetricsChanged; @@ -13,7 +13,7 @@ import akka.cluster.metrics.ClusterMetricsExtension; import akka.event.Logging; import akka.event.LoggingAdapter; -public class MetricsListener extends 
AbstractActor { +public class MetricsListener extends UntypedActor { LoggingAdapter log = Logging.getLogger(getContext().system(), this); Cluster cluster = Cluster.get(getContext().system()); @@ -33,21 +33,23 @@ public class MetricsListener extends AbstractActor { extension.unsubscribe(getSelf()); } + @Override - public Receive createReceive() { - return receiveBuilder() - .match(ClusterMetricsChanged.class, clusterMetrics -> { - for (NodeMetrics nodeMetrics : clusterMetrics.getNodeMetrics()) { - if (nodeMetrics.address().equals(cluster.selfAddress())) { - logHeap(nodeMetrics); - logCpu(nodeMetrics); - } + public void onReceive(Object message) { + if (message instanceof ClusterMetricsChanged) { + ClusterMetricsChanged clusterMetrics = (ClusterMetricsChanged) message; + for (NodeMetrics nodeMetrics : clusterMetrics.getNodeMetrics()) { + if (nodeMetrics.address().equals(cluster.selfAddress())) { + logHeap(nodeMetrics); + logCpu(nodeMetrics); } - }) - .match(CurrentClusterState.class, message -> { - // Ignore. - }) - .build(); + } + + } else if (message instanceof CurrentClusterState) { + // Ignore. 
+ } else { + unhandled(message); + } } void logHeap(NodeMetrics nodeMetrics) { diff --git a/akka-docs/rst/java/code/docs/cluster/SimpleClusterListener.java b/akka-docs/rst/java/code/docs/cluster/SimpleClusterListener.java new file mode 100644 index 0000000000..b48cdcb67c --- /dev/null +++ b/akka-docs/rst/java/code/docs/cluster/SimpleClusterListener.java @@ -0,0 +1,54 @@ +package docs.cluster; + +import akka.actor.UntypedActor; +import akka.cluster.Cluster; +import akka.cluster.ClusterEvent; +import akka.cluster.ClusterEvent.MemberEvent; +import akka.cluster.ClusterEvent.MemberUp; +import akka.cluster.ClusterEvent.MemberRemoved; +import akka.cluster.ClusterEvent.UnreachableMember; +import akka.event.Logging; +import akka.event.LoggingAdapter; + +public class SimpleClusterListener extends UntypedActor { + LoggingAdapter log = Logging.getLogger(getContext().system(), this); + Cluster cluster = Cluster.get(getContext().system()); + + //subscribe to cluster changes + @Override + public void preStart() { + //#subscribe + cluster.subscribe(getSelf(), ClusterEvent.initialStateAsEvents(), + MemberEvent.class, UnreachableMember.class); + //#subscribe + } + + //re-subscribe when restart + @Override + public void postStop() { + cluster.unsubscribe(getSelf()); + } + + @Override + public void onReceive(Object message) { + if (message instanceof MemberUp) { + MemberUp mUp = (MemberUp) message; + log.info("Member is Up: {}", mUp.member()); + + } else if (message instanceof UnreachableMember) { + UnreachableMember mUnreachable = (UnreachableMember) message; + log.info("Member detected as unreachable: {}", mUnreachable.member()); + + } else if (message instanceof MemberRemoved) { + MemberRemoved mRemoved = (MemberRemoved) message; + log.info("Member is Removed: {}", mRemoved.member()); + + } else if (message instanceof MemberEvent) { + // ignore + + } else { + unhandled(message); + } + + } +} diff --git a/akka-docs/rst/java/code/docs/cluster/SimpleClusterListener2.java 
b/akka-docs/rst/java/code/docs/cluster/SimpleClusterListener2.java new file mode 100644 index 0000000000..6eebd523ea --- /dev/null +++ b/akka-docs/rst/java/code/docs/cluster/SimpleClusterListener2.java @@ -0,0 +1,57 @@ +package docs.cluster; + +import akka.actor.UntypedActor; +import akka.cluster.Cluster; +import akka.cluster.ClusterEvent.CurrentClusterState; +import akka.cluster.ClusterEvent.MemberEvent; +import akka.cluster.ClusterEvent.MemberUp; +import akka.cluster.ClusterEvent.MemberRemoved; +import akka.cluster.ClusterEvent.UnreachableMember; +import akka.event.Logging; +import akka.event.LoggingAdapter; + +public class SimpleClusterListener2 extends UntypedActor { + LoggingAdapter log = Logging.getLogger(getContext().system(), this); + Cluster cluster = Cluster.get(getContext().system()); + + //subscribe to cluster changes + @Override + public void preStart() { + //#subscribe + cluster.subscribe(getSelf(), MemberEvent.class, UnreachableMember.class); + //#subscribe + } + + //re-subscribe when restart + @Override + public void postStop() { + cluster.unsubscribe(getSelf()); + } + + @Override + public void onReceive(Object message) { + if (message instanceof CurrentClusterState) { + CurrentClusterState state = (CurrentClusterState) message; + log.info("Current members: {}", state.members()); + + } else if (message instanceof MemberUp) { + MemberUp mUp = (MemberUp) message; + log.info("Member is Up: {}", mUp.member()); + + } else if (message instanceof UnreachableMember) { + UnreachableMember mUnreachable = (UnreachableMember) message; + log.info("Member detected as unreachable: {}", mUnreachable.member()); + + } else if (message instanceof MemberRemoved) { + MemberRemoved mRemoved = (MemberRemoved) message; + log.info("Member is Removed: {}", mRemoved.member()); + + } else if (message instanceof MemberEvent) { + // ignore + + } else { + unhandled(message); + } + + } +} diff --git a/akka-docs/rst/java/code/docs/cluster/StatsAggregator.java 
b/akka-docs/rst/java/code/docs/cluster/StatsAggregator.java new file mode 100644 index 0000000000..76c8144c75 --- /dev/null +++ b/akka-docs/rst/java/code/docs/cluster/StatsAggregator.java @@ -0,0 +1,56 @@ +package docs.cluster; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.TimeUnit; + +import docs.cluster.StatsMessages.JobFailed; +import docs.cluster.StatsMessages.StatsResult; +import scala.concurrent.duration.Duration; +import akka.actor.ActorRef; +import akka.actor.ReceiveTimeout; +import akka.actor.UntypedActor; + +//#aggregator +public class StatsAggregator extends UntypedActor { + + final int expectedResults; + final ActorRef replyTo; + final List results = new ArrayList(); + + public StatsAggregator(int expectedResults, ActorRef replyTo) { + this.expectedResults = expectedResults; + this.replyTo = replyTo; + } + + @Override + public void preStart() { + getContext().setReceiveTimeout(Duration.create(3, TimeUnit.SECONDS)); + } + + @Override + public void onReceive(Object message) { + if (message instanceof Integer) { + Integer wordCount = (Integer) message; + results.add(wordCount); + if (results.size() == expectedResults) { + int sum = 0; + for (int c : results) + sum += c; + double meanWordLength = ((double) sum) / results.size(); + replyTo.tell(new StatsResult(meanWordLength), getSelf()); + getContext().stop(getSelf()); + } + + } else if (message == ReceiveTimeout.getInstance()) { + replyTo.tell(new JobFailed("Service unavailable, try again later"), + getSelf()); + getContext().stop(getSelf()); + + } else { + unhandled(message); + } + } + +} +//#aggregator diff --git a/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/stats/StatsMessages.java b/akka-docs/rst/java/code/docs/cluster/StatsMessages.java similarity index 97% rename from akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/stats/StatsMessages.java rename to akka-docs/rst/java/code/docs/cluster/StatsMessages.java index 
52d8c61ae7..b8ca17a50f 100644 --- a/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/stats/StatsMessages.java +++ b/akka-docs/rst/java/code/docs/cluster/StatsMessages.java @@ -1,4 +1,4 @@ -package sample.cluster.stats; +package docs.cluster; import java.io.Serializable; diff --git a/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/stats/StatsSampleClient.java b/akka-docs/rst/java/code/docs/cluster/StatsSampleClient.java similarity index 94% rename from akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/stats/StatsSampleClient.java rename to akka-docs/rst/java/code/docs/cluster/StatsSampleClient.java index 73883382d5..ff9cd530af 100644 --- a/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/stats/StatsSampleClient.java +++ b/akka-docs/rst/java/code/docs/cluster/StatsSampleClient.java @@ -1,4 +1,4 @@ -package sample.cluster.stats; +package docs.cluster; import java.util.ArrayList; import java.util.HashSet; @@ -6,9 +6,9 @@ import java.util.List; import java.util.Set; import java.util.concurrent.TimeUnit; -import sample.cluster.stats.StatsMessages.JobFailed; -import sample.cluster.stats.StatsMessages.StatsJob; -import sample.cluster.stats.StatsMessages.StatsResult; +import docs.cluster.StatsMessages.JobFailed; +import docs.cluster.StatsMessages.StatsJob; +import docs.cluster.StatsMessages.StatsResult; import java.util.concurrent.ThreadLocalRandom; import scala.concurrent.duration.Duration; import scala.concurrent.duration.FiniteDuration; diff --git a/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/stats/StatsSampleOneMasterClientMain.java b/akka-docs/rst/java/code/docs/cluster/StatsSampleOneMasterClientMain.java similarity index 93% rename from akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/stats/StatsSampleOneMasterClientMain.java rename to akka-docs/rst/java/code/docs/cluster/StatsSampleOneMasterClientMain.java index 7506687283..d4e30ff8d0 100644 --- 
a/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/stats/StatsSampleOneMasterClientMain.java +++ b/akka-docs/rst/java/code/docs/cluster/StatsSampleOneMasterClientMain.java @@ -1,4 +1,4 @@ -package sample.cluster.stats; +package docs.cluster; import com.typesafe.config.ConfigFactory; diff --git a/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/stats/StatsSampleOneMasterMain.java b/akka-docs/rst/java/code/docs/cluster/StatsSampleOneMasterMain.java similarity index 98% rename from akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/stats/StatsSampleOneMasterMain.java rename to akka-docs/rst/java/code/docs/cluster/StatsSampleOneMasterMain.java index 30ad9d7d94..388bdf9745 100644 --- a/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/stats/StatsSampleOneMasterMain.java +++ b/akka-docs/rst/java/code/docs/cluster/StatsSampleOneMasterMain.java @@ -1,4 +1,4 @@ -package sample.cluster.stats; +package docs.cluster; import com.typesafe.config.Config; import com.typesafe.config.ConfigFactory; diff --git a/akka-docs/rst/java/code/docs/cluster/StatsService.java b/akka-docs/rst/java/code/docs/cluster/StatsService.java new file mode 100644 index 0000000000..993cfc3f4b --- /dev/null +++ b/akka-docs/rst/java/code/docs/cluster/StatsService.java @@ -0,0 +1,85 @@ +package docs.cluster; + +import akka.cluster.routing.ClusterRouterGroup; +import akka.cluster.routing.ClusterRouterGroupSettings; +import akka.cluster.routing.ClusterRouterPool; +import akka.cluster.routing.ClusterRouterPoolSettings; +import akka.routing.ConsistentHashingGroup; +import akka.routing.ConsistentHashingPool; +import docs.cluster.StatsMessages.StatsJob; +import akka.actor.ActorRef; +import akka.actor.Props; +import akka.actor.UntypedActor; +import akka.routing.ConsistentHashingRouter.ConsistentHashableEnvelope; +import akka.routing.FromConfig; + +import java.util.Collections; + +//#service +public class StatsService extends UntypedActor { + + // 
This router is used both with lookup and deploy of routees. If you + // have a router with only lookup of routees you can use Props.empty() + // instead of Props.create(StatsWorker.class). + ActorRef workerRouter = getContext().actorOf( + FromConfig.getInstance().props(Props.create(StatsWorker.class)), + "workerRouter"); + + @Override + public void onReceive(Object message) { + if (message instanceof StatsJob) { + StatsJob job = (StatsJob) message; + if (job.getText().equals("")) { + unhandled(message); + } else { + final String[] words = job.getText().split(" "); + final ActorRef replyTo = getSender(); + + // create actor that collects replies from workers + ActorRef aggregator = getContext().actorOf( + Props.create(StatsAggregator.class, words.length, replyTo)); + + // send each word to a worker + for (String word : words) { + workerRouter.tell(new ConsistentHashableEnvelope(word, word), + aggregator); + } + } + + } else { + unhandled(message); + } + } +} +//#service + +//not used, only for documentation +abstract class StatsService2 extends UntypedActor { + //#router-lookup-in-code + int totalInstances = 100; + Iterable routeesPaths = Collections + .singletonList("/user/statsWorker"); + boolean allowLocalRoutees = true; + String useRole = "compute"; + ActorRef workerRouter = getContext().actorOf( + new ClusterRouterGroup(new ConsistentHashingGroup(routeesPaths), + new ClusterRouterGroupSettings(totalInstances, routeesPaths, + allowLocalRoutees, useRole)).props(), "workerRouter2"); + //#router-lookup-in-code +} + +//not used, only for documentation +abstract class StatsService3 extends UntypedActor { + //#router-deploy-in-code + int totalInstances = 100; + int maxInstancesPerNode = 3; + boolean allowLocalRoutees = false; + String useRole = "compute"; + ActorRef workerRouter = getContext().actorOf( + new ClusterRouterPool(new ConsistentHashingPool(0), + new ClusterRouterPoolSettings(totalInstances, maxInstancesPerNode, + allowLocalRoutees, useRole)).props(Props + 
.create(StatsWorker.class)), "workerRouter3"); + //#router-deploy-in-code +} + diff --git a/akka-docs/rst/java/code/docs/cluster/StatsWorker.java b/akka-docs/rst/java/code/docs/cluster/StatsWorker.java new file mode 100644 index 0000000000..9489fd0848 --- /dev/null +++ b/akka-docs/rst/java/code/docs/cluster/StatsWorker.java @@ -0,0 +1,30 @@ +package docs.cluster; + +import java.util.HashMap; +import java.util.Map; + +import akka.actor.UntypedActor; + +//#worker +public class StatsWorker extends UntypedActor { + + Map cache = new HashMap(); + + @Override + public void onReceive(Object message) { + if (message instanceof String) { + String word = (String) message; + Integer length = cache.get(word); + if (length == null) { + length = word.length(); + cache.put(word, length); + } + getSender().tell(length, getSelf()); + + } else { + unhandled(message); + } + } + +} +//#worker \ No newline at end of file diff --git a/akka-docs/rst/java/code/docs/cluster/TransformationBackend.java b/akka-docs/rst/java/code/docs/cluster/TransformationBackend.java new file mode 100644 index 0000000000..afe677b56e --- /dev/null +++ b/akka-docs/rst/java/code/docs/cluster/TransformationBackend.java @@ -0,0 +1,60 @@ +package docs.cluster; + +import static docs.cluster.TransformationMessages.BACKEND_REGISTRATION; +import docs.cluster.TransformationMessages.TransformationJob; +import docs.cluster.TransformationMessages.TransformationResult; +import akka.actor.UntypedActor; +import akka.cluster.Cluster; +import akka.cluster.ClusterEvent.CurrentClusterState; +import akka.cluster.ClusterEvent.MemberUp; +import akka.cluster.Member; +import akka.cluster.MemberStatus; + +//#backend +public class TransformationBackend extends UntypedActor { + + Cluster cluster = Cluster.get(getContext().system()); + + //subscribe to cluster changes, MemberUp + @Override + public void preStart() { + cluster.subscribe(getSelf(), MemberUp.class); + } + + //re-subscribe when restart + @Override + public void postStop() { 
+ cluster.unsubscribe(getSelf()); + } + + @Override + public void onReceive(Object message) { + if (message instanceof TransformationJob) { + TransformationJob job = (TransformationJob) message; + getSender().tell(new TransformationResult(job.getText().toUpperCase()), + getSelf()); + + } else if (message instanceof CurrentClusterState) { + CurrentClusterState state = (CurrentClusterState) message; + for (Member member : state.getMembers()) { + if (member.status().equals(MemberStatus.up())) { + register(member); + } + } + + } else if (message instanceof MemberUp) { + MemberUp mUp = (MemberUp) message; + register(mUp.member()); + + } else { + unhandled(message); + } + } + + void register(Member member) { + if (member.hasRole("frontend")) + getContext().actorSelection(member.address() + "/user/frontend").tell( + BACKEND_REGISTRATION, getSelf()); + } +} +//#backend diff --git a/akka-docs/rst/java/code/docs/cluster/TransformationFrontend.java b/akka-docs/rst/java/code/docs/cluster/TransformationFrontend.java new file mode 100644 index 0000000000..d06d92ca5e --- /dev/null +++ b/akka-docs/rst/java/code/docs/cluster/TransformationFrontend.java @@ -0,0 +1,48 @@ +package docs.cluster; + +import static docs.cluster.TransformationMessages.BACKEND_REGISTRATION; + +import java.util.ArrayList; +import java.util.List; + +import docs.cluster.TransformationMessages.JobFailed; +import docs.cluster.TransformationMessages.TransformationJob; +import akka.actor.ActorRef; +import akka.actor.Terminated; +import akka.actor.UntypedActor; + +//#frontend +public class TransformationFrontend extends UntypedActor { + + List backends = new ArrayList(); + int jobCounter = 0; + + @Override + public void onReceive(Object message) { + if ((message instanceof TransformationJob) && backends.isEmpty()) { + TransformationJob job = (TransformationJob) message; + getSender().tell( + new JobFailed("Service unavailable, try again later", job), + getSender()); + + } else if (message instanceof 
TransformationJob) { + TransformationJob job = (TransformationJob) message; + jobCounter++; + backends.get(jobCounter % backends.size()) + .forward(job, getContext()); + + } else if (message.equals(BACKEND_REGISTRATION)) { + getContext().watch(getSender()); + backends.add(getSender()); + + } else if (message instanceof Terminated) { + Terminated terminated = (Terminated) message; + backends.remove(terminated.getActor()); + + } else { + unhandled(message); + } + } + +} +//#frontend diff --git a/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/transformation/TransformationMessages.java b/akka-docs/rst/java/code/docs/cluster/TransformationMessages.java similarity index 96% rename from akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/transformation/TransformationMessages.java rename to akka-docs/rst/java/code/docs/cluster/TransformationMessages.java index 1942122002..677aebd48e 100644 --- a/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/transformation/TransformationMessages.java +++ b/akka-docs/rst/java/code/docs/cluster/TransformationMessages.java @@ -1,4 +1,4 @@ -package sample.cluster.transformation; +package docs.cluster; import java.io.Serializable; diff --git a/akka-samples/akka-sample-distributed-data-java/src/main/java/sample/distributeddata/ShoppingCart.java b/akka-docs/rst/java/code/docs/ddata/ShoppingCart.java similarity index 99% rename from akka-samples/akka-sample-distributed-data-java/src/main/java/sample/distributeddata/ShoppingCart.java rename to akka-docs/rst/java/code/docs/ddata/ShoppingCart.java index 6d469a8cce..8e48ba387d 100644 --- a/akka-samples/akka-sample-distributed-data-java/src/main/java/sample/distributeddata/ShoppingCart.java +++ b/akka-docs/rst/java/code/docs/ddata/ShoppingCart.java @@ -1,4 +1,4 @@ -package sample.distributeddata; +package docs.ddata; import static java.util.concurrent.TimeUnit.SECONDS; import java.io.Serializable; diff --git 
a/akka-samples/akka-sample-persistence-java-lambda/src/main/java/sample/persistence/PersistentActorExample.java b/akka-docs/rst/java/code/docs/persistence/PersistentActorExample.java similarity index 99% rename from akka-samples/akka-sample-persistence-java-lambda/src/main/java/sample/persistence/PersistentActorExample.java rename to akka-docs/rst/java/code/docs/persistence/PersistentActorExample.java index 493e73d549..b1dc915c81 100644 --- a/akka-samples/akka-sample-persistence-java-lambda/src/main/java/sample/persistence/PersistentActorExample.java +++ b/akka-docs/rst/java/code/docs/persistence/PersistentActorExample.java @@ -2,7 +2,7 @@ * Copyright (C) 2009-2017 Lightbend Inc. */ -package sample.persistence; +package docs.persistence; //#persistent-actor-example diff --git a/akka-docs/rst/java/distributed-data.rst b/akka-docs/rst/java/distributed-data.rst index 0a4038b9e2..724f3b7dec 100644 --- a/akka-docs/rst/java/distributed-data.rst +++ b/akka-docs/rst/java/distributed-data.rst @@ -193,11 +193,11 @@ the total size of the cluster. Here is an example of using ``writeMajority`` and ``readMajority``: -.. includecode:: ../../../akka-samples/akka-sample-distributed-data-java/src/main/java/sample/distributeddata/ShoppingCart.java#read-write-majority +.. includecode:: ../../../akka-docs/rst/java/code/docs/ddata/ShoppingCart.java#read-write-majority -.. includecode:: ../../../akka-samples/akka-sample-distributed-data-java/src/main/java/sample/distributeddata/ShoppingCart.java#get-cart +.. includecode:: ../../../akka-docs/rst/java/code/docs/ddata/ShoppingCart.java#get-cart -.. includecode:: ../../../akka-samples/akka-sample-distributed-data-java/src/main/java/sample/distributeddata/ShoppingCart.java#add-item +.. includecode:: ../../../akka-docs/rst/java/code/docs/ddata/ShoppingCart.java#add-item In some rare cases, when performing an ``Update`` it is needed to first try to fetch latest data from other nodes. 
That can be done by first sending a ``Get`` with ``ReadMajority`` and then continue with @@ -209,7 +209,7 @@ performed (hence the name observed-removed set). The following example illustrates how to do that: -.. includecode:: ../../../akka-samples/akka-sample-distributed-data-java/src/main/java/sample/distributeddata/ShoppingCart.java#remove-item +.. includecode:: ../../../akka-docs/rst/java/code/docs/ddata/ShoppingCart.java#remove-item .. warning:: diff --git a/akka-docs/rst/java/persistence.rst b/akka-docs/rst/java/persistence.rst index 0cae804d8f..b19b241d19 100644 --- a/akka-docs/rst/java/persistence.rst +++ b/akka-docs/rst/java/persistence.rst @@ -87,7 +87,7 @@ Akka persistence supports event sourcing with the ``AbstractPersistentActor`` ab class uses the ``persist`` method to persist and handle events. The behavior of an ``AbstractPersistentActor`` is defined by implementing ``createReceiveRecover`` and ``createReceive``. This is demonstrated in the following example. -.. includecode:: ../../../akka-samples/akka-sample-persistence-java-lambda/src/main/java/sample/persistence/PersistentActorExample.java#persistent-actor-example +.. includecode:: ../../../akka-docs/rst/java/code/docs/persistence/PersistentActorExample.java#persistent-actor-example The example defines two data types, ``Cmd`` and ``Evt`` to represent commands and events, respectively. The ``state`` of the ``ExamplePersistentActor`` is a list of persisted event data contained in ``ExampleState``. diff --git a/akka-docs/rst/scala/cluster-metrics.rst b/akka-docs/rst/scala/cluster-metrics.rst index 9d6086b59a..5464f0f69b 100644 --- a/akka-docs/rst/scala/cluster-metrics.rst +++ b/akka-docs/rst/scala/cluster-metrics.rst @@ -118,25 +118,44 @@ Let's take a look at this router in action. What can be more demanding than calc The backend worker that performs the factorial calculation: -.. 
includecode:: ../../../akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/factorial/FactorialBackend.scala#backend +.. includecode:: code/docs/cluster/FactorialBackend.scala#backend The frontend that receives user jobs and delegates to the backends via the router: -.. includecode:: ../../../akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/factorial/FactorialFrontend.scala#frontend +.. includecode:: code/docs/cluster/FactorialFrontend.scala#frontend As you can see, the router is defined in the same way as other routers, and in this case it is configured as follows: -.. includecode:: ../../../akka-samples/akka-sample-cluster-scala/src/main/resources/factorial.conf#adaptive-router +:: + + akka.actor.deployment { + /factorialFrontend/factorialBackendRouter = { + # Router type provided by metrics extension. + router = cluster-metrics-adaptive-group + # Router parameter specific for metrics extension. + # metrics-selector = heap + # metrics-selector = load + # metrics-selector = cpu + metrics-selector = mix + # + routees.paths = ["/user/factorialBackend"] + cluster { + enabled = on + use-role = backend + allow-local-routees = off + } + } + } It is only ``router`` type and the ``metrics-selector`` parameter that is specific to this router, other things work in the same way as other routers. The same type of router could also have been defined in code: -.. includecode:: ../../../akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/factorial/Extra.scala#router-lookup-in-code +.. includecode:: code/docs/cluster/FactorialFrontend.scala#router-lookup-in-code -.. includecode:: ../../../akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/factorial/Extra.scala#router-deploy-in-code +.. includecode:: code/docs/cluster/FactorialFrontend.scala#router-deploy-in-code The `Lightbend Activator `_ tutorial named `Akka Cluster Samples with Scala `_. 
@@ -147,7 +166,7 @@ Subscribe to Metrics Events It is possible to subscribe to the metrics events directly to implement other functionality. -.. includecode:: ../../../akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/factorial/MetricsListener.scala#metrics-listener +.. includecode:: code/docs/cluster/MetricsListener.scala#metrics-listener Custom Metrics Collector ------------------------ diff --git a/akka-docs/rst/scala/cluster-usage.rst b/akka-docs/rst/scala/cluster-usage.rst index bf875926d0..93131361c5 100644 --- a/akka-docs/rst/scala/cluster-usage.rst +++ b/akka-docs/rst/scala/cluster-usage.rst @@ -22,7 +22,41 @@ It joins the cluster and an actor subscribes to cluster membership events and lo The ``application.conf`` configuration looks like this: -.. includecode:: ../../../akka-samples/akka-sample-cluster-scala/src/main/resources/application.conf#snippet +:: + + akka { + actor { + provider = "cluster" + } + remote { + log-remote-lifecycle-events = off + netty.tcp { + hostname = "127.0.0.1" + port = 0 + } + } + + cluster { + seed-nodes = [ + "akka.tcp://ClusterSystem@127.0.0.1:2551", + "akka.tcp://ClusterSystem@127.0.0.1:2552"] + + # auto downing is NOT safe for production deployments. + # you may want to use it during development, read more about it in the docs. + # + # auto-down-unreachable-after = 10s + } + } + + # Disable legacy metrics in akka-cluster. + akka.cluster.metrics.enabled=off + + # Enable metrics extension in akka-cluster-metrics. + akka.extensions=["akka.cluster.metrics.ClusterMetricsExtension"] + + # Sigar native library extract location during tests. + # Note: use per-jvm-instance folder when running multiple jvm on one host. + akka.cluster.metrics.native-library-extract-folder=${user.dir}/target/native To enable cluster capabilities in your Akka project you should, at a minimum, add the :ref:`remoting-scala` settings, but with ``cluster``. 
@@ -39,7 +73,7 @@ ip-addresses or host names of the machines in ``application.conf`` instead of `` An actor that uses the cluster extension may look like this: -.. literalinclude:: ../../../akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/simple/SimpleClusterListener.scala +.. literalinclude:: code/docs/cluster/SimpleClusterListener.scala :language: scala The actor registers itself as subscriber of certain cluster events. It receives events corresponding to the current state @@ -224,7 +258,7 @@ Subscribe to Cluster Events You can subscribe to change notifications of the cluster membership by using ``Cluster(system).subscribe``. -.. includecode:: ../../../akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/simple/SimpleClusterListener2.scala#subscribe +.. includecode:: code/docs/cluster/SimpleClusterListener2.scala A snapshot of the full state, ``akka.cluster.ClusterEvent.CurrentClusterState``, is sent to the subscriber as the first message, followed by events for incremental updates. @@ -241,7 +275,7 @@ the events corresponding to the current state to mimic what you would have seen listening to the events when they occurred in the past. Note that those initial events only correspond to the current state and it is not the full history of all changes that actually has occurred in the cluster. -.. includecode:: ../../../akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/simple/SimpleClusterListener.scala#subscribe +.. includecode:: code/docs/cluster/SimpleClusterListener.scala#subscribe The events to track the life-cycle of members are: @@ -277,11 +311,11 @@ added or removed to the cluster dynamically. Messages: -.. includecode:: ../../../akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/transformation/TransformationMessages.scala#messages +.. includecode:: code/docs/cluster/TransformationMessages.scala#messages The backend worker that performs the transformation job: -.. 
includecode:: ../../../akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/transformation/TransformationBackend.scala#backend +.. includecode:: code/docs/cluster/TransformationBackend.scala#backend Note that the ``TransformationBackend`` actor subscribes to cluster events to detect new, potential, frontend nodes, and send them a registration message so that they know @@ -289,7 +323,7 @@ that they can use the backend worker. The frontend that receives user jobs and delegates to one of the registered backend workers: -.. includecode:: ../../../akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/transformation/TransformationFrontend.scala#frontend +.. includecode:: code/docs/cluster/TransformationFrontend.scala#frontend Note that the ``TransformationFrontend`` actor watch the registered backend to be able to remove it from its list of available backend workers. @@ -323,20 +357,23 @@ A common use case is to start actors after the cluster has been initialized, members have joined, and the cluster has reached a certain size. With a configuration option you can define required number of members -before the leader changes member status of 'Joining' members to 'Up'. +before the leader changes member status of 'Joining' members to 'Up'.:: -.. includecode:: ../../../akka-samples/akka-sample-cluster-scala/src/main/resources/factorial.conf#min-nr-of-members + akka.cluster.min-nr-of-members = 3 In a similar way you can define required number of members of a certain role -before the leader changes member status of 'Joining' members to 'Up'. +before the leader changes member status of 'Joining' members to 'Up'.:: -.. 
includecode:: ../../../akka-samples/akka-sample-cluster-scala/src/main/resources/factorial.conf#role-min-nr-of-members + akka.cluster.role { + frontend.min-nr-of-members = 1 + backend.min-nr-of-members = 2 + } You can start the actors in a ``registerOnMemberUp`` callback, which will be invoked when the current member status is changed to 'Up', i.e. the cluster has at least the defined number of members. -.. includecode:: ../../../akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/factorial/FactorialFrontend.scala#registerOnUp +.. includecode:: code/docs/cluster/FactorialFrontend.scala#registerOnUp This callback can be used for other things than starting actors. @@ -509,9 +546,19 @@ Router with Group of Routees ---------------------------- When using a ``Group`` you must start the routee actors on the cluster member nodes. -That is not done by the router. The configuration for a group looks like this: +That is not done by the router. The configuration for a group looks like this::: -.. includecode:: ../../../akka-samples/akka-sample-cluster-scala/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSpec.scala#router-lookup-config + akka.actor.deployment { + /statsService/workerRouter { + router = consistent-hashing-group + routees.paths = ["/user/statsWorker"] + cluster { + enabled = on + allow-local-routees = on + use-role = compute + } + } + } .. note:: The routee actors should be started as early as possible when starting the actor system, because @@ -528,7 +575,7 @@ Set it to a lower value if you want to limit total number of routees. The same type of router could also have been defined in code: -.. includecode:: ../../../akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/stats/Extra.scala#router-lookup-in-code +.. 
includecode:: ../../../akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsService.scala#router-lookup-in-code See :ref:`cluster_configuration_scala` section for further descriptions of the settings. @@ -546,23 +593,33 @@ the average number of characters per word when all results have been collected. Messages: -.. includecode:: ../../../akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/stats/StatsMessages.scala#messages +.. includecode:: ../../../akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsMessages.scala#messages The worker that counts number of characters in each word: -.. includecode:: ../../../akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/stats/StatsWorker.scala#worker +.. includecode:: ../../../akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsWorker.scala#worker The service that receives text from users and splits it up into words, delegates to workers and aggregates: -.. includecode:: ../../../akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/stats/StatsService.scala#service +.. includecode:: ../../../akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsService.scala#service Note, nothing cluster specific so far, just plain actors. All nodes start ``StatsService`` and ``StatsWorker`` actors. Remember, routees are the workers in this case. -The router is configured with ``routees.paths``: +The router is configured with ``routees.paths``::: -.. includecode:: ../../../akka-samples/akka-sample-cluster-scala/src/main/resources/stats1.conf#config-router-lookup + akka.actor.deployment { + /statsService/workerRouter { + router = consistent-hashing-group + routees.paths = ["/user/statsWorker"] + cluster { + enabled = on + allow-local-routees = on + use-role = compute + } + } + } This means that user requests can be sent to ``StatsService`` on any node and it will use ``StatsWorker`` on all nodes. 
@@ -575,9 +632,19 @@ Router with Pool of Remote Deployed Routees ------------------------------------------- When using a ``Pool`` with routees created and deployed on the cluster member nodes -the configuration for a router looks like this: +the configuration for a router looks like this::: -.. includecode:: ../../../akka-samples/akka-sample-cluster-scala/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSingleMasterSpec.scala#router-deploy-config + akka.actor.deployment { + /statsService/singleton/workerRouter { + router = consistent-hashing-pool + cluster { + enabled = on + max-nr-of-instances-per-node = 3 + allow-local-routees = on + use-role = compute + } + } + } It is possible to limit the deployment of routees to member nodes tagged with a certain role by specifying ``use-role``. @@ -589,7 +656,7 @@ Set it to a lower value if you want to limit total number of routees. The same type of router could also have been defined in code: -.. includecode:: ../../../akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/stats/Extra.scala#router-deploy-in-code +.. includecode:: ../../../akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsService.scala#router-deploy-in-code See :ref:`cluster_configuration_scala` section for further descriptions of the settings. @@ -598,21 +665,40 @@ Router Example with Pool of Remote Deployed Routees Let's take a look at how to use a cluster aware router on single master node that creates and deploys workers. To keep track of a single master we use the :ref:`cluster-singleton-scala` -in the cluster-tools module. The ``ClusterSingletonManager`` is started on each node. +in the cluster-tools module. The ``ClusterSingletonManager`` is started on each node.:: -.. 
includecode:: ../../../akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/stats/StatsSampleOneMaster.scala#create-singleton-manager + system.actorOf( + ClusterSingletonManager.props( + singletonProps = Props[StatsService], + terminationMessage = PoisonPill, + settings = ClusterSingletonManagerSettings(system).withRole("compute")), + name = "statsService") We also need an actor on each node that keeps track of where current single master exists and -delegates jobs to the ``StatsService``. That is provided by the ``ClusterSingletonProxy``. +delegates jobs to the ``StatsService``. That is provided by the ``ClusterSingletonProxy``.:: -.. includecode:: ../../../akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/stats/StatsSampleOneMaster.scala#singleton-proxy + system.actorOf( + ClusterSingletonProxy.props( + singletonManagerPath = "/user/statsService", + settings = ClusterSingletonProxySettings(system).withRole("compute")), + name = "statsServiceProxy") The ``ClusterSingletonProxy`` receives text from users and delegates to the current ``StatsService``, the single master. It listens to cluster events to lookup the ``StatsService`` on the oldest node. -All nodes start ``ClusterSingletonProxy`` and the ``ClusterSingletonManager``. The router is now configured like this: +All nodes start ``ClusterSingletonProxy`` and the ``ClusterSingletonManager``. The router is now configured like this::: -.. includecode:: ../../../akka-samples/akka-sample-cluster-scala/src/main/resources/stats2.conf#config-router-deploy + akka.actor.deployment { + /statsService/singleton/workerRouter { + router = consistent-hashing-pool + cluster { + enabled = on + max-nr-of-instances-per-node = 3 + allow-local-routees = on + use-role = compute + } + } + } The `Lightbend Activator `_ tutorial named `Akka Cluster Samples with Scala `_. 
@@ -636,14 +722,14 @@ add the ``sbt-multi-jvm`` plugin and the dependency to ``akka-multi-node-testkit First, as described in :ref:`multi-node-testing`, we need some scaffolding to configure the ``MultiNodeSpec``. Define the participating roles and their :ref:`cluster_configuration_scala` in an object extending ``MultiNodeConfig``: -.. includecode:: ../../../akka-samples/akka-sample-cluster-scala/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSpec.scala +.. includecode:: ../../../akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsSampleSpec.scala :include: MultiNodeConfig :exclude: router-lookup-config Define one concrete test class for each role/node. These will be instantiated on the different nodes (JVMs). They can be implemented differently, but often they are the same and extend an abstract test class, as illustrated here. -.. includecode:: ../../../akka-samples/akka-sample-cluster-scala/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSpec.scala#concrete-tests +.. includecode:: ../../../akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsSampleSpec.scala#concrete-tests Note the naming convention of these classes. The name of the classes must end with ``MultiJvmNode1``, ``MultiJvmNode2`` and so on. It is possible to define another suffix to be used by the ``sbt-multi-jvm``, but the default should be @@ -651,18 +737,18 @@ fine in most cases. Then the abstract ``MultiNodeSpec``, which takes the ``MultiNodeConfig`` as constructor parameter. -.. includecode:: ../../../akka-samples/akka-sample-cluster-scala/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSpec.scala#abstract-test +.. includecode:: ../../../akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsSampleSpec.scala#abstract-test Most of this can of course be extracted to a separate trait to avoid repeating this in all your tests. 
Typically you begin your test by starting up the cluster and let the members join, and create some actors. That can be done like this: -.. includecode:: ../../../akka-samples/akka-sample-cluster-scala/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSpec.scala#startup-cluster +.. includecode:: ../../../akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsSampleSpec.scala#startup-cluster From the test you interact with the cluster using the ``Cluster`` extension, e.g. ``join``. -.. includecode:: ../../../akka-samples/akka-sample-cluster-scala/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSpec.scala#join +.. includecode:: ../../../akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsSampleSpec.scala#join Notice how the `testActor` from :ref:`testkit ` is added as :ref:`subscriber ` to cluster changes and then waiting for certain events, such as in this case all members becoming 'Up'. @@ -670,7 +756,7 @@ to cluster changes and then waiting for certain events, such as in this case all The above code was running for all roles (JVMs). ``runOn`` is a convenient utility to declare that a certain block of code should only run for a specific role. -.. includecode:: ../../../akka-samples/akka-sample-cluster-scala/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSpec.scala#test-statsService +.. includecode:: ../../../akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsSampleSpec.scala#test-statsService Once again we take advantage of the facilities in :ref:`testkit ` to verify expected behavior. Here using ``testActor`` as sender (via ``ImplicitSender``) and verifying the reply with ``expectMsgPF``. @@ -678,7 +764,7 @@ Here using ``testActor`` as sender (via ``ImplicitSender``) and verifying the re In the above code you can see ``node(third)``, which is useful facility to get the root actor reference of the actor system for a specific role. 
This can also be used to grab the ``akka.actor.Address`` of that node. -.. includecode:: ../../../akka-samples/akka-sample-cluster-scala/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSpec.scala#addresses +.. includecode:: ../../../akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsSampleSpec.scala#addresses Management diff --git a/akka-docs/rst/scala/code/docs/cluster/ClusterDocSpec.scala b/akka-docs/rst/scala/code/docs/cluster/ClusterDocSpec.scala index 3fa90d8b40..c5381cb149 100644 --- a/akka-docs/rst/scala/code/docs/cluster/ClusterDocSpec.scala +++ b/akka-docs/rst/scala/code/docs/cluster/ClusterDocSpec.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2015-2017 Lightbend Inc. */ -package docs.cluster +package scala.docs.cluster import akka.cluster.Cluster import akka.testkit.AkkaSpec diff --git a/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/factorial/FactorialBackend.scala b/akka-docs/rst/scala/code/docs/cluster/FactorialBackend.scala similarity index 97% rename from akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/factorial/FactorialBackend.scala rename to akka-docs/rst/scala/code/docs/cluster/FactorialBackend.scala index 11ee8c2074..3acfbdec03 100644 --- a/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/factorial/FactorialBackend.scala +++ b/akka-docs/rst/scala/code/docs/cluster/FactorialBackend.scala @@ -1,4 +1,4 @@ -package sample.cluster.factorial +package scala.docs.cluster import scala.annotation.tailrec import scala.concurrent.Future diff --git a/akka-docs/rst/scala/code/docs/cluster/FactorialFrontend.scala b/akka-docs/rst/scala/code/docs/cluster/FactorialFrontend.scala new file mode 100644 index 0000000000..8e8bc6d60e --- /dev/null +++ b/akka-docs/rst/scala/code/docs/cluster/FactorialFrontend.scala @@ -0,0 +1,101 @@ +package scala.docs.cluster + +import scala.concurrent.duration._ +import com.typesafe.config.ConfigFactory +import akka.actor.Actor +import 
akka.actor.ActorLogging +import akka.actor.ActorSystem +import akka.actor.Props +import akka.cluster.Cluster +import akka.routing.FromConfig +import akka.actor.ReceiveTimeout +import scala.util.Try +import scala.concurrent.Await + +//#frontend +class FactorialFrontend(upToN: Int, repeat: Boolean) extends Actor with ActorLogging { + + val backend = context.actorOf( + FromConfig.props(), + name = "factorialBackendRouter") + + override def preStart(): Unit = { + sendJobs() + if (repeat) { + context.setReceiveTimeout(10.seconds) + } + } + + def receive = { + case (n: Int, factorial: BigInt) => + if (n == upToN) { + log.debug("{}! = {}", n, factorial) + if (repeat) sendJobs() + else context.stop(self) + } + case ReceiveTimeout => + log.info("Timeout") + sendJobs() + } + + def sendJobs(): Unit = { + log.info("Starting batch of factorials up to [{}]", upToN) + 1 to upToN foreach { backend ! _ } + } +} +//#frontend + +object FactorialFrontend { + def main(args: Array[String]): Unit = { + val upToN = 200 + + val config = ConfigFactory.parseString("akka.cluster.roles = [frontend]"). 
+ withFallback(ConfigFactory.load("factorial")) + + val system = ActorSystem("ClusterSystem", config) + system.log.info("Factorials will start when 2 backend members in the cluster.") + //#registerOnUp + Cluster(system) registerOnMemberUp { + system.actorOf( + Props(classOf[FactorialFrontend], upToN, true), + name = "factorialFrontend") + } + //#registerOnUp + + } +} + +// not used, only for documentation +abstract class FactorialFrontend2 extends Actor { + //#router-lookup-in-code + import akka.cluster.routing.ClusterRouterGroup + import akka.cluster.routing.ClusterRouterGroupSettings + import akka.cluster.metrics.AdaptiveLoadBalancingGroup + import akka.cluster.metrics.HeapMetricsSelector + + val backend = context.actorOf( + ClusterRouterGroup( + AdaptiveLoadBalancingGroup(HeapMetricsSelector), + ClusterRouterGroupSettings( + totalInstances = 100, routeesPaths = List("/user/factorialBackend"), + allowLocalRoutees = true, useRole = Some("backend"))).props(), + name = "factorialBackendRouter2") + //#router-lookup-in-code +} + +// not used, only for documentation +abstract class FactorialFrontend3 extends Actor { + //#router-deploy-in-code + import akka.cluster.routing.ClusterRouterPool + import akka.cluster.routing.ClusterRouterPoolSettings + import akka.cluster.metrics.AdaptiveLoadBalancingPool + import akka.cluster.metrics.SystemLoadAverageMetricsSelector + + val backend = context.actorOf( + ClusterRouterPool(AdaptiveLoadBalancingPool( + SystemLoadAverageMetricsSelector), ClusterRouterPoolSettings( + totalInstances = 100, maxInstancesPerNode = 3, + allowLocalRoutees = false, useRole = Some("backend"))).props(Props[FactorialBackend]), + name = "factorialBackendRouter3") + //#router-deploy-in-code +} diff --git a/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/factorial/MetricsListener.scala b/akka-docs/rst/scala/code/docs/cluster/MetricsListener.scala similarity index 97% rename from 
akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/factorial/MetricsListener.scala rename to akka-docs/rst/scala/code/docs/cluster/MetricsListener.scala index 2183bdb083..6dcaf70625 100644 --- a/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/factorial/MetricsListener.scala +++ b/akka-docs/rst/scala/code/docs/cluster/MetricsListener.scala @@ -1,4 +1,4 @@ -package sample.cluster.factorial +package scala.docs.cluster //#metrics-listener import akka.actor.ActorLogging @@ -18,7 +18,7 @@ class MetricsListener extends Actor with ActorLogging { // Subscribe unto ClusterMetricsEvent events. override def preStart(): Unit = extension.subscribe(self) - + // Unsubscribe from ClusterMetricsEvent events. override def postStop(): Unit = extension.unsubscribe(self) diff --git a/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/simple/SimpleClusterListener.scala b/akka-docs/rst/scala/code/docs/cluster/SimpleClusterListener.scala similarity index 84% rename from akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/simple/SimpleClusterListener.scala rename to akka-docs/rst/scala/code/docs/cluster/SimpleClusterListener.scala index 32c229402f..45a07c6b48 100644 --- a/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/simple/SimpleClusterListener.scala +++ b/akka-docs/rst/scala/code/docs/cluster/SimpleClusterListener.scala @@ -1,4 +1,4 @@ -package sample.cluster.simple +package scala.docs.cluster import akka.cluster.Cluster import akka.cluster.ClusterEvent._ @@ -9,7 +9,7 @@ class SimpleClusterListener extends Actor with ActorLogging { val cluster = Cluster(context.system) - // subscribe to cluster changes, re-subscribe when restart + // subscribe to cluster changes, re-subscribe when restart override def preStart(): Unit = { //#subscribe cluster.subscribe(self, initialStateMode = InitialStateAsEvents, @@ -24,8 +24,9 @@ class SimpleClusterListener extends Actor with ActorLogging { case 
UnreachableMember(member) => log.info("Member detected as unreachable: {}", member) case MemberRemoved(member, previousStatus) => - log.info("Member is Removed: {} after {}", + log.info( + "Member is Removed: {} after {}", member.address, previousStatus) case _: MemberEvent => // ignore } -} \ No newline at end of file +} diff --git a/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/simple/SimpleClusterListener2.scala b/akka-docs/rst/scala/code/docs/cluster/SimpleClusterListener2.scala similarity index 85% rename from akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/simple/SimpleClusterListener2.scala rename to akka-docs/rst/scala/code/docs/cluster/SimpleClusterListener2.scala index 13daf8c9e8..7c06cfb66a 100644 --- a/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/simple/SimpleClusterListener2.scala +++ b/akka-docs/rst/scala/code/docs/cluster/SimpleClusterListener2.scala @@ -1,4 +1,4 @@ -package sample.cluster.simple +package scala.docs.cluster import akka.cluster.Cluster import akka.cluster.ClusterEvent._ @@ -9,7 +9,7 @@ class SimpleClusterListener2 extends Actor with ActorLogging { val cluster = Cluster(context.system) - // subscribe to cluster changes, re-subscribe when restart + // subscribe to cluster changes, re-subscribe when restart override def preStart(): Unit = { //#subscribe cluster.subscribe(self, classOf[MemberEvent], classOf[UnreachableMember]) @@ -25,8 +25,9 @@ class SimpleClusterListener2 extends Actor with ActorLogging { case UnreachableMember(member) => log.info("Member detected as unreachable: {}", member) case MemberRemoved(member, previousStatus) => - log.info("Member is Removed: {} after {}", + log.info( + "Member is Removed: {} after {}", member.address, previousStatus) case _: MemberEvent => // ignore } -} \ No newline at end of file +} diff --git a/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/transformation/TransformationBackend.scala 
b/akka-docs/rst/scala/code/docs/cluster/TransformationBackend.scala similarity index 97% rename from akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/transformation/TransformationBackend.scala rename to akka-docs/rst/scala/code/docs/cluster/TransformationBackend.scala index 2d164b4bb0..25eceb0332 100644 --- a/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/transformation/TransformationBackend.scala +++ b/akka-docs/rst/scala/code/docs/cluster/TransformationBackend.scala @@ -1,4 +1,4 @@ -package sample.cluster.transformation +package scala.docs.cluster import language.postfixOps import scala.concurrent.duration._ @@ -49,4 +49,4 @@ object TransformationBackend { val system = ActorSystem("ClusterSystem", config) system.actorOf(Props[TransformationBackend], name = "backend") } -} \ No newline at end of file +} diff --git a/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/transformation/TransformationFrontend.scala b/akka-docs/rst/scala/code/docs/cluster/TransformationFrontend.scala similarity index 97% rename from akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/transformation/TransformationFrontend.scala rename to akka-docs/rst/scala/code/docs/cluster/TransformationFrontend.scala index deb34d5158..da2744be73 100644 --- a/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/transformation/TransformationFrontend.scala +++ b/akka-docs/rst/scala/code/docs/cluster/TransformationFrontend.scala @@ -1,4 +1,4 @@ -package sample.cluster.transformation +package scala.docs.cluster import language.postfixOps import scala.concurrent.duration._ @@ -57,4 +57,4 @@ object TransformationFrontend { } } -} \ No newline at end of file +} diff --git a/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/transformation/TransformationMessages.scala b/akka-docs/rst/scala/code/docs/cluster/TransformationMessages.scala similarity index 80% rename from 
akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/transformation/TransformationMessages.scala rename to akka-docs/rst/scala/code/docs/cluster/TransformationMessages.scala index 2ebf20f7a3..591d5d9862 100644 --- a/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/transformation/TransformationMessages.scala +++ b/akka-docs/rst/scala/code/docs/cluster/TransformationMessages.scala @@ -1,8 +1,8 @@ -package sample.cluster.transformation +package scala.docs.cluster //#messages final case class TransformationJob(text: String) final case class TransformationResult(text: String) final case class JobFailed(reason: String, job: TransformationJob) case object BackendRegistration -//#messages +//#messages \ No newline at end of file diff --git a/akka-samples/akka-sample-distributed-data-scala/src/main/scala/sample/distributeddata/ShoppingCart.scala b/akka-docs/rst/scala/code/docs/ddata/ShoppingCart.scala similarity index 99% rename from akka-samples/akka-sample-distributed-data-scala/src/main/scala/sample/distributeddata/ShoppingCart.scala rename to akka-docs/rst/scala/code/docs/ddata/ShoppingCart.scala index 320bf24400..ecfcec750e 100644 --- a/akka-samples/akka-sample-distributed-data-scala/src/main/scala/sample/distributeddata/ShoppingCart.scala +++ b/akka-docs/rst/scala/code/docs/ddata/ShoppingCart.scala @@ -1,4 +1,4 @@ -package sample.distributeddata +package scala.docs.ddata import scala.concurrent.duration._ import akka.actor.Actor diff --git a/akka-samples/akka-sample-persistence-scala/src/main/scala/sample/persistence/PersistentActorExample.scala b/akka-docs/rst/scala/code/docs/persistence/PersistentActorExample.scala similarity index 98% rename from akka-samples/akka-sample-persistence-scala/src/main/scala/sample/persistence/PersistentActorExample.scala rename to akka-docs/rst/scala/code/docs/persistence/PersistentActorExample.scala index 12e170b0a0..19635bfd7c 100644 --- 
a/akka-samples/akka-sample-persistence-scala/src/main/scala/sample/persistence/PersistentActorExample.scala +++ b/akka-docs/rst/scala/code/docs/persistence/PersistentActorExample.scala @@ -1,4 +1,4 @@ -package sample.persistence +package scala.docs.persistence //#persistent-actor-example import akka.actor._ diff --git a/akka-docs/rst/scala/distributed-data.rst b/akka-docs/rst/scala/distributed-data.rst index 00758214c0..fc462300af 100644 --- a/akka-docs/rst/scala/distributed-data.rst +++ b/akka-docs/rst/scala/distributed-data.rst @@ -202,11 +202,11 @@ the total size of the cluster. Here is an example of using ``WriteMajority`` and ``ReadMajority``: -.. includecode:: ../../../akka-samples/akka-sample-distributed-data-scala/src/main/scala/sample/distributeddata/ShoppingCart.scala#read-write-majority +.. includecode:: code/docs/ddata/ShoppingCart.scala#read-write-majority -.. includecode:: ../../../akka-samples/akka-sample-distributed-data-scala/src/main/scala/sample/distributeddata/ShoppingCart.scala#get-cart +.. includecode:: code/docs/ddata/ShoppingCart.scala#get-cart -.. includecode:: ../../../akka-samples/akka-sample-distributed-data-scala/src/main/scala/sample/distributeddata/ShoppingCart.scala#add-item +.. includecode:: code/docs/ddata/ShoppingCart.scala#add-item In some rare cases, when performing an ``Update`` it is needed to first try to fetch latest data from other nodes. That can be done by first sending a ``Get`` with ``ReadMajority`` and then continue with @@ -218,7 +218,7 @@ performed (hence the name observed-removed set). The following example illustrates how to do that: -.. includecode:: ../../../akka-samples/akka-sample-distributed-data-scala/src/main/scala/sample/distributeddata/ShoppingCart.scala#remove-item +.. includecode:: code/docs/ddata/ShoppingCart.scala#remove-item .. 
warning:: diff --git a/akka-docs/rst/scala/persistence.rst b/akka-docs/rst/scala/persistence.rst index 80b95c015f..f2168a9a22 100644 --- a/akka-docs/rst/scala/persistence.rst +++ b/akka-docs/rst/scala/persistence.rst @@ -81,7 +81,7 @@ Akka persistence supports event sourcing with the ``PersistentActor`` trait. An ``persist`` method to persist and handle events. The behavior of a ``PersistentActor`` is defined by implementing ``receiveRecover`` and ``receiveCommand``. This is demonstrated in the following example. -.. includecode:: ../../../akka-samples/akka-sample-persistence-scala/src/main/scala/sample/persistence/PersistentActorExample.scala#persistent-actor-example +.. includecode:: code/docs/persistence/PersistentActorExample.scala#persistent-actor-example The example defines two data types, ``Cmd`` and ``Evt`` to represent commands and events, respectively. The ``state`` of the ``ExamplePersistentActor`` is a list of persisted event data contained in ``ExampleState``. diff --git a/akka-samples/akka-sample-multi-node-scala/src/multi-jvm/scala/sample/multinode/MultiNodeSample.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/sample/MultiNodeSample.scala similarity index 88% rename from akka-samples/akka-sample-multi-node-scala/src/multi-jvm/scala/sample/multinode/MultiNodeSample.scala rename to akka-remote-tests/src/multi-jvm/scala/akka/remote/sample/MultiNodeSample.scala index 66050abe2a..9efa331740 100644 --- a/akka-samples/akka-sample-multi-node-scala/src/multi-jvm/scala/sample/multinode/MultiNodeSample.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/sample/MultiNodeSample.scala @@ -1,9 +1,10 @@ //#package -package sample.multinode +package akka.remote.sample + //#package //#config -import akka.remote.testkit.MultiNodeConfig +import akka.remote.testkit.{ MultiNodeConfig, STMultiNodeSpec } object MultiNodeSampleConfig extends MultiNodeConfig { val node1 = role("node1") @@ -12,9 +13,9 @@ object MultiNodeSampleConfig extends MultiNodeConfig 
{ //#config //#spec +import akka.actor.{ Actor, Props } import akka.remote.testkit.MultiNodeSpec import akka.testkit.ImplicitSender -import akka.actor.{ Props, Actor } class MultiNodeSampleSpecMultiJvmNode1 extends MultiNodeSample class MultiNodeSampleSpecMultiJvmNode2 extends MultiNodeSample @@ -22,7 +23,7 @@ class MultiNodeSampleSpecMultiJvmNode2 extends MultiNodeSample object MultiNodeSample { class Ponger extends Actor { def receive = { - case "ping" => sender() ! "pong" + case "ping" ⇒ sender() ! "pong" } } } @@ -30,8 +31,8 @@ object MultiNodeSample { class MultiNodeSample extends MultiNodeSpec(MultiNodeSampleConfig) with STMultiNodeSpec with ImplicitSender { - import MultiNodeSampleConfig._ import MultiNodeSample._ + import MultiNodeSampleConfig._ def initialParticipants = roles.size diff --git a/akka-remote-tests/src/test/scala/akka/remote/testkit/STMultiNodeSpec.scala b/akka-remote-tests/src/test/scala/akka/remote/testkit/STMultiNodeSpec.scala index 31518bb7c3..3031fbaef1 100644 --- a/akka-remote-tests/src/test/scala/akka/remote/testkit/STMultiNodeSpec.scala +++ b/akka-remote-tests/src/test/scala/akka/remote/testkit/STMultiNodeSpec.scala @@ -2,6 +2,7 @@ * Copyright (C) 2009-2017 Lightbend Inc. 
*/ +//#example package akka.remote.testkit import org.scalatest.{ BeforeAndAfterAll, WordSpecLike } @@ -10,10 +11,12 @@ import org.scalatest.Matchers /** * Hooks up MultiNodeSpec with ScalaTest */ -trait STMultiNodeSpec extends MultiNodeSpecCallbacks with WordSpecLike with Matchers with BeforeAndAfterAll { +trait STMultiNodeSpec extends MultiNodeSpecCallbacks + with WordSpecLike with Matchers with BeforeAndAfterAll { override def beforeAll() = multiNodeSpecBeforeAll() override def afterAll() = multiNodeSpecAfterAll() } +//#example diff --git a/akka-samples/README.md b/akka-samples/README.md deleted file mode 100644 index 1059718002..0000000000 --- a/akka-samples/README.md +++ /dev/null @@ -1,8 +0,0 @@ -Use Lightbend Activator to run samples --------------------------------------- - -Use [Lightbend Activator](https://www.lightbend.com/activator/download) to run samples in this akka-samples directory. -Follow the instruction on the Activator download page, and the [Activator documentation](https://www.lightbend.com/activator/docs). -Once activator ui is up, you an find akka-sample-* projects by their names. - - diff --git a/akka-samples/akka-sample-camel-java/.gitignore b/akka-samples/akka-sample-camel-java/.gitignore deleted file mode 100644 index 660c959e44..0000000000 --- a/akka-samples/akka-sample-camel-java/.gitignore +++ /dev/null @@ -1,17 +0,0 @@ -*# -*.iml -*.ipr -*.iws -*.pyc -*.tm.epoch -*.vim -*-shim.sbt -.idea/ -/project/plugins/project -project/boot -target/ -/logs -.cache -.classpath -.project -.settings \ No newline at end of file diff --git a/akka-samples/akka-sample-camel-java/COPYING b/akka-samples/akka-sample-camel-java/COPYING deleted file mode 100644 index 0e259d42c9..0000000000 --- a/akka-samples/akka-sample-camel-java/COPYING +++ /dev/null @@ -1,121 +0,0 @@ -Creative Commons Legal Code - -CC0 1.0 Universal - - CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE - LEGAL SERVICES. 
DISTRIBUTION OF THIS DOCUMENT DOES NOT CREATE AN - ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS - INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES - REGARDING THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS - PROVIDED HEREUNDER, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM - THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS PROVIDED - HEREUNDER. - -Statement of Purpose - -The laws of most jurisdictions throughout the world automatically confer -exclusive Copyright and Related Rights (defined below) upon the creator -and subsequent owner(s) (each and all, an "owner") of an original work of -authorship and/or a database (each, a "Work"). - -Certain owners wish to permanently relinquish those rights to a Work for -the purpose of contributing to a commons of creative, cultural and -scientific works ("Commons") that the public can reliably and without fear -of later claims of infringement build upon, modify, incorporate in other -works, reuse and redistribute as freely as possible in any form whatsoever -and for any purposes, including without limitation commercial purposes. -These owners may contribute to the Commons to promote the ideal of a free -culture and the further production of creative, cultural and scientific -works, or to gain reputation or greater distribution for their Work in -part through the use and efforts of others. - -For these and/or other purposes and motivations, and without any -expectation of additional consideration or compensation, the person -associating CC0 with a Work (the "Affirmer"), to the extent that he or she -is an owner of Copyright and Related Rights in the Work, voluntarily -elects to apply CC0 to the Work and publicly distribute the Work under its -terms, with knowledge of his or her Copyright and Related Rights in the -Work and the meaning and intended legal effect of CC0 on those rights. - -1. Copyright and Related Rights. 
A Work made available under CC0 may be -protected by copyright and related or neighboring rights ("Copyright and -Related Rights"). Copyright and Related Rights include, but are not -limited to, the following: - - i. the right to reproduce, adapt, distribute, perform, display, - communicate, and translate a Work; - ii. moral rights retained by the original author(s) and/or performer(s); -iii. publicity and privacy rights pertaining to a person's image or - likeness depicted in a Work; - iv. rights protecting against unfair competition in regards to a Work, - subject to the limitations in paragraph 4(a), below; - v. rights protecting the extraction, dissemination, use and reuse of data - in a Work; - vi. database rights (such as those arising under Directive 96/9/EC of the - European Parliament and of the Council of 11 March 1996 on the legal - protection of databases, and under any national implementation - thereof, including any amended or successor version of such - directive); and -vii. other similar, equivalent or corresponding rights throughout the - world based on applicable law or treaty, and any national - implementations thereof. - -2. Waiver. To the greatest extent permitted by, but not in contravention -of, applicable law, Affirmer hereby overtly, fully, permanently, -irrevocably and unconditionally waives, abandons, and surrenders all of -Affirmer's Copyright and Related Rights and associated claims and causes -of action, whether now known or unknown (including existing as well as -future claims and causes of action), in the Work (i) in all territories -worldwide, (ii) for the maximum duration provided by applicable law or -treaty (including future time extensions), (iii) in any current or future -medium and for any number of copies, and (iv) for any purpose whatsoever, -including without limitation commercial, advertising or promotional -purposes (the "Waiver"). 
Affirmer makes the Waiver for the benefit of each -member of the public at large and to the detriment of Affirmer's heirs and -successors, fully intending that such Waiver shall not be subject to -revocation, rescission, cancellation, termination, or any other legal or -equitable action to disrupt the quiet enjoyment of the Work by the public -as contemplated by Affirmer's express Statement of Purpose. - -3. Public License Fallback. Should any part of the Waiver for any reason -be judged legally invalid or ineffective under applicable law, then the -Waiver shall be preserved to the maximum extent permitted taking into -account Affirmer's express Statement of Purpose. In addition, to the -extent the Waiver is so judged Affirmer hereby grants to each affected -person a royalty-free, non transferable, non sublicensable, non exclusive, -irrevocable and unconditional license to exercise Affirmer's Copyright and -Related Rights in the Work (i) in all territories worldwide, (ii) for the -maximum duration provided by applicable law or treaty (including future -time extensions), (iii) in any current or future medium and for any number -of copies, and (iv) for any purpose whatsoever, including without -limitation commercial, advertising or promotional purposes (the -"License"). The License shall be deemed effective as of the date CC0 was -applied by Affirmer to the Work. Should any part of the License for any -reason be judged legally invalid or ineffective under applicable law, such -partial invalidity or ineffectiveness shall not invalidate the remainder -of the License, and in such case Affirmer hereby affirms that he or she -will not (i) exercise any of his or her remaining Copyright and Related -Rights in the Work or (ii) assert any associated claims and causes of -action with respect to the Work, in either case contrary to Affirmer's -express Statement of Purpose. - -4. Limitations and Disclaimers. - - a. 
No trademark or patent rights held by Affirmer are waived, abandoned, - surrendered, licensed or otherwise affected by this document. - b. Affirmer offers the Work as-is and makes no representations or - warranties of any kind concerning the Work, express, implied, - statutory or otherwise, including without limitation warranties of - title, merchantability, fitness for a particular purpose, non - infringement, or the absence of latent or other defects, accuracy, or - the present or absence of errors, whether or not discoverable, all to - the greatest extent permissible under applicable law. - c. Affirmer disclaims responsibility for clearing rights of other persons - that may apply to the Work or any use thereof, including without - limitation any person's Copyright and Related Rights in the Work. - Further, Affirmer disclaims responsibility for obtaining any necessary - consents, permissions or other rights required for any use of the - Work. - d. Affirmer understands and acknowledges that Creative Commons is not a - party to this document and has no duty or obligation with respect to - this CC0 or use of the Work. diff --git a/akka-samples/akka-sample-camel-java/LICENSE b/akka-samples/akka-sample-camel-java/LICENSE deleted file mode 100644 index 2a18b45589..0000000000 --- a/akka-samples/akka-sample-camel-java/LICENSE +++ /dev/null @@ -1,10 +0,0 @@ -Activator Template by Lightbend - -Licensed under Public Domain (CC0) - -To the extent possible under law, the person who associated CC0 with -this Activator Tempate has waived all copyright and related or neighboring -rights to this Activator Template. - -You should have received a copy of the CC0 legalcode along with this -work. If not, see . 
diff --git a/akka-samples/akka-sample-camel-java/activator.properties b/akka-samples/akka-sample-camel-java/activator.properties deleted file mode 100644 index f66a7eabfd..0000000000 --- a/akka-samples/akka-sample-camel-java/activator.properties +++ /dev/null @@ -1,7 +0,0 @@ -name=akka-sample-camel-java -title=Akka Camel Samples with Java -description=Akka Camel Samples with Java -tags=akka,camel,java,sample -authorName=Akka Team -authorLink=http://akka.io/ -sourceLink=https://github.com/akka/akka diff --git a/akka-samples/akka-sample-camel-java/build.sbt b/akka-samples/akka-sample-camel-java/build.sbt deleted file mode 100644 index 234f29d7d2..0000000000 --- a/akka-samples/akka-sample-camel-java/build.sbt +++ /dev/null @@ -1,15 +0,0 @@ -name := "akka-sample-camel-java" - -version := "2.5-SNAPSHOT" - -scalaVersion := "2.11.8" - -libraryDependencies ++= Seq( - "com.typesafe.akka" %% "akka-camel" % "2.5-SNAPSHOT", - "org.apache.camel" % "camel-jetty" % "2.13.4", - "org.apache.camel" % "camel-quartz" % "2.13.4", - "org.slf4j" % "slf4j-api" % "1.7.16", - "ch.qos.logback" % "logback-classic" % "1.1.3" -) - -licenses := Seq(("CC0", url("http://creativecommons.org/publicdomain/zero/1.0"))) diff --git a/akka-samples/akka-sample-camel-java/project/build.properties b/akka-samples/akka-sample-camel-java/project/build.properties deleted file mode 100644 index 748703f770..0000000000 --- a/akka-samples/akka-sample-camel-java/project/build.properties +++ /dev/null @@ -1 +0,0 @@ -sbt.version=0.13.7 diff --git a/akka-samples/akka-sample-camel-java/src/main/java/sample/camel/http/HttpConsumer.java b/akka-samples/akka-sample-camel-java/src/main/java/sample/camel/http/HttpConsumer.java deleted file mode 100644 index 34e373cecd..0000000000 --- a/akka-samples/akka-sample-camel-java/src/main/java/sample/camel/http/HttpConsumer.java +++ /dev/null @@ -1,21 +0,0 @@ -package sample.camel.http; - -import akka.actor.ActorRef; -import akka.camel.javaapi.UntypedConsumerActor; - -public class 
HttpConsumer extends UntypedConsumerActor { - - private ActorRef producer; - - public HttpConsumer(ActorRef producer) { - this.producer = producer; - } - - public String getEndpointUri() { - return "jetty:http://0.0.0.0:8875/"; - } - - public void onReceive(Object message) { - producer.forward(message, getContext()); - } -} diff --git a/akka-samples/akka-sample-camel-java/src/main/java/sample/camel/http/HttpProducer.java b/akka-samples/akka-sample-camel-java/src/main/java/sample/camel/http/HttpProducer.java deleted file mode 100644 index 8c37b50c4d..0000000000 --- a/akka-samples/akka-sample-camel-java/src/main/java/sample/camel/http/HttpProducer.java +++ /dev/null @@ -1,43 +0,0 @@ -package sample.camel.http; - -import akka.actor.ActorRef; -import akka.camel.CamelMessage; -import akka.camel.javaapi.UntypedProducerActor; -import org.apache.camel.Exchange; - -import java.util.HashSet; -import java.util.Set; - -public class HttpProducer extends UntypedProducerActor { - private ActorRef transformer; - - public HttpProducer(ActorRef transformer) { - this.transformer = transformer; - } - - public String getEndpointUri() { - // bridgeEndpoint=true makes the producer ignore the Exchange.HTTP_URI header, - // and use the endpoint's URI for request - return "jetty://http://akka.io/?bridgeEndpoint=true"; - } - - // before producing messages to endpoints, producer actors can pre-process - // them by overriding the onTransformOutgoingMessage method - @Override - public Object onTransformOutgoingMessage(Object message) { - if (message instanceof CamelMessage) { - CamelMessage camelMessage = (CamelMessage) message; - Set httpPath = new HashSet(); - httpPath.add(Exchange.HTTP_PATH); - return camelMessage.withHeaders(camelMessage.getHeaders(httpPath)); - } else - return super.onTransformOutgoingMessage(message); - } - - // instead of replying to the initial sender, producer actors can implement custom - // response processing by overriding the onRouteResponse method - @Override - 
public void onRouteResponse(Object message) { - transformer.forward(message, getContext()); - } -} diff --git a/akka-samples/akka-sample-camel-java/src/main/java/sample/camel/http/HttpSample.java b/akka-samples/akka-sample-camel-java/src/main/java/sample/camel/http/HttpSample.java deleted file mode 100644 index 19c1eef98f..0000000000 --- a/akka-samples/akka-sample-camel-java/src/main/java/sample/camel/http/HttpSample.java +++ /dev/null @@ -1,15 +0,0 @@ -package sample.camel.http; - -import akka.actor.*; - -public class HttpSample { - public static void main(String[] args) { - ActorSystem system = ActorSystem.create("some-system"); - - final ActorRef httpTransformer = system.actorOf(Props.create(HttpTransformer.class)); - - final ActorRef httpProducer = system.actorOf(Props.create(HttpProducer.class, httpTransformer)); - - final ActorRef httpConsumer = system.actorOf(Props.create(HttpConsumer.class, httpProducer)); - } -} diff --git a/akka-samples/akka-sample-camel-java/src/main/java/sample/camel/http/HttpTransformer.java b/akka-samples/akka-sample-camel-java/src/main/java/sample/camel/http/HttpTransformer.java deleted file mode 100644 index 6ae1e2bfcc..0000000000 --- a/akka-samples/akka-sample-camel-java/src/main/java/sample/camel/http/HttpTransformer.java +++ /dev/null @@ -1,25 +0,0 @@ -package sample.camel.http; - -import akka.actor.Status; -import akka.actor.UntypedActor; -import akka.camel.CamelMessage; -import akka.dispatch.Mapper; - -public class HttpTransformer extends UntypedActor { - public void onReceive(Object message) { - if (message instanceof CamelMessage) { - CamelMessage camelMessage = (CamelMessage) message; - CamelMessage replacedMessage = camelMessage.mapBody(new Mapper() { - @Override - public String apply(Object body) { - String text = new String((byte[]) body); - return text.replaceAll("Akka ", "AKKA "); - } - }); - getSender().tell(replacedMessage, getSelf()); - } else if (message instanceof Status.Failure) { - getSender().tell(message, 
getSelf()); - } else - unhandled(message); - } -} diff --git a/akka-samples/akka-sample-camel-java/src/main/java/sample/camel/quartz/MyQuartzActor.java b/akka-samples/akka-sample-camel-java/src/main/java/sample/camel/quartz/MyQuartzActor.java deleted file mode 100644 index 967abea0c8..0000000000 --- a/akka-samples/akka-sample-camel-java/src/main/java/sample/camel/quartz/MyQuartzActor.java +++ /dev/null @@ -1,18 +0,0 @@ -package sample.camel.quartz; - -import akka.camel.CamelMessage; -import akka.camel.javaapi.UntypedConsumerActor; - -public class MyQuartzActor extends UntypedConsumerActor { - public String getEndpointUri() { - return "quartz://example?cron=0/2+*+*+*+*+?"; - } - - public void onReceive(Object message) { - if (message instanceof CamelMessage) { - CamelMessage camelMessage = (CamelMessage) message; - System.out.println(String.format("==============> received %s ", camelMessage)); - } else - unhandled(message); - } -} diff --git a/akka-samples/akka-sample-camel-java/src/main/java/sample/camel/quartz/QuartzSample.java b/akka-samples/akka-sample-camel-java/src/main/java/sample/camel/quartz/QuartzSample.java deleted file mode 100644 index 2f4c73d38a..0000000000 --- a/akka-samples/akka-sample-camel-java/src/main/java/sample/camel/quartz/QuartzSample.java +++ /dev/null @@ -1,11 +0,0 @@ -package sample.camel.quartz; - -import akka.actor.ActorSystem; -import akka.actor.Props; - -public class QuartzSample { - public static void main(String[] args) { - ActorSystem system = ActorSystem.create("my-quartz-system"); - system.actorOf(Props.create(MyQuartzActor.class)); - } -} diff --git a/akka-samples/akka-sample-camel-java/src/main/java/sample/camel/route/CustomRouteBuilder.java b/akka-samples/akka-sample-camel-java/src/main/java/sample/camel/route/CustomRouteBuilder.java deleted file mode 100644 index 97ace488e5..0000000000 --- a/akka-samples/akka-sample-camel-java/src/main/java/sample/camel/route/CustomRouteBuilder.java +++ /dev/null @@ -1,15 +0,0 @@ -package 
sample.camel.route; - -import org.apache.camel.Exchange; -import org.apache.camel.Processor; -import org.apache.camel.builder.RouteBuilder; - -public class CustomRouteBuilder extends RouteBuilder { - public void configure() throws Exception { - from("direct:welcome").process(new Processor() { - public void process(Exchange exchange) throws Exception { - exchange.getOut().setBody(String.format("Welcome %s", exchange.getIn().getBody())); - } - }); - } -} diff --git a/akka-samples/akka-sample-camel-java/src/main/java/sample/camel/route/CustomRouteSample.java b/akka-samples/akka-sample-camel-java/src/main/java/sample/camel/route/CustomRouteSample.java deleted file mode 100644 index 669800b9f6..0000000000 --- a/akka-samples/akka-sample-camel-java/src/main/java/sample/camel/route/CustomRouteSample.java +++ /dev/null @@ -1,19 +0,0 @@ -package sample.camel.route; - -import akka.actor.*; -import akka.camel.CamelExtension; - -public class CustomRouteSample { - @SuppressWarnings("unused") - public static void main(String[] args) { - try { - ActorSystem system = ActorSystem.create("some-system"); - final ActorRef producer = system.actorOf(Props.create(RouteProducer.class)); - final ActorRef mediator = system.actorOf(Props.create(RouteTransformer.class, producer)); - final ActorRef consumer = system.actorOf(Props.create(RouteConsumer.class, mediator)); - CamelExtension.get(system).context().addRoutes(new CustomRouteBuilder()); - } catch (Exception e) { - e.printStackTrace(); - } - } -} diff --git a/akka-samples/akka-sample-camel-java/src/main/java/sample/camel/route/RouteConsumer.java b/akka-samples/akka-sample-camel-java/src/main/java/sample/camel/route/RouteConsumer.java deleted file mode 100644 index d3067fd8ef..0000000000 --- a/akka-samples/akka-sample-camel-java/src/main/java/sample/camel/route/RouteConsumer.java +++ /dev/null @@ -1,27 +0,0 @@ -package sample.camel.route; - -import akka.actor.ActorRef; -import akka.camel.CamelMessage; -import 
akka.camel.javaapi.UntypedConsumerActor; - -public class RouteConsumer extends UntypedConsumerActor { - private ActorRef transformer; - - public RouteConsumer(ActorRef transformer) { - this.transformer = transformer; - } - - public String getEndpointUri() { - return "jetty:http://0.0.0.0:8877/camel/welcome"; - } - - public void onReceive(Object message) { - if (message instanceof CamelMessage) { - CamelMessage camelMessage = (CamelMessage) message; - // Forward a string representation of the message body to transformer - String body = camelMessage.getBodyAs(String.class, getCamelContext()); - transformer.forward(camelMessage.withBody(body), getContext()); - } else - unhandled(message); - } -} diff --git a/akka-samples/akka-sample-camel-java/src/main/java/sample/camel/route/RouteProducer.java b/akka-samples/akka-sample-camel-java/src/main/java/sample/camel/route/RouteProducer.java deleted file mode 100644 index 5a47947087..0000000000 --- a/akka-samples/akka-sample-camel-java/src/main/java/sample/camel/route/RouteProducer.java +++ /dev/null @@ -1,9 +0,0 @@ -package sample.camel.route; - -import akka.camel.javaapi.UntypedProducerActor; - -public class RouteProducer extends UntypedProducerActor { - public String getEndpointUri() { - return "direct:welcome"; - } -} diff --git a/akka-samples/akka-sample-camel-java/src/main/java/sample/camel/route/RouteTransformer.java b/akka-samples/akka-sample-camel-java/src/main/java/sample/camel/route/RouteTransformer.java deleted file mode 100644 index 5d0ff079c7..0000000000 --- a/akka-samples/akka-sample-camel-java/src/main/java/sample/camel/route/RouteTransformer.java +++ /dev/null @@ -1,30 +0,0 @@ -package sample.camel.route; - -import akka.actor.ActorRef; -import akka.actor.UntypedActor; -import akka.camel.CamelMessage; -import akka.dispatch.Mapper; - -public class RouteTransformer extends UntypedActor { - private ActorRef producer; - - public RouteTransformer(ActorRef producer) { - this.producer = producer; - } - - public void 
onReceive(Object message) { - if (message instanceof CamelMessage) { - // example: transform message body "foo" to "- foo -" and forward result - // to producer - CamelMessage camelMessage = (CamelMessage) message; - CamelMessage transformedMessage = camelMessage.mapBody(new Mapper() { - @Override - public String apply(String body) { - return String.format("- %s -", body); - } - }); - producer.forward(transformedMessage, getContext()); - } else - unhandled(message); - } -} diff --git a/akka-samples/akka-sample-camel-java/tutorial/camel-async-interact.png b/akka-samples/akka-sample-camel-java/tutorial/camel-async-interact.png deleted file mode 100644 index 55a2a4505b..0000000000 Binary files a/akka-samples/akka-sample-camel-java/tutorial/camel-async-interact.png and /dev/null differ diff --git a/akka-samples/akka-sample-camel-java/tutorial/camel-async-sequence.png b/akka-samples/akka-sample-camel-java/tutorial/camel-async-sequence.png deleted file mode 100644 index 416c5a181b..0000000000 Binary files a/akka-samples/akka-sample-camel-java/tutorial/camel-async-sequence.png and /dev/null differ diff --git a/akka-samples/akka-sample-camel-java/tutorial/camel-custom-route.png b/akka-samples/akka-sample-camel-java/tutorial/camel-custom-route.png deleted file mode 100644 index efacdb8f82..0000000000 Binary files a/akka-samples/akka-sample-camel-java/tutorial/camel-custom-route.png and /dev/null differ diff --git a/akka-samples/akka-sample-camel-java/tutorial/index.html b/akka-samples/akka-sample-camel-java/tutorial/index.html deleted file mode 100644 index 1d4e0d91d3..0000000000 --- a/akka-samples/akka-sample-camel-java/tutorial/index.html +++ /dev/null @@ -1,163 +0,0 @@ - - - Akka Camel Samples with Java - - - - -
-

-This tutorial contains 3 samples of -Akka Camel. -

- -
    -
  • Asynchronous routing and transformation
  • -
  • Custom Camel route
  • -
  • Quartz scheduler
  • -
- -
- -
- -

Asynchronous routing and transformation

- -

-This example demonstrates how to implement consumer and producer actors that -support - -Asynchronous routing with their Camel endpoints. The sample -application transforms the content of the Akka homepage, http://akka.io, -by replacing every occurrence of *Akka* with *AKKA*. -

- -

-To run this example, go to the Run -tab, and start the application main class sample.camel.http.HttpExample if it's not already started. -Then direct the browser to http://localhost:8875 and the -transformed Akka homepage should be displayed. Please note that this example will probably not work if you're -behind an HTTP proxy. -

- -

-The following figure gives an overview how the example actors interact with -external systems and with each other. A browser sends a GET request to -http://localhost:8875 which is the published endpoint of the -HttpConsumer -actor. The HttpConsumeractor forwards the requests to the -HttpProducer.java -actor which retrieves the Akka homepage from http://akka.io. The retrieved HTML -is then forwarded to the -HttpTransformer.java -actor which replaces all occurrences of *Akka* with *AKKA*. The transformation result is sent back the HttpConsumer -which finally returns it to the browser. -

- - - -

-Implementing the example actor classes and wiring them together is rather easy -as shown in HttpConsumer.java, -HttpProducer.java and -HttpTransformer.java. -

- - -

-The jetty endpoints of HttpConsumer and -HttpProducer support asynchronous in-out message exchanges and do not allocate threads for the full duration of -the exchange. This is achieved by using Jetty continuations -on the consumer-side and by using Jetty's asynchronous HTTP client -on the producer side. The following high-level sequence diagram illustrates that. -

- - - -
-
- -

Custom Camel route example

- -

-This section also demonstrates the combined usage of a -RouteProducer and a -RouteConsumer -actor as well as the inclusion of a -custom Camel route. -The following figure gives an overview. -

- - - -
    -
  • A consumer actor receives a message from an HTTP client
  • - -
  • It forwards the message to another actor that transforms the message (encloses - the original message into hyphens)
  • - -
  • The transformer actor forwards the transformed message to a producer actor
  • - -
  • The producer actor sends the message to a custom Camel route beginning at the - direct:welcome endpoint
  • - -
  • A processor (transformer) in the custom Camel route prepends "Welcome" to the - original message and creates a result message
  • - -
  • The producer actor sends the result back to the consumer actor which returns - it to the HTTP client
  • -
- -

-The producer actor knows where to reply the message to because the consumer and -transformer actors have forwarded the original sender reference as well. The -application configuration and the route starting from direct:welcome are done in the code above. -

- -

-To run this example, go to the Run -tab, and start the application main class sample.camel.route.CustomRouteExample -

- -

-POST a message to http://localhost:8877/camel/welcome. -

- -

-   curl -H "Content-Type: text/plain" -d "Anke" http://localhost:8877/camel/welcome
-
- -

-The response should be: -

- -

-   Welcome - Anke -
-
- -
-
- -

Quartz Scheduler Example

- -

-Here is an example showing how simple it is to implement a cron-style scheduler by -using the Camel Quartz component in Akka. -

-

-Open MyQuartzActor.java. -

-

-The example creates a "timer" actor which fires a message every 2 -seconds. -

- -

-For more information about the Camel Quartz component, see here: -http://camel.apache.org/quartz.html -

- -
- - - diff --git a/akka-samples/akka-sample-camel-scala/.gitignore b/akka-samples/akka-sample-camel-scala/.gitignore deleted file mode 100644 index 660c959e44..0000000000 --- a/akka-samples/akka-sample-camel-scala/.gitignore +++ /dev/null @@ -1,17 +0,0 @@ -*# -*.iml -*.ipr -*.iws -*.pyc -*.tm.epoch -*.vim -*-shim.sbt -.idea/ -/project/plugins/project -project/boot -target/ -/logs -.cache -.classpath -.project -.settings \ No newline at end of file diff --git a/akka-samples/akka-sample-camel-scala/COPYING b/akka-samples/akka-sample-camel-scala/COPYING deleted file mode 100644 index 0e259d42c9..0000000000 --- a/akka-samples/akka-sample-camel-scala/COPYING +++ /dev/null @@ -1,121 +0,0 @@ -Creative Commons Legal Code - -CC0 1.0 Universal - - CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE - LEGAL SERVICES. DISTRIBUTION OF THIS DOCUMENT DOES NOT CREATE AN - ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS - INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES - REGARDING THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS - PROVIDED HEREUNDER, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM - THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS PROVIDED - HEREUNDER. - -Statement of Purpose - -The laws of most jurisdictions throughout the world automatically confer -exclusive Copyright and Related Rights (defined below) upon the creator -and subsequent owner(s) (each and all, an "owner") of an original work of -authorship and/or a database (each, a "Work"). - -Certain owners wish to permanently relinquish those rights to a Work for -the purpose of contributing to a commons of creative, cultural and -scientific works ("Commons") that the public can reliably and without fear -of later claims of infringement build upon, modify, incorporate in other -works, reuse and redistribute as freely as possible in any form whatsoever -and for any purposes, including without limitation commercial purposes. 
-These owners may contribute to the Commons to promote the ideal of a free -culture and the further production of creative, cultural and scientific -works, or to gain reputation or greater distribution for their Work in -part through the use and efforts of others. - -For these and/or other purposes and motivations, and without any -expectation of additional consideration or compensation, the person -associating CC0 with a Work (the "Affirmer"), to the extent that he or she -is an owner of Copyright and Related Rights in the Work, voluntarily -elects to apply CC0 to the Work and publicly distribute the Work under its -terms, with knowledge of his or her Copyright and Related Rights in the -Work and the meaning and intended legal effect of CC0 on those rights. - -1. Copyright and Related Rights. A Work made available under CC0 may be -protected by copyright and related or neighboring rights ("Copyright and -Related Rights"). Copyright and Related Rights include, but are not -limited to, the following: - - i. the right to reproduce, adapt, distribute, perform, display, - communicate, and translate a Work; - ii. moral rights retained by the original author(s) and/or performer(s); -iii. publicity and privacy rights pertaining to a person's image or - likeness depicted in a Work; - iv. rights protecting against unfair competition in regards to a Work, - subject to the limitations in paragraph 4(a), below; - v. rights protecting the extraction, dissemination, use and reuse of data - in a Work; - vi. database rights (such as those arising under Directive 96/9/EC of the - European Parliament and of the Council of 11 March 1996 on the legal - protection of databases, and under any national implementation - thereof, including any amended or successor version of such - directive); and -vii. other similar, equivalent or corresponding rights throughout the - world based on applicable law or treaty, and any national - implementations thereof. - -2. Waiver. 
To the greatest extent permitted by, but not in contravention -of, applicable law, Affirmer hereby overtly, fully, permanently, -irrevocably and unconditionally waives, abandons, and surrenders all of -Affirmer's Copyright and Related Rights and associated claims and causes -of action, whether now known or unknown (including existing as well as -future claims and causes of action), in the Work (i) in all territories -worldwide, (ii) for the maximum duration provided by applicable law or -treaty (including future time extensions), (iii) in any current or future -medium and for any number of copies, and (iv) for any purpose whatsoever, -including without limitation commercial, advertising or promotional -purposes (the "Waiver"). Affirmer makes the Waiver for the benefit of each -member of the public at large and to the detriment of Affirmer's heirs and -successors, fully intending that such Waiver shall not be subject to -revocation, rescission, cancellation, termination, or any other legal or -equitable action to disrupt the quiet enjoyment of the Work by the public -as contemplated by Affirmer's express Statement of Purpose. - -3. Public License Fallback. Should any part of the Waiver for any reason -be judged legally invalid or ineffective under applicable law, then the -Waiver shall be preserved to the maximum extent permitted taking into -account Affirmer's express Statement of Purpose. 
In addition, to the -extent the Waiver is so judged Affirmer hereby grants to each affected -person a royalty-free, non transferable, non sublicensable, non exclusive, -irrevocable and unconditional license to exercise Affirmer's Copyright and -Related Rights in the Work (i) in all territories worldwide, (ii) for the -maximum duration provided by applicable law or treaty (including future -time extensions), (iii) in any current or future medium and for any number -of copies, and (iv) for any purpose whatsoever, including without -limitation commercial, advertising or promotional purposes (the -"License"). The License shall be deemed effective as of the date CC0 was -applied by Affirmer to the Work. Should any part of the License for any -reason be judged legally invalid or ineffective under applicable law, such -partial invalidity or ineffectiveness shall not invalidate the remainder -of the License, and in such case Affirmer hereby affirms that he or she -will not (i) exercise any of his or her remaining Copyright and Related -Rights in the Work or (ii) assert any associated claims and causes of -action with respect to the Work, in either case contrary to Affirmer's -express Statement of Purpose. - -4. Limitations and Disclaimers. - - a. No trademark or patent rights held by Affirmer are waived, abandoned, - surrendered, licensed or otherwise affected by this document. - b. Affirmer offers the Work as-is and makes no representations or - warranties of any kind concerning the Work, express, implied, - statutory or otherwise, including without limitation warranties of - title, merchantability, fitness for a particular purpose, non - infringement, or the absence of latent or other defects, accuracy, or - the present or absence of errors, whether or not discoverable, all to - the greatest extent permissible under applicable law. - c. 
Affirmer disclaims responsibility for clearing rights of other persons - that may apply to the Work or any use thereof, including without - limitation any person's Copyright and Related Rights in the Work. - Further, Affirmer disclaims responsibility for obtaining any necessary - consents, permissions or other rights required for any use of the - Work. - d. Affirmer understands and acknowledges that Creative Commons is not a - party to this document and has no duty or obligation with respect to - this CC0 or use of the Work. diff --git a/akka-samples/akka-sample-camel-scala/LICENSE b/akka-samples/akka-sample-camel-scala/LICENSE deleted file mode 100644 index 2a18b45589..0000000000 --- a/akka-samples/akka-sample-camel-scala/LICENSE +++ /dev/null @@ -1,10 +0,0 @@ -Activator Template by Lightbend - -Licensed under Public Domain (CC0) - -To the extent possible under law, the person who associated CC0 with -this Activator Tempate has waived all copyright and related or neighboring -rights to this Activator Template. - -You should have received a copy of the CC0 legalcode along with this -work. If not, see . 
diff --git a/akka-samples/akka-sample-camel-scala/activator.properties b/akka-samples/akka-sample-camel-scala/activator.properties deleted file mode 100644 index 0708578c65..0000000000 --- a/akka-samples/akka-sample-camel-scala/activator.properties +++ /dev/null @@ -1,7 +0,0 @@ -name=akka-sample-camel-scala -title=Akka Camel Samples with Scala -description=Akka Camel Samples with Scala -tags=akka,camel,scala,sample -authorName=Akka Team -authorLink=http://akka.io/ -sourceLink=https://github.com/akka/akka diff --git a/akka-samples/akka-sample-camel-scala/build.sbt b/akka-samples/akka-sample-camel-scala/build.sbt deleted file mode 100644 index 2dd9a86132..0000000000 --- a/akka-samples/akka-sample-camel-scala/build.sbt +++ /dev/null @@ -1,16 +0,0 @@ -name := "akka-sample-camel-scala" - -version := "2.5-SNAPSHOT" - -scalaVersion := "2.11.8" - -libraryDependencies ++= Seq( - "com.typesafe.akka" %% "akka-actor" % "2.5-SNAPSHOT", - "com.typesafe.akka" %% "akka-camel" % "2.5-SNAPSHOT", - "org.apache.camel" % "camel-jetty" % "2.13.4", - "org.apache.camel" % "camel-quartz" % "2.13.4", - "org.slf4j" % "slf4j-api" % "1.7.16", - "ch.qos.logback" % "logback-classic" % "1.1.3" -) - -licenses := Seq(("CC0", url("http://creativecommons.org/publicdomain/zero/1.0"))) diff --git a/akka-samples/akka-sample-camel-scala/project/build.properties b/akka-samples/akka-sample-camel-scala/project/build.properties deleted file mode 100644 index 748703f770..0000000000 --- a/akka-samples/akka-sample-camel-scala/project/build.properties +++ /dev/null @@ -1 +0,0 @@ -sbt.version=0.13.7 diff --git a/akka-samples/akka-sample-camel-scala/src/main/scala/sample/camel/CustomRouteExample.scala b/akka-samples/akka-sample-camel-scala/src/main/scala/sample/camel/CustomRouteExample.scala deleted file mode 100644 index c606f7832f..0000000000 --- a/akka-samples/akka-sample-camel-scala/src/main/scala/sample/camel/CustomRouteExample.scala +++ /dev/null @@ -1,58 +0,0 @@ -package sample.camel - -import 
org.apache.camel.Exchange -import org.apache.camel.Processor -import org.apache.camel.builder.RouteBuilder -import akka.actor.Actor -import akka.actor.ActorRef -import akka.actor.ActorSystem -import akka.actor.Props -import akka.camel.CamelExtension -import akka.camel.CamelMessage -import akka.camel.Consumer -import akka.camel.Producer - -object CustomRouteExample { - - def main(args: Array[String]): Unit = { - val system = ActorSystem("some-system") - val producer = system.actorOf(Props[RouteProducer]) - val mediator = system.actorOf(Props(classOf[RouteTransformer], producer)) - val consumer = system.actorOf(Props(classOf[RouteConsumer], mediator)) - CamelExtension(system).context.addRoutes(new CustomRouteBuilder) - } - - class RouteConsumer(transformer: ActorRef) extends Actor with Consumer { - def endpointUri = "jetty:http://0.0.0.0:8877/camel/welcome" - - def receive = { - // Forward a string representation of the message body to transformer - case msg: CamelMessage => transformer.forward(msg.withBodyAs[String]) - } - } - - class RouteTransformer(producer: ActorRef) extends Actor { - def receive = { - // example: transform message body "foo" to "- foo -" and forward result - // to producer - case msg: CamelMessage => - producer.forward(msg.mapBody((body: String) => "- %s -" format body)) - } - } - - class RouteProducer extends Actor with Producer { - def endpointUri = "direct:welcome" - } - - class CustomRouteBuilder extends RouteBuilder { - def configure { - from("direct:welcome").process(new Processor() { - def process(exchange: Exchange) { - // Create a 'welcome' message from the input message - exchange.getOut.setBody("Welcome %s" format exchange.getIn.getBody) - } - }) - } - } - -} diff --git a/akka-samples/akka-sample-camel-scala/src/main/scala/sample/camel/HttpExample.scala b/akka-samples/akka-sample-camel-scala/src/main/scala/sample/camel/HttpExample.scala deleted file mode 100644 index 1bb486e38c..0000000000 --- 
a/akka-samples/akka-sample-camel-scala/src/main/scala/sample/camel/HttpExample.scala +++ /dev/null @@ -1,58 +0,0 @@ -package sample.camel - -import org.apache.camel.Exchange -import akka.actor.Actor -import akka.actor.ActorRef -import akka.actor.ActorSystem -import akka.actor.Props -import akka.actor.Status.Failure -import akka.actor.actorRef2Scala -import akka.camel.CamelMessage -import akka.camel.Consumer -import akka.camel.Producer - -object HttpExample { - - def main(args: Array[String]): Unit = { - val system = ActorSystem("some-system") - val httpTransformer = system.actorOf(Props[HttpTransformer]) - val httpProducer = system.actorOf(Props(classOf[HttpProducer], httpTransformer)) - val httpConsumer = system.actorOf(Props(classOf[HttpConsumer], httpProducer)) - } - - class HttpConsumer(producer: ActorRef) extends Consumer { - def endpointUri = "jetty:http://0.0.0.0:8875/" - - def receive = { - case msg => producer forward msg - } - } - - class HttpProducer(transformer: ActorRef) extends Actor with Producer { - // bridgeEndpoint=true makes the producer ignore the Exchange.HTTP_URI header, - // and use the endpoint's URI for request - def endpointUri = "jetty://http://akka.io/?bridgeEndpoint=true" - - // before producing messages to endpoints, producer actors can pre-process - // them by overriding the transformOutgoingMessage method - override def transformOutgoingMessage(msg: Any) = msg match { - case camelMsg: CamelMessage => camelMsg.copy(headers = - camelMsg.headers(Set(Exchange.HTTP_PATH))) - } - - // instead of replying to the initial sender(), producer actors can implement custom - // response processing by overriding the routeResponse method - override def routeResponse(msg: Any) { transformer forward msg } - } - - class HttpTransformer extends Actor { - def receive = { - case msg: CamelMessage => - sender() ! (msg.mapBody { body: Array[Byte] => - new String(body).replaceAll("Akka ", "AKKA ") - }) - case msg: Failure => sender() ! 
msg - } - } - -} diff --git a/akka-samples/akka-sample-camel-scala/src/main/scala/sample/camel/QuartzExample.scala b/akka-samples/akka-sample-camel-scala/src/main/scala/sample/camel/QuartzExample.scala deleted file mode 100644 index 3a02a5be08..0000000000 --- a/akka-samples/akka-sample-camel-scala/src/main/scala/sample/camel/QuartzExample.scala +++ /dev/null @@ -1,26 +0,0 @@ -package sample.camel - -import akka.actor.ActorSystem -import akka.actor.Props -import akka.camel.Consumer - -object QuartzExample { - - def main(args: Array[String]): Unit = { - val system = ActorSystem("my-quartz-system") - system.actorOf(Props[MyQuartzActor]) - } - - class MyQuartzActor extends Consumer { - - def endpointUri = "quartz://example?cron=0/2+*+*+*+*+?" - - def receive = { - - case msg => println("==============> received %s " format msg) - - } - - } - -} diff --git a/akka-samples/akka-sample-camel-scala/tutorial/camel-async-interact.png b/akka-samples/akka-sample-camel-scala/tutorial/camel-async-interact.png deleted file mode 100644 index 55a2a4505b..0000000000 Binary files a/akka-samples/akka-sample-camel-scala/tutorial/camel-async-interact.png and /dev/null differ diff --git a/akka-samples/akka-sample-camel-scala/tutorial/camel-async-sequence.png b/akka-samples/akka-sample-camel-scala/tutorial/camel-async-sequence.png deleted file mode 100644 index 416c5a181b..0000000000 Binary files a/akka-samples/akka-sample-camel-scala/tutorial/camel-async-sequence.png and /dev/null differ diff --git a/akka-samples/akka-sample-camel-scala/tutorial/camel-custom-route.png b/akka-samples/akka-sample-camel-scala/tutorial/camel-custom-route.png deleted file mode 100644 index efacdb8f82..0000000000 Binary files a/akka-samples/akka-sample-camel-scala/tutorial/camel-custom-route.png and /dev/null differ diff --git a/akka-samples/akka-sample-camel-scala/tutorial/index.html b/akka-samples/akka-sample-camel-scala/tutorial/index.html deleted file mode 100644 index 1026f1065f..0000000000 --- 
a/akka-samples/akka-sample-camel-scala/tutorial/index.html +++ /dev/null @@ -1,161 +0,0 @@ - - - Akka Camel Samples with Scala - - - - -
-

-This tutorial contains 3 samples of -Akka Camel. -

- -
    -
  • Asynchronous routing and transformation
  • -
  • Custom Camel route
  • -
  • Quartz scheduler
  • -
- -
- -
- -

Asynchronous routing and transformation

- -

-This example demonstrates how to implement consumer and producer actors that -support - -Asynchronous routing with their Camel endpoints. The sample -application transforms the content of the Akka homepage, http://akka.io, -by replacing every occurrence of *Akka* with *AKKA*. -

- -

-To run this example, go to the Run -tab, and start the application main class sample.camel.HttpExample if it's not already started. -Then direct the browser to http://localhost:8875 and the -transformed Akka homepage should be displayed. Please note that this example will probably not work if you're -behind an HTTP proxy. -

- -

-The following figure gives an overview how the example actors interact with -external systems and with each other. A browser sends a GET request to -http://localhost:8875 which is the published endpoint of the -HttpConsumer -actor. The HttpConsumer actor forwards the requests to the -HttpProducer -actor which retrieves the Akka homepage from http://akka.io. The retrieved HTML -is then forwarded to the -HttpTransformer -actor which replaces all occurrences of *Akka* with *AKKA*. The transformation result is sent back the HttpConsumer -which finally returns it to the browser. -

- - - -

-Implementing the example actor classes and wiring them together is rather easy -as shown in HttpExample.scala. -

- - -

-The jetty endpoints of HttpConsumer and -HttpProducer support asynchronous in-out message exchanges and do not allocate threads for the full duration of -the exchange. This is achieved by using Jetty continuations -on the consumer-side and by using Jetty's asynchronous HTTP client -on the producer side. The following high-level sequence diagram illustrates that. -

- - - -
-
- -

Custom Camel route example

- -

-This section also demonstrates the combined usage of a -RouteProducer -and a RouteConsumer -actor as well as the inclusion of a -custom Camel route. -The following figure gives an overview. -

- - - -
    -
  • A consumer actor receives a message from an HTTP client
  • - -
  • It forwards the message to another actor that transforms the message (encloses - the original message into hyphens)
  • - -
  • The transformer actor forwards the transformed message to a producer actor
  • - -
  • The producer actor sends the message to a custom Camel route beginning at the - direct:welcome endpoint
  • - -
  • A processor (transformer) in the custom Camel route prepends "Welcome" to the - original message and creates a result message
  • - -
  • The producer actor sends the result back to the consumer actor which returns - it to the HTTP client
  • -
- -

-The producer actor knows where to reply the message to because the consumer and -transformer actors have forwarded the original sender reference as well. The -application configuration and the route starting from direct:welcome are done in the code above. -

- -

-To run this example, go to the Run -tab, and start the application main class sample.camel.CustomRouteExample -

- -

-POST a message to http://localhost:8877/camel/welcome. -

- -

-   curl -H "Content-Type: text/plain" -d "Anke" http://localhost:8877/camel/welcome
-
- -

-The response should be: -

- -

-   Welcome - Anke -
-
- -
-
- -

Quartz Scheduler Example

- -

-Here is an example showing how simple it is to implement a cron-style scheduler by -using the Camel Quartz component in Akka. -

-

-Open QuartzExample.scala. -

-

-The example creates a "timer" actor which fires a message every 2 -seconds. -

- -

-For more information about the Camel Quartz component, see here: -http://camel.apache.org/quartz.html -

- -
- - - diff --git a/akka-samples/akka-sample-cluster-java/.gitignore b/akka-samples/akka-sample-cluster-java/.gitignore deleted file mode 100644 index b0814a06c4..0000000000 --- a/akka-samples/akka-sample-cluster-java/.gitignore +++ /dev/null @@ -1,18 +0,0 @@ -*# -*.iml -*.ipr -*.iws -*.pyc -*.tm.epoch -*.vim -*-shim.sbt -.idea/ -/project/plugins/project -project/boot -target/ -/logs -.cache -.classpath -.project -.settings -native/ diff --git a/akka-samples/akka-sample-cluster-java/COPYING b/akka-samples/akka-sample-cluster-java/COPYING deleted file mode 100644 index 0e259d42c9..0000000000 --- a/akka-samples/akka-sample-cluster-java/COPYING +++ /dev/null @@ -1,121 +0,0 @@ -Creative Commons Legal Code - -CC0 1.0 Universal - - CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE - LEGAL SERVICES. DISTRIBUTION OF THIS DOCUMENT DOES NOT CREATE AN - ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS - INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES - REGARDING THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS - PROVIDED HEREUNDER, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM - THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS PROVIDED - HEREUNDER. - -Statement of Purpose - -The laws of most jurisdictions throughout the world automatically confer -exclusive Copyright and Related Rights (defined below) upon the creator -and subsequent owner(s) (each and all, an "owner") of an original work of -authorship and/or a database (each, a "Work"). - -Certain owners wish to permanently relinquish those rights to a Work for -the purpose of contributing to a commons of creative, cultural and -scientific works ("Commons") that the public can reliably and without fear -of later claims of infringement build upon, modify, incorporate in other -works, reuse and redistribute as freely as possible in any form whatsoever -and for any purposes, including without limitation commercial purposes. 
-These owners may contribute to the Commons to promote the ideal of a free -culture and the further production of creative, cultural and scientific -works, or to gain reputation or greater distribution for their Work in -part through the use and efforts of others. - -For these and/or other purposes and motivations, and without any -expectation of additional consideration or compensation, the person -associating CC0 with a Work (the "Affirmer"), to the extent that he or she -is an owner of Copyright and Related Rights in the Work, voluntarily -elects to apply CC0 to the Work and publicly distribute the Work under its -terms, with knowledge of his or her Copyright and Related Rights in the -Work and the meaning and intended legal effect of CC0 on those rights. - -1. Copyright and Related Rights. A Work made available under CC0 may be -protected by copyright and related or neighboring rights ("Copyright and -Related Rights"). Copyright and Related Rights include, but are not -limited to, the following: - - i. the right to reproduce, adapt, distribute, perform, display, - communicate, and translate a Work; - ii. moral rights retained by the original author(s) and/or performer(s); -iii. publicity and privacy rights pertaining to a person's image or - likeness depicted in a Work; - iv. rights protecting against unfair competition in regards to a Work, - subject to the limitations in paragraph 4(a), below; - v. rights protecting the extraction, dissemination, use and reuse of data - in a Work; - vi. database rights (such as those arising under Directive 96/9/EC of the - European Parliament and of the Council of 11 March 1996 on the legal - protection of databases, and under any national implementation - thereof, including any amended or successor version of such - directive); and -vii. other similar, equivalent or corresponding rights throughout the - world based on applicable law or treaty, and any national - implementations thereof. - -2. Waiver. 
To the greatest extent permitted by, but not in contravention -of, applicable law, Affirmer hereby overtly, fully, permanently, -irrevocably and unconditionally waives, abandons, and surrenders all of -Affirmer's Copyright and Related Rights and associated claims and causes -of action, whether now known or unknown (including existing as well as -future claims and causes of action), in the Work (i) in all territories -worldwide, (ii) for the maximum duration provided by applicable law or -treaty (including future time extensions), (iii) in any current or future -medium and for any number of copies, and (iv) for any purpose whatsoever, -including without limitation commercial, advertising or promotional -purposes (the "Waiver"). Affirmer makes the Waiver for the benefit of each -member of the public at large and to the detriment of Affirmer's heirs and -successors, fully intending that such Waiver shall not be subject to -revocation, rescission, cancellation, termination, or any other legal or -equitable action to disrupt the quiet enjoyment of the Work by the public -as contemplated by Affirmer's express Statement of Purpose. - -3. Public License Fallback. Should any part of the Waiver for any reason -be judged legally invalid or ineffective under applicable law, then the -Waiver shall be preserved to the maximum extent permitted taking into -account Affirmer's express Statement of Purpose. 
In addition, to the -extent the Waiver is so judged Affirmer hereby grants to each affected -person a royalty-free, non transferable, non sublicensable, non exclusive, -irrevocable and unconditional license to exercise Affirmer's Copyright and -Related Rights in the Work (i) in all territories worldwide, (ii) for the -maximum duration provided by applicable law or treaty (including future -time extensions), (iii) in any current or future medium and for any number -of copies, and (iv) for any purpose whatsoever, including without -limitation commercial, advertising or promotional purposes (the -"License"). The License shall be deemed effective as of the date CC0 was -applied by Affirmer to the Work. Should any part of the License for any -reason be judged legally invalid or ineffective under applicable law, such -partial invalidity or ineffectiveness shall not invalidate the remainder -of the License, and in such case Affirmer hereby affirms that he or she -will not (i) exercise any of his or her remaining Copyright and Related -Rights in the Work or (ii) assert any associated claims and causes of -action with respect to the Work, in either case contrary to Affirmer's -express Statement of Purpose. - -4. Limitations and Disclaimers. - - a. No trademark or patent rights held by Affirmer are waived, abandoned, - surrendered, licensed or otherwise affected by this document. - b. Affirmer offers the Work as-is and makes no representations or - warranties of any kind concerning the Work, express, implied, - statutory or otherwise, including without limitation warranties of - title, merchantability, fitness for a particular purpose, non - infringement, or the absence of latent or other defects, accuracy, or - the present or absence of errors, whether or not discoverable, all to - the greatest extent permissible under applicable law. - c. 
Affirmer disclaims responsibility for clearing rights of other persons - that may apply to the Work or any use thereof, including without - limitation any person's Copyright and Related Rights in the Work. - Further, Affirmer disclaims responsibility for obtaining any necessary - consents, permissions or other rights required for any use of the - Work. - d. Affirmer understands and acknowledges that Creative Commons is not a - party to this document and has no duty or obligation with respect to - this CC0 or use of the Work. diff --git a/akka-samples/akka-sample-cluster-java/LICENSE b/akka-samples/akka-sample-cluster-java/LICENSE deleted file mode 100644 index 2a18b45589..0000000000 --- a/akka-samples/akka-sample-cluster-java/LICENSE +++ /dev/null @@ -1,10 +0,0 @@ -Activator Template by Lightbend - -Licensed under Public Domain (CC0) - -To the extent possible under law, the person who associated CC0 with -this Activator Tempate has waived all copyright and related or neighboring -rights to this Activator Template. - -You should have received a copy of the CC0 legalcode along with this -work. If not, see . 
diff --git a/akka-samples/akka-sample-cluster-java/activator.properties b/akka-samples/akka-sample-cluster-java/activator.properties deleted file mode 100644 index c1d11ef48c..0000000000 --- a/akka-samples/akka-sample-cluster-java/activator.properties +++ /dev/null @@ -1,7 +0,0 @@ -name=akka-sample-cluster-java -title=Akka Cluster Samples with Java -description=Akka Cluster Samples with Java -tags=akka,cluster,java,sample -authorName=Akka Team -authorLink=http://akka.io/ -sourceLink=https://github.com/akka/akka diff --git a/akka-samples/akka-sample-cluster-java/build.sbt b/akka-samples/akka-sample-cluster-java/build.sbt deleted file mode 100644 index 4f43a617a2..0000000000 --- a/akka-samples/akka-sample-cluster-java/build.sbt +++ /dev/null @@ -1,50 +0,0 @@ -import com.typesafe.sbt.SbtMultiJvm -import com.typesafe.sbt.SbtMultiJvm.MultiJvmKeys.MultiJvm - -val akkaVersion = "2.5-SNAPSHOT" - -val project = Project( - id = "akka-sample-cluster-java", - base = file(".") - ) - .settings(SbtMultiJvm.multiJvmSettings: _*) - .settings( - name := "akka-sample-cluster-java", - version := "2.5-SNAPSHOT", - scalaVersion := "2.11.8", - scalacOptions in Compile ++= Seq("-encoding", "UTF-8", "-target:jvm-1.8", "-deprecation", "-feature", "-unchecked", "-Xlog-reflective-calls", "-Xlint"), - javacOptions in Compile ++= Seq("-source", "1.8", "-target", "1.8", "-Xlint:unchecked", "-Xlint:deprecation"), - javacOptions in doc in Compile := Seq("-source", "1.8", "-Xdoclint:none"), - libraryDependencies ++= Seq( - "com.typesafe.akka" %% "akka-actor" % akkaVersion, - "com.typesafe.akka" %% "akka-remote" % akkaVersion, - "com.typesafe.akka" %% "akka-cluster" % akkaVersion, - "com.typesafe.akka" %% "akka-cluster-metrics" % akkaVersion, - "com.typesafe.akka" %% "akka-cluster-tools" % akkaVersion, - "com.typesafe.akka" %% "akka-multi-node-testkit" % akkaVersion, - "org.scalatest" %% "scalatest" % "2.2.1" % "test", - "io.kamon" % "sigar-loader" % "1.6.6-rev002"), - javaOptions in run ++= Seq( - 
"-Xms128m", "-Xmx1024m", "-Djava.library.path=./target/native"), - Keys.fork in run := true, - mainClass in (Compile, run) := Some("sample.cluster.simple.SimpleClusterApp"), - // make sure that MultiJvm test are compiled by the default test compilation - compile in MultiJvm <<= (compile in MultiJvm) triggeredBy (compile in Test), - // disable parallel tests - parallelExecution in Test := false, - // make sure that MultiJvm tests are executed by the default test target, - // and combine the results from ordinary test and multi-jvm tests - executeTests in Test <<= (executeTests in Test, executeTests in MultiJvm) map { - case (testResults, multiNodeResults) => - val overall = - if (testResults.overall.id < multiNodeResults.overall.id) - multiNodeResults.overall - else - testResults.overall - Tests.Output(overall, - testResults.events ++ multiNodeResults.events, - testResults.summaries ++ multiNodeResults.summaries) - }, - licenses := Seq(("CC0", url("http://creativecommons.org/publicdomain/zero/1.0"))) - ) - .configs (MultiJvm) diff --git a/akka-samples/akka-sample-cluster-java/project/build.properties b/akka-samples/akka-sample-cluster-java/project/build.properties deleted file mode 100644 index 748703f770..0000000000 --- a/akka-samples/akka-sample-cluster-java/project/build.properties +++ /dev/null @@ -1 +0,0 @@ -sbt.version=0.13.7 diff --git a/akka-samples/akka-sample-cluster-java/project/plugins.sbt b/akka-samples/akka-sample-cluster-java/project/plugins.sbt deleted file mode 100644 index c3e7d797de..0000000000 --- a/akka-samples/akka-sample-cluster-java/project/plugins.sbt +++ /dev/null @@ -1,4 +0,0 @@ - -resolvers += Classpaths.typesafeResolver - -addSbtPlugin("com.typesafe.sbt" % "sbt-multi-jvm" % "0.3.8") diff --git a/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/factorial/Extra.java b/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/factorial/Extra.java deleted file mode 100644 index dd35e36bf1..0000000000 --- 
a/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/factorial/Extra.java +++ /dev/null @@ -1,47 +0,0 @@ -package sample.cluster.factorial; - -import java.util.Arrays; -import java.util.Collections; - -import akka.actor.ActorRef; -import akka.actor.Props; -import akka.actor.AbstractActor; -import akka.cluster.metrics.AdaptiveLoadBalancingGroup; -import akka.cluster.metrics.AdaptiveLoadBalancingPool; -import akka.cluster.routing.ClusterRouterGroup; -import akka.cluster.routing.ClusterRouterGroupSettings; -import akka.cluster.routing.ClusterRouterPool; -import akka.cluster.routing.ClusterRouterPoolSettings; -import akka.cluster.metrics.HeapMetricsSelector; -import akka.cluster.metrics.SystemLoadAverageMetricsSelector; - -//not used, only for documentation -abstract class FactorialFrontend2 extends AbstractActor { - //#router-lookup-in-code - int totalInstances = 100; - Iterable routeesPaths = Arrays.asList("/user/factorialBackend", ""); - boolean allowLocalRoutees = true; - String useRole = "backend"; - ActorRef backend = getContext().actorOf( - new ClusterRouterGroup(new AdaptiveLoadBalancingGroup( - HeapMetricsSelector.getInstance(), Collections. 
emptyList()), - new ClusterRouterGroupSettings(totalInstances, routeesPaths, - allowLocalRoutees, useRole)).props(), "factorialBackendRouter2"); - //#router-lookup-in-code -} - -//not used, only for documentation -abstract class FactorialFrontend3 extends AbstractActor { - //#router-deploy-in-code - int totalInstances = 100; - int maxInstancesPerNode = 3; - boolean allowLocalRoutees = false; - String useRole = "backend"; - ActorRef backend = getContext().actorOf( - new ClusterRouterPool(new AdaptiveLoadBalancingPool( - SystemLoadAverageMetricsSelector.getInstance(), 0), - new ClusterRouterPoolSettings(totalInstances, maxInstancesPerNode, - allowLocalRoutees, useRole)).props(Props - .create(FactorialBackend.class)), "factorialBackendRouter3"); - //#router-deploy-in-code -} diff --git a/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/factorial/FactorialApp.java b/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/factorial/FactorialApp.java deleted file mode 100644 index 9c8c6ddeef..0000000000 --- a/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/factorial/FactorialApp.java +++ /dev/null @@ -1,12 +0,0 @@ -package sample.cluster.factorial; - -public class FactorialApp { - - public static void main(String[] args) { - // starting 3 backend nodes and 1 frontend node - FactorialBackendMain.main(new String[] { "2551" }); - FactorialBackendMain.main(new String[] { "2552" }); - FactorialBackendMain.main(new String[0]); - FactorialFrontendMain.main(new String[0]); - } -} diff --git a/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/factorial/FactorialBackend.java b/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/factorial/FactorialBackend.java deleted file mode 100644 index 53dccd8c8b..0000000000 --- a/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/factorial/FactorialBackend.java +++ /dev/null @@ -1,37 +0,0 @@ -package sample.cluster.factorial; - -import 
java.math.BigInteger; -import scala.concurrent.Future; -import akka.actor.AbstractActor; -import static akka.dispatch.Futures.future; -import static akka.pattern.Patterns.pipe; - -//#backend -public class FactorialBackend extends AbstractActor { - - @Override - public Receive createReceive() { - return receiveBuilder() - .match(Integer.class, n -> { - Future f = future(() -> factorial(n), - getContext().dispatcher()); - - Future result = - f.map(factorial -> new FactorialResult(n, factorial), - getContext().dispatcher()); - - pipe(result, getContext().dispatcher()).to(sender()); - }) - .build(); - } - - BigInteger factorial(int n) { - BigInteger acc = BigInteger.ONE; - for (int i = 1; i <= n; ++i) { - acc = acc.multiply(BigInteger.valueOf(i)); - } - return acc; - } -} -//#backend - diff --git a/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/factorial/FactorialBackendMain.java b/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/factorial/FactorialBackendMain.java deleted file mode 100644 index 6c322a6aa1..0000000000 --- a/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/factorial/FactorialBackendMain.java +++ /dev/null @@ -1,25 +0,0 @@ -package sample.cluster.factorial; - -import com.typesafe.config.Config; -import com.typesafe.config.ConfigFactory; -import akka.actor.ActorSystem; -import akka.actor.Props; - -public class FactorialBackendMain { - - public static void main(String[] args) { - // Override the configuration of the port when specified as program argument - final String port = args.length > 0 ? args[0] : "0"; - final Config config = ConfigFactory.parseString("akka.remote.netty.tcp.port=" + port). - withFallback(ConfigFactory.parseString("akka.cluster.roles = [backend]")). 
- withFallback(ConfigFactory.load("factorial")); - - ActorSystem system = ActorSystem.create("ClusterSystem", config); - - system.actorOf(Props.create(FactorialBackend.class), "factorialBackend"); - - system.actorOf(Props.create(MetricsListener.class), "metricsListener"); - - } - -} diff --git a/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/factorial/FactorialFrontend.java b/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/factorial/FactorialFrontend.java deleted file mode 100644 index 4ee81535a2..0000000000 --- a/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/factorial/FactorialFrontend.java +++ /dev/null @@ -1,62 +0,0 @@ -package sample.cluster.factorial; - -import java.util.concurrent.TimeUnit; -import scala.concurrent.duration.Duration; -import akka.actor.ActorRef; -import akka.actor.ReceiveTimeout; -import akka.actor.AbstractActor; -import akka.event.Logging; -import akka.event.LoggingAdapter; -import akka.routing.FromConfig; - -//#frontend -public class FactorialFrontend extends AbstractActor { - final int upToN; - final boolean repeat; - - LoggingAdapter log = Logging.getLogger(getContext().system(), this); - - ActorRef backend = getContext().actorOf(FromConfig.getInstance().props(), - "factorialBackendRouter"); - - public FactorialFrontend(int upToN, boolean repeat) { - this.upToN = upToN; - this.repeat = repeat; - } - - @Override - public void preStart() { - sendJobs(); - getContext().setReceiveTimeout(Duration.create(10, TimeUnit.SECONDS)); - } - - @Override - public Receive createReceive() { - return receiveBuilder() - .match(FactorialResult.class, result -> { - if (result.n == upToN) { - log.debug("{}! 
= {}", result.n, result.factorial); - if (repeat) - sendJobs(); - else - getContext().stop(self()); - } - }) - .match(ReceiveTimeout.class, x -> { - log.info("Timeout"); - sendJobs(); - }) - .build(); - } - - void sendJobs() { - log.info("Starting batch of factorials up to [{}]", upToN); - for (int n = 1; n <= upToN; n++) { - backend.tell(n, getSelf()); - } - } - -} - -//#frontend - diff --git a/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/factorial/FactorialFrontendMain.java b/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/factorial/FactorialFrontendMain.java deleted file mode 100644 index f25a355be1..0000000000 --- a/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/factorial/FactorialFrontendMain.java +++ /dev/null @@ -1,39 +0,0 @@ -package sample.cluster.factorial; - - -import java.util.concurrent.TimeoutException; -import java.util.concurrent.TimeUnit; -import scala.concurrent.Await; -import scala.concurrent.duration.Duration; -import com.typesafe.config.Config; -import com.typesafe.config.ConfigFactory; - -import akka.actor.ActorSystem; -import akka.actor.Props; -import akka.cluster.Cluster; - -public class FactorialFrontendMain { - - public static void main(String[] args) { - final int upToN = 200; - - final Config config = ConfigFactory.parseString( - "akka.cluster.roles = [frontend]").withFallback( - ConfigFactory.load("factorial")); - - final ActorSystem system = ActorSystem.create("ClusterSystem", config); - system.log().info( - "Factorials will start when 2 backend members in the cluster."); - //#registerOnUp - Cluster.get(system).registerOnMemberUp(new Runnable() { - @Override - public void run() { - system.actorOf(Props.create(FactorialFrontend.class, upToN, true), - "factorialFrontend"); - } - }); - //#registerOnUp - - } - -} diff --git a/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/simple/SimpleClusterApp.java 
b/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/simple/SimpleClusterApp.java deleted file mode 100644 index c13041a513..0000000000 --- a/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/simple/SimpleClusterApp.java +++ /dev/null @@ -1,34 +0,0 @@ -package sample.cluster.simple; - -import akka.actor.ActorSystem; -import akka.actor.Props; - -import com.typesafe.config.Config; -import com.typesafe.config.ConfigFactory; - -public class SimpleClusterApp { - - public static void main(String[] args) { - if (args.length == 0) - startup(new String[] { "2551", "2552", "0" }); - else - startup(args); - } - - public static void startup(String[] ports) { - for (String port : ports) { - // Override the configuration of the port - Config config = ConfigFactory.parseString( - "akka.remote.netty.tcp.port=" + port).withFallback( - ConfigFactory.load()); - - // Create an Akka system - ActorSystem system = ActorSystem.create("ClusterSystem", config); - - // Create an actor that handles cluster domain events - system.actorOf(Props.create(SimpleClusterListener.class), - "clusterListener"); - - } - } -} diff --git a/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/simple/SimpleClusterListener.java b/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/simple/SimpleClusterListener.java deleted file mode 100644 index d6a861c708..0000000000 --- a/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/simple/SimpleClusterListener.java +++ /dev/null @@ -1,49 +0,0 @@ -package sample.cluster.simple; - -import akka.actor.AbstractActor; -import akka.cluster.Cluster; -import akka.cluster.ClusterEvent; -import akka.cluster.ClusterEvent.MemberEvent; -import akka.cluster.ClusterEvent.MemberUp; -import akka.cluster.ClusterEvent.MemberRemoved; -import akka.cluster.ClusterEvent.UnreachableMember; -import akka.event.Logging; -import akka.event.LoggingAdapter; - -public class SimpleClusterListener extends AbstractActor { - 
LoggingAdapter log = Logging.getLogger(getContext().system(), this); - Cluster cluster = Cluster.get(getContext().system()); - - //subscribe to cluster changes - @Override - public void preStart() { - //#subscribe - cluster.subscribe(self(), ClusterEvent.initialStateAsEvents(), - MemberEvent.class, UnreachableMember.class); - //#subscribe - } - - //re-subscribe when restart - @Override - public void postStop() { - cluster.unsubscribe(self()); - } - - @Override - public Receive createReceive() { - return receiveBuilder() - .match(MemberUp.class, mUp -> { - log.info("Member is Up: {}", mUp.member()); - }) - .match(UnreachableMember.class, mUnreachable -> { - log.info("Member detected as unreachable: {}", mUnreachable.member()); - }) - .match(MemberRemoved.class, mRemoved -> { - log.info("Member is Removed: {}", mRemoved.member()); - }) - .match(MemberEvent.class, mEvent -> { - // ignore - }) - .build(); - } -} diff --git a/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/simple/SimpleClusterListener2.java b/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/simple/SimpleClusterListener2.java deleted file mode 100644 index b7d7872830..0000000000 --- a/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/simple/SimpleClusterListener2.java +++ /dev/null @@ -1,51 +0,0 @@ -package sample.cluster.simple; - -import akka.actor.AbstractActor; -import akka.cluster.Cluster; -import akka.cluster.ClusterEvent.CurrentClusterState; -import akka.cluster.ClusterEvent.MemberEvent; -import akka.cluster.ClusterEvent.MemberUp; -import akka.cluster.ClusterEvent.MemberRemoved; -import akka.cluster.ClusterEvent.UnreachableMember; -import akka.event.Logging; -import akka.event.LoggingAdapter; - -public class SimpleClusterListener2 extends AbstractActor { - LoggingAdapter log = Logging.getLogger(getContext().system(), this); - Cluster cluster = Cluster.get(getContext().system()); - - //subscribe to cluster changes - @Override - public void 
preStart() { - //#subscribe - cluster.subscribe(self(), MemberEvent.class, UnreachableMember.class); - //#subscribe - } - - //re-subscribe when restart - @Override - public void postStop() { - cluster.unsubscribe(self()); - } - - @Override - public Receive createReceive() { - return receiveBuilder() - .match(CurrentClusterState.class, state -> { - log.info("Current members: {}", state.members()); - }) - .match(MemberUp.class, mUp -> { - log.info("Member is Up: {}", mUp.member()); - }) - .match(UnreachableMember.class, mUnreachable -> { - log.info("Member detected as unreachable: {}", mUnreachable.member()); - }) - .match(MemberRemoved.class, mRemoved -> { - log.info("Member is Removed: {}", mRemoved.member()); - }) - .match(MemberEvent.class, mEvent -> { - // ignore - }) - .build(); - } -} diff --git a/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/stats/Extra.java b/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/stats/Extra.java deleted file mode 100644 index 7f92c3218e..0000000000 --- a/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/stats/Extra.java +++ /dev/null @@ -1,43 +0,0 @@ -package sample.cluster.stats; - -import java.util.Collections; - -import akka.actor.ActorRef; -import akka.actor.Props; -import akka.actor.AbstractActor; -import akka.cluster.routing.ClusterRouterGroup; -import akka.cluster.routing.ClusterRouterGroupSettings; -import akka.cluster.routing.ClusterRouterPool; -import akka.cluster.routing.ClusterRouterPoolSettings; -import akka.routing.ConsistentHashingGroup; -import akka.routing.ConsistentHashingPool; - -//not used, only for documentation -abstract class StatsService2 extends AbstractActor { - //#router-lookup-in-code - int totalInstances = 100; - Iterable routeesPaths = Collections - .singletonList("/user/statsWorker"); - boolean allowLocalRoutees = true; - String useRole = "compute"; - ActorRef workerRouter = getContext().actorOf( - new ClusterRouterGroup(new 
ConsistentHashingGroup(routeesPaths), - new ClusterRouterGroupSettings(totalInstances, routeesPaths, - allowLocalRoutees, useRole)).props(), "workerRouter2"); - //#router-lookup-in-code -} - -//not used, only for documentation -abstract class StatsService3 extends AbstractActor { - //#router-deploy-in-code - int totalInstances = 100; - int maxInstancesPerNode = 3; - boolean allowLocalRoutees = false; - String useRole = "compute"; - ActorRef workerRouter = getContext().actorOf( - new ClusterRouterPool(new ConsistentHashingPool(0), - new ClusterRouterPoolSettings(totalInstances, maxInstancesPerNode, - allowLocalRoutees, useRole)).props(Props - .create(StatsWorker.class)), "workerRouter3"); - //#router-deploy-in-code -} diff --git a/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/stats/StatsAggregator.java b/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/stats/StatsAggregator.java deleted file mode 100644 index 10fa307bb2..0000000000 --- a/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/stats/StatsAggregator.java +++ /dev/null @@ -1,54 +0,0 @@ -package sample.cluster.stats; - -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.TimeUnit; - -import sample.cluster.stats.StatsMessages.JobFailed; -import sample.cluster.stats.StatsMessages.StatsResult; -import scala.concurrent.duration.Duration; -import akka.actor.ActorRef; -import akka.actor.ReceiveTimeout; -import akka.actor.AbstractActor; - -//#aggregator -public class StatsAggregator extends AbstractActor { - - final int expectedResults; - final ActorRef replyTo; - final List results = new ArrayList(); - - public StatsAggregator(int expectedResults, ActorRef replyTo) { - this.expectedResults = expectedResults; - this.replyTo = replyTo; - } - - @Override - public void preStart() { - getContext().setReceiveTimeout(Duration.create(3, TimeUnit.SECONDS)); - } - - @Override - public Receive createReceive() { - return receiveBuilder() - 
.match(Integer.class, wordCount -> { - results.add(wordCount); - if (results.size() == expectedResults) { - int sum = 0; - for (int c : results) - sum += c; - double meanWordLength = ((double) sum) / results.size(); - replyTo.tell(new StatsResult(meanWordLength), self()); - getContext().stop(self()); - } - }) - .match(ReceiveTimeout.class, timeout -> { - replyTo.tell(new JobFailed("Service unavailable, try again later"), - self()); - getContext().stop(self()); - }) - .build(); - } - -} -//#aggregator diff --git a/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/stats/StatsSampleClientMain.java b/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/stats/StatsSampleClientMain.java deleted file mode 100644 index ecc255aa77..0000000000 --- a/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/stats/StatsSampleClientMain.java +++ /dev/null @@ -1,17 +0,0 @@ -package sample.cluster.stats; - -import akka.actor.ActorSystem; -import akka.actor.Props; - -import com.typesafe.config.ConfigFactory; - -public class StatsSampleClientMain { - - public static void main(String[] args) { - // note that client is not a compute node, role not defined - ActorSystem system = ActorSystem.create("ClusterSystem", - ConfigFactory.load("stats1")); - system.actorOf(Props.create(StatsSampleClient.class, "/user/statsService"), - "client"); - } -} diff --git a/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/stats/StatsSampleMain.java b/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/stats/StatsSampleMain.java deleted file mode 100644 index 5cb5ffbdc0..0000000000 --- a/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/stats/StatsSampleMain.java +++ /dev/null @@ -1,36 +0,0 @@ -package sample.cluster.stats; - -import com.typesafe.config.Config; -import com.typesafe.config.ConfigFactory; - -import akka.actor.ActorSystem; -import akka.actor.Props; - -public class StatsSampleMain { - - public static 
void main(String[] args) { - if (args.length == 0) { - startup(new String[] { "2551", "2552", "0" }); - StatsSampleClientMain.main(new String[0]); - } else { - startup(args); - } - } - - public static void startup(String[] ports) { - for (String port : ports) { - // Override the configuration of the port - Config config = ConfigFactory - .parseString("akka.remote.netty.tcp.port=" + port) - .withFallback( - ConfigFactory.parseString("akka.cluster.roles = [compute]")) - .withFallback(ConfigFactory.load("stats1")); - - ActorSystem system = ActorSystem.create("ClusterSystem", config); - - system.actorOf(Props.create(StatsWorker.class), "statsWorker"); - system.actorOf(Props.create(StatsService.class), "statsService"); - } - - } -} diff --git a/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/stats/StatsService.java b/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/stats/StatsService.java deleted file mode 100644 index 310e20766a..0000000000 --- a/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/stats/StatsService.java +++ /dev/null @@ -1,42 +0,0 @@ -package sample.cluster.stats; - -import sample.cluster.stats.StatsMessages.StatsJob; -import akka.actor.ActorRef; -import akka.actor.Props; -import akka.actor.AbstractActor; -import akka.routing.ConsistentHashingRouter.ConsistentHashableEnvelope; -import akka.routing.FromConfig; - -//#service -public class StatsService extends AbstractActor { - - // This router is used both with lookup and deploy of routees. If you - // have a router with only lookup of routees you can use Props.empty() - // instead of Props.create(StatsWorker.class). 
- ActorRef workerRouter = getContext().actorOf( - FromConfig.getInstance().props(Props.create(StatsWorker.class)), - "workerRouter"); - - @Override - public Receive createReceive() { - return receiveBuilder() - .match(StatsJob.class, job -> !"".equals(job.getText()), job -> { - final String[] words = job.getText().split(" "); - final ActorRef replyTo = sender(); - - // create actor that collects replies from workers - ActorRef aggregator = getContext().actorOf( - Props.create(StatsAggregator.class, words.length, replyTo)); - - // send each word to a worker - for (String word : words) { - workerRouter.tell(new ConsistentHashableEnvelope(word, word), - aggregator); - } - }) - .build(); - } -} - -//#service - diff --git a/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/stats/StatsWorker.java b/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/stats/StatsWorker.java deleted file mode 100644 index 66b0fe76af..0000000000 --- a/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/stats/StatsWorker.java +++ /dev/null @@ -1,27 +0,0 @@ -package sample.cluster.stats; - -import java.util.HashMap; -import java.util.Map; - -import akka.actor.AbstractActor; - -//#worker -public class StatsWorker extends AbstractActor { - - Map cache = new HashMap(); - - @Override - public Receive createReceive() { - return receiveBuilder() - .match(String.class, word -> { - Integer length = cache.get(word); - if (length == null) { - length = word.length(); - cache.put(word, length); - } - sender().tell(length, self()); - }) - .build(); - } -} -//#worker \ No newline at end of file diff --git a/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/transformation/TransformationApp.java b/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/transformation/TransformationApp.java deleted file mode 100644 index 123c39f309..0000000000 --- 
a/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/transformation/TransformationApp.java +++ /dev/null @@ -1,12 +0,0 @@ -package sample.cluster.transformation; - -public class TransformationApp { - - public static void main(String[] args) { - // starting 2 frontend nodes and 3 backend nodes - TransformationBackendMain.main(new String[] { "2551" }); - TransformationBackendMain.main(new String[] { "2552" }); - TransformationBackendMain.main(new String[0]); - TransformationFrontendMain.main(new String[0]); - } -} diff --git a/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/transformation/TransformationBackend.java b/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/transformation/TransformationBackend.java deleted file mode 100644 index 219b3f6a5c..0000000000 --- a/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/transformation/TransformationBackend.java +++ /dev/null @@ -1,56 +0,0 @@ -package sample.cluster.transformation; - -import static sample.cluster.transformation.TransformationMessages.BACKEND_REGISTRATION; -import sample.cluster.transformation.TransformationMessages.TransformationJob; -import sample.cluster.transformation.TransformationMessages.TransformationResult; -import akka.actor.AbstractActor; -import akka.cluster.Cluster; -import akka.cluster.ClusterEvent.CurrentClusterState; -import akka.cluster.ClusterEvent.MemberUp; -import akka.cluster.Member; -import akka.cluster.MemberStatus; - -//#backend -public class TransformationBackend extends AbstractActor { - - Cluster cluster = Cluster.get(getContext().system()); - - //subscribe to cluster changes, MemberUp - @Override - public void preStart() { - cluster.subscribe(self(), MemberUp.class); - } - - //re-subscribe when restart - @Override - public void postStop() { - cluster.unsubscribe(self()); - } - - @Override - public Receive createReceive() { - return receiveBuilder() - .match(TransformationJob.class, job -> { - sender().tell(new 
TransformationResult(job.getText().toUpperCase()), - self()); - }) - .match(CurrentClusterState.class, state -> { - for (Member member : state.getMembers()) { - if (member.status().equals(MemberStatus.up())) { - register(member); - } - } - }) - .match(MemberUp.class, mUp -> { - register(mUp.member()); - }) - .build(); - } - - void register(Member member) { - if (member.hasRole("frontend")) - getContext().actorSelection(member.address() + "/user/frontend").tell( - BACKEND_REGISTRATION, getSelf()); - } -} -//#backend diff --git a/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/transformation/TransformationBackendMain.java b/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/transformation/TransformationBackendMain.java deleted file mode 100644 index dd042b8d5b..0000000000 --- a/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/transformation/TransformationBackendMain.java +++ /dev/null @@ -1,24 +0,0 @@ -package sample.cluster.transformation; - -import com.typesafe.config.Config; -import com.typesafe.config.ConfigFactory; - -import akka.actor.ActorSystem; -import akka.actor.Props; - -public class TransformationBackendMain { - - public static void main(String[] args) { - // Override the configuration of the port when specified as program argument - final String port = args.length > 0 ? args[0] : "0"; - final Config config = ConfigFactory.parseString("akka.remote.netty.tcp.port=" + port). - withFallback(ConfigFactory.parseString("akka.cluster.roles = [backend]")). 
- withFallback(ConfigFactory.load()); - - ActorSystem system = ActorSystem.create("ClusterSystem", config); - - system.actorOf(Props.create(TransformationBackend.class), "backend"); - - } - -} diff --git a/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/transformation/TransformationFrontend.java b/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/transformation/TransformationFrontend.java deleted file mode 100644 index d7b434cdc1..0000000000 --- a/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/transformation/TransformationFrontend.java +++ /dev/null @@ -1,43 +0,0 @@ -package sample.cluster.transformation; - -import static sample.cluster.transformation.TransformationMessages.BACKEND_REGISTRATION; - -import java.util.ArrayList; -import java.util.List; - -import sample.cluster.transformation.TransformationMessages.JobFailed; -import sample.cluster.transformation.TransformationMessages.TransformationJob; -import akka.actor.ActorRef; -import akka.actor.Terminated; -import akka.actor.AbstractActor; - -//#frontend -public class TransformationFrontend extends AbstractActor { - - List backends = new ArrayList(); - int jobCounter = 0; - - @Override - public Receive createReceive() { - return receiveBuilder() - .match(TransformationJob.class, job -> backends.isEmpty(), job -> { - sender().tell( - new JobFailed("Service unavailable, try again later", job), - sender()); - }) - .match(TransformationJob.class, job -> { - jobCounter++; - backends.get(jobCounter % backends.size()) - .forward(job, getContext()); - }) - .matchEquals(BACKEND_REGISTRATION, m -> { - getContext().watch(sender()); - backends.add(sender()); - }) - .match(Terminated.class, terminated -> { - backends.remove(terminated.getActor()); - }) - .build(); - } -} -//#frontend diff --git a/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/transformation/TransformationFrontendMain.java 
b/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/transformation/TransformationFrontendMain.java deleted file mode 100644 index 19ac9da177..0000000000 --- a/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/transformation/TransformationFrontendMain.java +++ /dev/null @@ -1,51 +0,0 @@ -package sample.cluster.transformation; - -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicInteger; - -import com.typesafe.config.Config; -import com.typesafe.config.ConfigFactory; - -import sample.cluster.transformation.TransformationMessages.TransformationJob; -import scala.concurrent.ExecutionContext; -import scala.concurrent.duration.Duration; -import scala.concurrent.duration.FiniteDuration; -import akka.actor.ActorRef; -import akka.actor.ActorSystem; -import akka.actor.Props; -import akka.dispatch.OnSuccess; -import akka.util.Timeout; -import static akka.pattern.Patterns.ask; - -public class TransformationFrontendMain { - - public static void main(String[] args) { - // Override the configuration of the port when specified as program argument - final String port = args.length > 0 ? args[0] : "0"; - final Config config = ConfigFactory.parseString("akka.remote.netty.tcp.port=" + port). - withFallback(ConfigFactory.parseString("akka.cluster.roles = [frontend]")). 
- withFallback(ConfigFactory.load()); - - ActorSystem system = ActorSystem.create("ClusterSystem", config); - - final ActorRef frontend = system.actorOf( - Props.create(TransformationFrontend.class), "frontend"); - final FiniteDuration interval = Duration.create(2, TimeUnit.SECONDS); - final Timeout timeout = new Timeout(Duration.create(5, TimeUnit.SECONDS)); - final ExecutionContext ec = system.dispatcher(); - final AtomicInteger counter = new AtomicInteger(); - system.scheduler().schedule(interval, interval, new Runnable() { - public void run() { - ask(frontend, - new TransformationJob("hello-" + counter.incrementAndGet()), - timeout).onSuccess(new OnSuccess() { - public void onSuccess(Object result) { - System.out.println(result); - } - }, ec); - } - - }, ec); - - } -} diff --git a/akka-samples/akka-sample-cluster-java/src/main/resources/application.conf b/akka-samples/akka-sample-cluster-java/src/main/resources/application.conf deleted file mode 100644 index f45e18038a..0000000000 --- a/akka-samples/akka-sample-cluster-java/src/main/resources/application.conf +++ /dev/null @@ -1,36 +0,0 @@ -#//#snippet -akka { - actor { - provider = "cluster" - } - remote { - log-remote-lifecycle-events = off - netty.tcp { - hostname = "127.0.0.1" - port = 0 - } - } - - cluster { - seed-nodes = [ - "akka.tcp://ClusterSystem@127.0.0.1:2551", - "akka.tcp://ClusterSystem@127.0.0.1:2552"] - - #//#snippet - # excluded from snippet - auto-down-unreachable-after = 10s - #//#snippet - # auto downing is NOT safe for production deployments. - # you may want to use it during development, read more about it in the docs. - # - # auto-down-unreachable-after = 10s - } -} - -# Enable metrics extension in akka-cluster-metrics. -akka.extensions=["akka.cluster.metrics.ClusterMetricsExtension"] - -# Sigar native library extract location during tests. -# Note: use per-jvm-instance folder when running multiple jvm on one host. 
-akka.cluster.metrics.native-library-extract-folder=${user.dir}/target/native -#//#snippet diff --git a/akka-samples/akka-sample-cluster-java/src/main/resources/factorial.conf b/akka-samples/akka-sample-cluster-java/src/main/resources/factorial.conf deleted file mode 100644 index a3240a6aed..0000000000 --- a/akka-samples/akka-sample-cluster-java/src/main/resources/factorial.conf +++ /dev/null @@ -1,33 +0,0 @@ -include "application" - -# //#min-nr-of-members -akka.cluster.min-nr-of-members = 3 -# //#min-nr-of-members - -# //#role-min-nr-of-members -akka.cluster.role { - frontend.min-nr-of-members = 1 - backend.min-nr-of-members = 2 -} -# //#role-min-nr-of-members - -# //#adaptive-router -akka.actor.deployment { - /factorialFrontend/factorialBackendRouter = { - # Router type provided by metrics extension. - router = cluster-metrics-adaptive-group - # Router parameter specific for metrics extension. - # metrics-selector = heap - # metrics-selector = load - # metrics-selector = cpu - metrics-selector = mix - # - routees.paths = ["/user/factorialBackend"] - cluster { - enabled = on - use-role = backend - allow-local-routees = off - } - } -} -# //#adaptive-router diff --git a/akka-samples/akka-sample-cluster-java/src/main/resources/stats1.conf b/akka-samples/akka-sample-cluster-java/src/main/resources/stats1.conf deleted file mode 100644 index 4ae4ed091d..0000000000 --- a/akka-samples/akka-sample-cluster-java/src/main/resources/stats1.conf +++ /dev/null @@ -1,15 +0,0 @@ -include "application" - -# //#config-router-lookup -akka.actor.deployment { - /statsService/workerRouter { - router = consistent-hashing-group - routees.paths = ["/user/statsWorker"] - cluster { - enabled = on - allow-local-routees = on - use-role = compute - } - } -} -# //#config-router-lookup diff --git a/akka-samples/akka-sample-cluster-java/src/main/resources/stats2.conf b/akka-samples/akka-sample-cluster-java/src/main/resources/stats2.conf deleted file mode 100644 index 40c3d487ef..0000000000 --- 
a/akka-samples/akka-sample-cluster-java/src/main/resources/stats2.conf +++ /dev/null @@ -1,16 +0,0 @@ -include "application" - -# //#config-router-deploy -akka.actor.deployment { - /statsService/singleton/workerRouter { - router = consistent-hashing-pool - cluster { - enabled = on - max-nr-of-instances-per-node = 3 - allow-local-routees = on - use-role = compute - } - } -} -# //#config-router-deploy - diff --git a/akka-samples/akka-sample-cluster-java/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSingleMasterSpec.scala b/akka-samples/akka-sample-cluster-java/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSingleMasterSpec.scala deleted file mode 100644 index da1b5c20f9..0000000000 --- a/akka-samples/akka-sample-cluster-java/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSingleMasterSpec.scala +++ /dev/null @@ -1,128 +0,0 @@ -package sample.cluster.stats - -import language.postfixOps -import scala.concurrent.duration._ -import com.typesafe.config.ConfigFactory -import org.scalatest.BeforeAndAfterAll -import org.scalatest.WordSpecLike -import org.scalatest.Matchers -import akka.actor.PoisonPill -import akka.actor.Props -import akka.actor.RootActorPath -import akka.cluster.Cluster -import akka.cluster.Member -import akka.cluster.MemberStatus -import akka.cluster.ClusterEvent.CurrentClusterState -import akka.cluster.ClusterEvent.MemberUp -import akka.cluster.singleton.ClusterSingletonManager -import akka.cluster.singleton.ClusterSingletonManagerSettings -import akka.cluster.singleton.ClusterSingletonProxy -import akka.remote.testkit.MultiNodeConfig -import akka.remote.testkit.MultiNodeSpec -import akka.testkit.ImplicitSender -import sample.cluster.stats.StatsMessages._ -import akka.cluster.singleton.ClusterSingletonProxySettings - -object StatsSampleSingleMasterSpecConfig extends MultiNodeConfig { - // register the named roles (nodes) of the test - val first = role("first") - val second = role("second") - val third = role("third") - - def nodeList = 
Seq(first, second, third) - - // Extract individual sigar library for every node. - nodeList foreach { role => - nodeConfig(role) { - ConfigFactory.parseString(s""" - # Enable metrics extension in akka-cluster-metrics. - akka.extensions=["akka.cluster.metrics.ClusterMetricsExtension"] - # Sigar native library extract location during tests. - akka.cluster.metrics.native-library-extract-folder=target/native/${role.name} - """) - } - } - - // this configuration will be used for all nodes - // note that no fixed host names and ports are used - commonConfig(ConfigFactory.parseString(""" - akka.loglevel = INFO - akka.actor.provider = "cluster" - akka.remote.log-remote-lifecycle-events = off - akka.cluster.roles = [compute] - #//#router-deploy-config - akka.actor.deployment { - /statsService/singleton/workerRouter { - router = consistent-hashing-pool - cluster { - enabled = on - max-nr-of-instances-per-node = 3 - allow-local-routees = on - use-role = compute - } - } - } - #//#router-deploy-config - """)) - -} - -// need one concrete test class per node -class StatsSampleSingleMasterSpecMultiJvmNode1 extends StatsSampleSingleMasterSpec -class StatsSampleSingleMasterSpecMultiJvmNode2 extends StatsSampleSingleMasterSpec -class StatsSampleSingleMasterSpecMultiJvmNode3 extends StatsSampleSingleMasterSpec - -abstract class StatsSampleSingleMasterSpec extends MultiNodeSpec(StatsSampleSingleMasterSpecConfig) - with WordSpecLike with Matchers with BeforeAndAfterAll with ImplicitSender { - - import StatsSampleSingleMasterSpecConfig._ - - override def initialParticipants = roles.size - - override def beforeAll() = multiNodeSpecBeforeAll() - - override def afterAll() = multiNodeSpecAfterAll() - - "The japi stats sample with single master" must { - "illustrate how to startup cluster" in within(15 seconds) { - Cluster(system).subscribe(testActor, classOf[MemberUp]) - expectMsgClass(classOf[CurrentClusterState]) - - val firstAddress = node(first).address - val secondAddress = 
node(second).address - val thirdAddress = node(third).address - - Cluster(system) join firstAddress - - receiveN(3).collect { case MemberUp(m) => m.address }.toSet should be( - Set(firstAddress, secondAddress, thirdAddress)) - - Cluster(system).unsubscribe(testActor) - - system.actorOf(ClusterSingletonManager.props( - Props[StatsService], - terminationMessage = PoisonPill, - settings = ClusterSingletonManagerSettings(system)), - name = "statsService") - - system.actorOf(ClusterSingletonProxy.props("/user/statsService", - ClusterSingletonProxySettings(system).withRole("compute")), "statsServiceProxy") - - testConductor.enter("all-up") - } - - "show usage of the statsServiceProxy" in within(40 seconds) { - val proxy = system.actorSelection(RootActorPath(node(third).address) / "user" / "statsServiceProxy") - - // eventually the service should be ok, - // service and worker nodes might not be up yet - awaitAssert { - proxy ! new StatsJob("this is the text that will be analyzed") - expectMsgType[StatsResult](1.second).getMeanWordLength should be(3.875 +- 0.001) - } - - testConductor.enter("done") - } - } - -} diff --git a/akka-samples/akka-sample-cluster-java/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSpec.scala b/akka-samples/akka-sample-cluster-java/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSpec.scala deleted file mode 100644 index 02a9f09580..0000000000 --- a/akka-samples/akka-sample-cluster-java/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSpec.scala +++ /dev/null @@ -1,132 +0,0 @@ -package sample.cluster.stats - -import language.postfixOps -import scala.concurrent.duration._ - -import akka.actor.Props -import akka.actor.RootActorPath -import akka.cluster.Cluster -import akka.cluster.Member -import akka.cluster.MemberStatus -import akka.cluster.ClusterEvent.CurrentClusterState -import akka.cluster.ClusterEvent.MemberUp -import sample.cluster.stats.StatsMessages._ -import akka.remote.testkit.MultiNodeConfig -import 
com.typesafe.config.ConfigFactory -import org.scalatest.BeforeAndAfterAll -import org.scalatest.WordSpecLike -import org.scalatest.Matchers -import akka.remote.testkit.MultiNodeSpec -import akka.testkit.ImplicitSender - -object StatsSampleSpecConfig extends MultiNodeConfig { - // register the named roles (nodes) of the test - val first = role("first") - val second = role("second") - val third = role("third") - - def nodeList = Seq(first, second, third) - - // Extract individual sigar library for every node. - nodeList foreach { role => - nodeConfig(role) { - ConfigFactory.parseString(s""" - # Enable metrics extension in akka-cluster-metrics. - akka.extensions=["akka.cluster.metrics.ClusterMetricsExtension"] - # Sigar native library extract location during tests. - akka.cluster.metrics.native-library-extract-folder=target/native/${role.name} - """) - } - } - - // this configuration will be used for all nodes - // note that no fixed host names and ports are used - commonConfig(ConfigFactory.parseString(""" - akka.actor.provider = "cluster" - akka.remote.log-remote-lifecycle-events = off - akka.cluster.roles = [compute] - #//#router-lookup-config - akka.actor.deployment { - /statsService/workerRouter { - router = consistent-hashing-group - routees.paths = ["/user/statsWorker"] - cluster { - enabled = on - allow-local-routees = on - use-role = compute - } - } - } - #//#router-lookup-config - """)) - -} - -// need one concrete test class per node -class StatsSampleSpecMultiJvmNode1 extends StatsSampleSpec -class StatsSampleSpecMultiJvmNode2 extends StatsSampleSpec -class StatsSampleSpecMultiJvmNode3 extends StatsSampleSpec - -abstract class StatsSampleSpec extends MultiNodeSpec(StatsSampleSpecConfig) - with WordSpecLike with Matchers with BeforeAndAfterAll - with ImplicitSender { - - import StatsSampleSpecConfig._ - - override def initialParticipants = roles.size - - override def beforeAll() = multiNodeSpecBeforeAll() - - override def afterAll() = 
multiNodeSpecAfterAll() - - "The japi stats sample" must { - - "illustrate how to startup cluster" in within(15 seconds) { - Cluster(system).subscribe(testActor, classOf[MemberUp]) - expectMsgClass(classOf[CurrentClusterState]) - - val firstAddress = node(first).address - val secondAddress = node(second).address - val thirdAddress = node(third).address - - Cluster(system) join firstAddress - - system.actorOf(Props[StatsWorker], "statsWorker") - system.actorOf(Props[StatsService], "statsService") - - receiveN(3).collect { case MemberUp(m) => m.address }.toSet should be( - Set(firstAddress, secondAddress, thirdAddress)) - - Cluster(system).unsubscribe(testActor) - - testConductor.enter("all-up") - } - - "show usage of the statsService from one node" in within(15 seconds) { - runOn(second) { - assertServiceOk() - } - - testConductor.enter("done-2") - } - - def assertServiceOk(): Unit = { - val service = system.actorSelection(node(third) / "user" / "statsService") - // eventually the service should be ok, - // first attempts might fail because worker actors not started yet - awaitAssert { - service ! 
new StatsJob("this is the text that will be analyzed") - expectMsgType[StatsResult](1.second).getMeanWordLength should be(3.875 +- 0.001) - } - } - //#test-statsService - - "show usage of the statsService from all nodes" in within(15 seconds) { - assertServiceOk() - - testConductor.enter("done-3") - } - - } - -} diff --git a/akka-samples/akka-sample-cluster-java/src/multi-jvm/scala/sample/cluster/transformation/TransformationSampleSpec.scala b/akka-samples/akka-sample-cluster-java/src/multi-jvm/scala/sample/cluster/transformation/TransformationSampleSpec.scala deleted file mode 100644 index ac698c2fd1..0000000000 --- a/akka-samples/akka-sample-cluster-java/src/multi-jvm/scala/sample/cluster/transformation/TransformationSampleSpec.scala +++ /dev/null @@ -1,139 +0,0 @@ -package sample.cluster.transformation - -import language.postfixOps -import scala.concurrent.duration._ - -import com.typesafe.config.ConfigFactory - -import org.scalatest.BeforeAndAfterAll -import org.scalatest.WordSpecLike -import org.scalatest.Matchers - -import akka.actor.Props -import akka.cluster.Cluster -import akka.remote.testkit.MultiNodeConfig -import akka.remote.testkit.MultiNodeSpec -import akka.testkit.ImplicitSender -import sample.cluster.transformation.TransformationMessages._ - -object TransformationSampleSpecConfig extends MultiNodeConfig { - // register the named roles (nodes) of the test - val frontend1 = role("frontend1") - val frontend2 = role("frontend2") - val backend1 = role("backend1") - val backend2 = role("backend2") - val backend3 = role("backend3") - - def nodeList = Seq(frontend1, frontend2, backend1, backend2, backend3) - - // Extract individual sigar library for every node. - nodeList foreach { role => - nodeConfig(role) { - ConfigFactory.parseString(s""" - # Enable metrics extension in akka-cluster-metrics. - akka.extensions=["akka.cluster.metrics.ClusterMetricsExtension"] - # Sigar native library extract location during tests. 
- akka.cluster.metrics.native-library-extract-folder=target/native/${role.name} - """) - } - } - - // this configuration will be used for all nodes - // note that no fixed host names and ports are used - commonConfig(ConfigFactory.parseString(""" - akka.actor.provider = "cluster" - akka.remote.log-remote-lifecycle-events = off - """)) - - nodeConfig(frontend1, frontend2)( - ConfigFactory.parseString("akka.cluster.roles =[frontend]")) - - nodeConfig(backend1, backend2, backend3)( - ConfigFactory.parseString("akka.cluster.roles =[backend]")) - -} - -// need one concrete test class per node -class TransformationSampleSpecMultiJvmNode1 extends TransformationSampleSpec -class TransformationSampleSpecMultiJvmNode2 extends TransformationSampleSpec -class TransformationSampleSpecMultiJvmNode3 extends TransformationSampleSpec -class TransformationSampleSpecMultiJvmNode4 extends TransformationSampleSpec -class TransformationSampleSpecMultiJvmNode5 extends TransformationSampleSpec - -abstract class TransformationSampleSpec extends MultiNodeSpec(TransformationSampleSpecConfig) - with WordSpecLike with Matchers with BeforeAndAfterAll with ImplicitSender { - - import TransformationSampleSpecConfig._ - - override def initialParticipants = roles.size - - override def beforeAll() = multiNodeSpecBeforeAll() - - override def afterAll() = multiNodeSpecAfterAll() - - "The japi transformation sample" must { - "illustrate how to start first frontend" in within(15 seconds) { - runOn(frontend1) { - // this will only run on the 'first' node - Cluster(system) join node(frontend1).address - val transformationFrontend = system.actorOf(Props[TransformationFrontend], name = "frontend") - transformationFrontend ! 
new TransformationJob("hello") - expectMsgPF() { - // no backends yet, service unavailable - case f: JobFailed => - } - } - - // this will run on all nodes - // use barrier to coordinate test steps - testConductor.enter("frontend1-started") - } - - "illustrate how a backend automatically registers" in within(15 seconds) { - runOn(backend1) { - Cluster(system) join node(frontend1).address - system.actorOf(Props[TransformationBackend], name = "backend") - } - testConductor.enter("backend1-started") - - runOn(frontend1) { - assertServiceOk() - } - - testConductor.enter("frontend1-backend1-ok") - } - - "illustrate how more nodes registers" in within(20 seconds) { - runOn(frontend2) { - Cluster(system) join node(frontend1).address - system.actorOf(Props[TransformationFrontend], name = "frontend") - } - testConductor.enter("frontend2-started") - runOn(backend2, backend3) { - Cluster(system) join node(backend1).address - system.actorOf(Props[TransformationBackend], name = "backend") - } - - testConductor.enter("all-started") - - runOn(frontend1, frontend2) { - assertServiceOk() - } - - testConductor.enter("all-ok") - - } - - } - - def assertServiceOk(): Unit = { - val transformationFrontend = system.actorSelection("akka://" + system.name + "/user/frontend") - // eventually the service should be ok, - // backends might not have registered initially - awaitAssert { - transformationFrontend ! 
new TransformationJob("hello") - expectMsgType[TransformationResult](1.second).getText should be("HELLO") - } - } - -} diff --git a/akka-samples/akka-sample-cluster-java/src/test/resources/reference.conf b/akka-samples/akka-sample-cluster-java/src/test/resources/reference.conf deleted file mode 100644 index 90492329b7..0000000000 --- a/akka-samples/akka-sample-cluster-java/src/test/resources/reference.conf +++ /dev/null @@ -1,4 +0,0 @@ -# Don't terminate ActorSystem in tests -akka.coordinated-shutdown.run-by-jvm-shutdown-hook = off -akka.coordinated-shutdown.terminate-actor-system = off -akka.cluster.run-coordinated-shutdown-when-down = off diff --git a/akka-samples/akka-sample-cluster-java/tutorial/index.html b/akka-samples/akka-sample-cluster-java/tutorial/index.html deleted file mode 100644 index 6ea54d9c5f..0000000000 --- a/akka-samples/akka-sample-cluster-java/tutorial/index.html +++ /dev/null @@ -1,490 +0,0 @@ - - -Akka Cluster Samples with Java - - - - -
-

-This tutorial contains 4 samples illustrating different -Akka cluster features. -

-
    -
  • Subscribe to cluster membership events
  • -
  • Sending messages to actors running on nodes in the cluster
  • -
  • Cluster aware routers
  • -
  • Cluster metrics
  • -
-
- -
-

A Simple Cluster Example

- -

-Open application.conf -

- -

-To enable cluster capabilities in your Akka project you should, at a minimum, add the remote settings, -and use cluster for akka.actor.provider. The akka.cluster.seed-nodes should -normally also be added to your application.conf file. -

- -

-The seed nodes are configured contact points which newly started nodes will try to connect with in order to join the cluster. -

- -

-Note that if you are going to start the nodes on different machines you need to specify the -ip-addresses or host names of the machines in application.conf instead of 127.0.0.1. -

- -

-Open SimpleClusterApp.java. -

- -

-The small program together with its configuration starts an ActorSystem with the Cluster enabled. -It joins the cluster and starts an actor that logs some membership events. -Take a look at the -SimpleClusterListener.java -actor. -

- -

-You can read more about the cluster concepts in the -documentation. -

- -

-To run this sample, go to the Run -tab, and start the application main class sample.cluster.simple.SimpleClusterApp -if it is not already started. -

- -

-SimpleClusterApp starts three actor systems (cluster members) in the same JVM process. It can be more -interesting to run them in separate processes. Stop the application in the -Run tab and then open three terminal windows. -

- -

-In the first terminal window, start the first seed node with the following command (on one line): -

- -

-<path to activator dir>/activator 
-  "runMain sample.cluster.simple.SimpleClusterApp 2551"		
-
- -

-2551 corresponds to the port of the first seed-nodes element in the configuration. In the log -output you see that the cluster node has been started and changed status to 'Up'. -

- -

-In the second terminal window, start the second seed node with the following command (on one line): -

- -

-<path to activator dir>/activator 
-  "runMain sample.cluster.simple.SimpleClusterApp 2552"		
-
- -

-2552 corresponds to the port of the second seed-nodes element in the configuration. In the -log output you see that the cluster node has been started and joins the other seed node and -becomes a member of the cluster. Its status changed to 'Up'. -

- -

-Switch over to the first terminal window and see in the log output that the member joined. -

- -

-Start another node in the third terminal window with the following command (on one line): -

- -

-<path to activator dir>/activator 
-  "runMain sample.cluster.simple.SimpleClusterApp 0"		
-
- -

-Now you don't need to specify the port number, 0 means that it will use a random available port. -It joins one of the configured seed nodes. Look at the log output in the different terminal -windows. -

- -

-Start even more nodes in the same way, if you like. -

- -

-Shut down one of the nodes by pressing 'ctrl-c' in one of the terminal windows. -The other nodes will detect the failure after a while, which you can see in the log -output in the other terminals. -

- -

-Look at the -source code -of the actor again. It registers itself as subscriber of certain cluster events. It gets notified with an snapshot event, -CurrentClusterState that holds full state information of the cluster. After that it receives events for changes -that happen in the cluster. -

- -
- -
-

Worker Dial-in Example

- -

-In the previous sample we saw how to subscribe to cluster membership events. -You can read more about it in the -documentation. -How can cluster membership events be used? -

- -

-Let's take a look at an example that illustrates how workers, here named backend, -can detect and register to new master nodes, here named frontend. -

- -

-The example application provides a service to transform text. When some text -is sent to one of the frontend services, it will be delegated to one of the -backend workers, which performs the transformation job, and sends the result back to -the original client. New backend nodes, as well as new frontend nodes, can be -added or removed to the cluster dynamically. -

- -

-Open TransformationMessages.java. -It defines the messages that are sent between the actors. -

- -

-The backend worker that performs the transformation job is defined in -TransformationBackend.java -

- -

-Note that the TransformationBackend actor subscribes to cluster events to detect new, -potential, frontend nodes, and send them a registration message so that they know -that they can use the backend worker. -

- -

-The frontend that receives user jobs and delegates to one of the registered backend workers is defined in -TransformationFrontend.java -

- -

-Note that the TransformationFrontend actor watch the registered backend -to be able to remove it from its list of available backend workers. -Death watch uses the cluster failure detector for nodes in the cluster, i.e. it detects -network failures and JVM crashes, in addition to graceful termination of watched -actor. -

- -

-To run this sample, go to the Run -tab, and start the application main class sample.cluster.transformation.TransformationApp -if it is not already started. -

- -

-TransformationApp starts -5 actor systems (cluster members) in the same JVM process. It can be more -interesting to run them in separate processes. Stop the application in the -Run tab and run the following commands in separate terminal windows. -

- -

-<path to activator dir>/activator 
-  "runMain sample.cluster.transformation.TransformationFrontendMain 2551"		
-
- -

-<path to activator dir>/activator 
-  "runMain sample.cluster.transformation.TransformationBackendMain 2552"		
-
- -

-<path to activator dir>/activator 
-  "runMain sample.cluster.transformation.TransformationBackendMain 0"		
-
- -

-<path to activator dir>/activator 
-  "runMain sample.cluster.transformation.TransformationBackendMain 0"		
-
- -

-<path to activator dir>/activator 
-  "runMain sample.cluster.transformation.TransformationFrontendMain 0"		
-
- -
- -
-

Cluster Aware Routers

- -

-All routers -can be made aware of member nodes in the cluster, i.e. deploying new routees or looking up routees -on nodes in the cluster. -When a node becomes unreachable or leaves the cluster the routees of that node are -automatically unregistered from the router. When new nodes join the cluster additional -routees are added to the router, according to the configuration. Routees are also added -when a node becomes reachable again, after having been unreachable. -

- -

-You can read more about cluster aware routers in the -documentation. -

- -

-Let's take a look at a few samples that make use of cluster aware routers. -

- -
- -
-

Router Example with Group of Routees

- -

-Let's take a look at how to use a cluster aware router with a group of routees, -i.e. a router which does not create its routees but instead forwards incoming messages to a given -set of actors created elsewhere. -

- -

-The example application provides a service to calculate statistics for a text. -When some text is sent to the service it splits it into words, and delegates the task -to count number of characters in each word to a separate worker, a routee of a router. -The character count for each word is sent back to an aggregator that calculates -the average number of characters per word when all results have been collected. -

- -

-Open StatsMessages.java. -It defines the messages that are sent between the actors. -

- -

-The worker that counts number of characters in each word is defined in -StatsWorker.java. -

- -

-The service that receives text from users and splits it up into words, delegates to workers and aggregates -is defined in StatsService.java -and StatsAggregator.java. -

- -

-Note, nothing cluster specific so far, just plain actors. -

- -

-All nodes start StatsService and StatsWorker actors. Remember, routees are the workers in this case. -

- -

-Open stats1.conf -The router is configured with routees.paths. -This means that user requests can be sent to StatsService on any node and it will use -StatsWorker on all nodes. -

- -

-To run this sample, go to the Run -tab, and start the application main class sample.cluster.stats.StatsSampleMain -if it is not already started. -

- -

-StatsSampleMain starts -4 actor systems (cluster members) in the same JVM process. It can be more -interesting to run them in separate processes. Stop the application in the -Run tab and run the following commands in separate terminal windows. -

- -

-<path to activator dir>/activator 
-  "runMain sample.cluster.stats.StatsSampleMain 2551"		
-
- -

-<path to activator dir>/activator 
-  "runMain sample.cluster.stats.StatsSampleMain 2552"		
-
- -

-<path to activator dir>/activator 
-  "runMain sample.cluster.stats.StatsSampleClientMain"		
-
- -

-<path to activator dir>/activator 
-  "runMain sample.cluster.stats.StatsSampleMain 0"		
-
- -
- -
-

Router Example with Pool of Remote Deployed Routees

- -

-Let's take a look at how to use a cluster aware router on single master node that creates -and deploys workers instead of looking them up. -

- -

-Open StatsSampleOneMasterMain.java. -To keep track of a single master we use the Cluster Singleton -in the contrib module. The ClusterSingletonManager is started on each node. -

- -

-We also need an actor on each node that keeps track of where current single master exists and -delegates jobs to the StatsService. That is provided by the ClusterSingletonProxy. -

- -

-The ClusterSingletonProxy receives text from users and delegates to the current StatsService, the single -master. It listens to cluster events to lookup the StatsService on the oldest node. -

- -

-All nodes start ClusterSingletonProxy and the ClusterSingletonManager. The router is now configured in -stats2.conf -

- -

-To run this sample, go to the Run -tab, and start the application main class sample.cluster.stats.StatsSampleOneMasterMain -if it is not already started. -

- -

-StatsSampleOneMasterMain starts -4 actor systems (cluster members) in the same JVM process. It can be more -interesting to run them in separate processes. Stop the application in the -Run tab and run the following commands in separate terminal windows. -

- -

-<path to activator dir>/activator 
-  "runMain sample.cluster.stats.StatsSampleOneMasterMain 2551"		
-
- -

-<path to activator dir>/activator 
-  "runMain sample.cluster.stats.StatsSampleOneMasterMain 2552"		
-
- -

-<path to activator dir>/activator 
-  "runMain sample.cluster.stats.StatsSampleOneMasterClientMain"		
-
- -

-<path to activator dir>/activator 
-  "runMain sample.cluster.stats.StatsSampleOneMasterMain 0"		
-
- -
- -
-

Adaptive Load Balancing

- -

-The member nodes of the cluster collects system health metrics and publishes that to other nodes and to -registered subscribers. This information is primarily used for load-balancing routers, such as -the AdaptiveLoadBalancingPool and AdaptiveLoadBalancingGroup routers. -

- -

-You can read more about cluster metrics in the -documentation. -

- -

-Let's take a look at this router in action. What can be more demanding than calculating factorials? -

- -

-The backend worker that performs the factorial calculation: -FactorialBackend -

- -

-The frontend that receives user jobs and delegates to the backends via the router: -FactorialFrontend -

- -

-As you can see, the router is defined in the same way as other routers, and in this case it is configured in: -factorial.conf -

- -

-It is only router type adaptive and the metrics-selector that is specific to this router, -other things work in the same way as other routers. -

- -

-To run this sample, go to the Run -tab, and start the application main class sample.cluster.factorial.FactorialApp -if it is not already started. -

- -

-FactorialApp starts -4 actor systems (cluster members) in the same JVM process. It can be more -interesting to run them in separate processes. Stop the application in the -Run tab and run the following commands in separate terminal windows. -

- -

-<path to activator dir>/activator 
-  "runMain sample.cluster.factorial.FactorialBackendMain 2551"		
-
- -

-<path to activator dir>/activator 
-  "runMain sample.cluster.factorial.FactorialBackendMain 2552"		
-
- -

-<path to activator dir>/activator 
-  "runMain sample.cluster.factorial.FactorialBackendMain 0"		
-
- -

-<path to activator dir>/activator 
-  "runMain sample.cluster.factorial.FactorialFrontendMain 0"		
-
- -

-Press ctrl-c in the terminal window of the frontend to stop the factorial calculations. -

- -
-
-

Tests

- -

-Tests can be found in src/multi-jvm. -You can run them from the Test tab. -

- -
- - - diff --git a/akka-samples/akka-sample-cluster-scala/.gitignore b/akka-samples/akka-sample-cluster-scala/.gitignore deleted file mode 100644 index 660c959e44..0000000000 --- a/akka-samples/akka-sample-cluster-scala/.gitignore +++ /dev/null @@ -1,17 +0,0 @@ -*# -*.iml -*.ipr -*.iws -*.pyc -*.tm.epoch -*.vim -*-shim.sbt -.idea/ -/project/plugins/project -project/boot -target/ -/logs -.cache -.classpath -.project -.settings \ No newline at end of file diff --git a/akka-samples/akka-sample-cluster-scala/COPYING b/akka-samples/akka-sample-cluster-scala/COPYING deleted file mode 100644 index 0e259d42c9..0000000000 --- a/akka-samples/akka-sample-cluster-scala/COPYING +++ /dev/null @@ -1,121 +0,0 @@ -Creative Commons Legal Code - -CC0 1.0 Universal - - CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE - LEGAL SERVICES. DISTRIBUTION OF THIS DOCUMENT DOES NOT CREATE AN - ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS - INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES - REGARDING THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS - PROVIDED HEREUNDER, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM - THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS PROVIDED - HEREUNDER. - -Statement of Purpose - -The laws of most jurisdictions throughout the world automatically confer -exclusive Copyright and Related Rights (defined below) upon the creator -and subsequent owner(s) (each and all, an "owner") of an original work of -authorship and/or a database (each, a "Work"). - -Certain owners wish to permanently relinquish those rights to a Work for -the purpose of contributing to a commons of creative, cultural and -scientific works ("Commons") that the public can reliably and without fear -of later claims of infringement build upon, modify, incorporate in other -works, reuse and redistribute as freely as possible in any form whatsoever -and for any purposes, including without limitation commercial purposes. 
-These owners may contribute to the Commons to promote the ideal of a free -culture and the further production of creative, cultural and scientific -works, or to gain reputation or greater distribution for their Work in -part through the use and efforts of others. - -For these and/or other purposes and motivations, and without any -expectation of additional consideration or compensation, the person -associating CC0 with a Work (the "Affirmer"), to the extent that he or she -is an owner of Copyright and Related Rights in the Work, voluntarily -elects to apply CC0 to the Work and publicly distribute the Work under its -terms, with knowledge of his or her Copyright and Related Rights in the -Work and the meaning and intended legal effect of CC0 on those rights. - -1. Copyright and Related Rights. A Work made available under CC0 may be -protected by copyright and related or neighboring rights ("Copyright and -Related Rights"). Copyright and Related Rights include, but are not -limited to, the following: - - i. the right to reproduce, adapt, distribute, perform, display, - communicate, and translate a Work; - ii. moral rights retained by the original author(s) and/or performer(s); -iii. publicity and privacy rights pertaining to a person's image or - likeness depicted in a Work; - iv. rights protecting against unfair competition in regards to a Work, - subject to the limitations in paragraph 4(a), below; - v. rights protecting the extraction, dissemination, use and reuse of data - in a Work; - vi. database rights (such as those arising under Directive 96/9/EC of the - European Parliament and of the Council of 11 March 1996 on the legal - protection of databases, and under any national implementation - thereof, including any amended or successor version of such - directive); and -vii. other similar, equivalent or corresponding rights throughout the - world based on applicable law or treaty, and any national - implementations thereof. - -2. Waiver. 
To the greatest extent permitted by, but not in contravention -of, applicable law, Affirmer hereby overtly, fully, permanently, -irrevocably and unconditionally waives, abandons, and surrenders all of -Affirmer's Copyright and Related Rights and associated claims and causes -of action, whether now known or unknown (including existing as well as -future claims and causes of action), in the Work (i) in all territories -worldwide, (ii) for the maximum duration provided by applicable law or -treaty (including future time extensions), (iii) in any current or future -medium and for any number of copies, and (iv) for any purpose whatsoever, -including without limitation commercial, advertising or promotional -purposes (the "Waiver"). Affirmer makes the Waiver for the benefit of each -member of the public at large and to the detriment of Affirmer's heirs and -successors, fully intending that such Waiver shall not be subject to -revocation, rescission, cancellation, termination, or any other legal or -equitable action to disrupt the quiet enjoyment of the Work by the public -as contemplated by Affirmer's express Statement of Purpose. - -3. Public License Fallback. Should any part of the Waiver for any reason -be judged legally invalid or ineffective under applicable law, then the -Waiver shall be preserved to the maximum extent permitted taking into -account Affirmer's express Statement of Purpose. 
In addition, to the -extent the Waiver is so judged Affirmer hereby grants to each affected -person a royalty-free, non transferable, non sublicensable, non exclusive, -irrevocable and unconditional license to exercise Affirmer's Copyright and -Related Rights in the Work (i) in all territories worldwide, (ii) for the -maximum duration provided by applicable law or treaty (including future -time extensions), (iii) in any current or future medium and for any number -of copies, and (iv) for any purpose whatsoever, including without -limitation commercial, advertising or promotional purposes (the -"License"). The License shall be deemed effective as of the date CC0 was -applied by Affirmer to the Work. Should any part of the License for any -reason be judged legally invalid or ineffective under applicable law, such -partial invalidity or ineffectiveness shall not invalidate the remainder -of the License, and in such case Affirmer hereby affirms that he or she -will not (i) exercise any of his or her remaining Copyright and Related -Rights in the Work or (ii) assert any associated claims and causes of -action with respect to the Work, in either case contrary to Affirmer's -express Statement of Purpose. - -4. Limitations and Disclaimers. - - a. No trademark or patent rights held by Affirmer are waived, abandoned, - surrendered, licensed or otherwise affected by this document. - b. Affirmer offers the Work as-is and makes no representations or - warranties of any kind concerning the Work, express, implied, - statutory or otherwise, including without limitation warranties of - title, merchantability, fitness for a particular purpose, non - infringement, or the absence of latent or other defects, accuracy, or - the present or absence of errors, whether or not discoverable, all to - the greatest extent permissible under applicable law. - c. 
Affirmer disclaims responsibility for clearing rights of other persons - that may apply to the Work or any use thereof, including without - limitation any person's Copyright and Related Rights in the Work. - Further, Affirmer disclaims responsibility for obtaining any necessary - consents, permissions or other rights required for any use of the - Work. - d. Affirmer understands and acknowledges that Creative Commons is not a - party to this document and has no duty or obligation with respect to - this CC0 or use of the Work. diff --git a/akka-samples/akka-sample-cluster-scala/LICENSE b/akka-samples/akka-sample-cluster-scala/LICENSE deleted file mode 100644 index 2a18b45589..0000000000 --- a/akka-samples/akka-sample-cluster-scala/LICENSE +++ /dev/null @@ -1,10 +0,0 @@ -Activator Template by Lightbend - -Licensed under Public Domain (CC0) - -To the extent possible under law, the person who associated CC0 with -this Activator Tempate has waived all copyright and related or neighboring -rights to this Activator Template. - -You should have received a copy of the CC0 legalcode along with this -work. If not, see . 
diff --git a/akka-samples/akka-sample-cluster-scala/activator.properties b/akka-samples/akka-sample-cluster-scala/activator.properties deleted file mode 100644 index cfa238b004..0000000000 --- a/akka-samples/akka-sample-cluster-scala/activator.properties +++ /dev/null @@ -1,7 +0,0 @@ -name=akka-sample-cluster-scala -title=Akka Cluster Samples with Scala -description=Akka Cluster Samples with Scala -tags=akka,cluster,scala,sample -authorName=Akka Team -authorLink=http://akka.io/ -sourceLink=https://github.com/akka/akka diff --git a/akka-samples/akka-sample-cluster-scala/build.sbt b/akka-samples/akka-sample-cluster-scala/build.sbt deleted file mode 100644 index 7f92cb3d61..0000000000 --- a/akka-samples/akka-sample-cluster-scala/build.sbt +++ /dev/null @@ -1,49 +0,0 @@ -import com.typesafe.sbt.SbtMultiJvm -import com.typesafe.sbt.SbtMultiJvm.MultiJvmKeys.MultiJvm - -val akkaVersion = "2.5-SNAPSHOT" - -val project = Project( - id = "akka-sample-cluster-scala", - base = file(".") - ) - .settings(SbtMultiJvm.multiJvmSettings: _*) - .settings( - name := "akka-sample-cluster-scala", - version := "2.5-SNAPSHOT", - scalaVersion := "2.11.8", - scalacOptions in Compile ++= Seq("-encoding", "UTF-8", "-target:jvm-1.8", "-deprecation", "-feature", "-unchecked", "-Xlog-reflective-calls", "-Xlint"), - javacOptions in Compile ++= Seq("-source", "1.8", "-target", "1.8", "-Xlint:unchecked", "-Xlint:deprecation"), - libraryDependencies ++= Seq( - "com.typesafe.akka" %% "akka-actor" % akkaVersion, - "com.typesafe.akka" %% "akka-remote" % akkaVersion, - "com.typesafe.akka" %% "akka-cluster" % akkaVersion, - "com.typesafe.akka" %% "akka-cluster-metrics" % akkaVersion, - "com.typesafe.akka" %% "akka-cluster-tools" % akkaVersion, - "com.typesafe.akka" %% "akka-multi-node-testkit" % akkaVersion, - "org.scalatest" %% "scalatest" % "2.2.1" % "test", - "io.kamon" % "sigar-loader" % "1.6.6-rev002"), - javaOptions in run ++= Seq( - "-Xms128m", "-Xmx1024m", "-Djava.library.path=./target/native"), 
- Keys.fork in run := true, - mainClass in (Compile, run) := Some("sample.cluster.simple.SimpleClusterApp"), - // make sure that MultiJvm test are compiled by the default test compilation - compile in MultiJvm <<= (compile in MultiJvm) triggeredBy (compile in Test), - // disable parallel tests - parallelExecution in Test := false, - // make sure that MultiJvm tests are executed by the default test target, - // and combine the results from ordinary test and multi-jvm tests - executeTests in Test <<= (executeTests in Test, executeTests in MultiJvm) map { - case (testResults, multiNodeResults) => - val overall = - if (testResults.overall.id < multiNodeResults.overall.id) - multiNodeResults.overall - else - testResults.overall - Tests.Output(overall, - testResults.events ++ multiNodeResults.events, - testResults.summaries ++ multiNodeResults.summaries) - }, - licenses := Seq(("CC0", url("http://creativecommons.org/publicdomain/zero/1.0"))) - ) - .configs (MultiJvm) diff --git a/akka-samples/akka-sample-cluster-scala/project/build.properties b/akka-samples/akka-sample-cluster-scala/project/build.properties deleted file mode 100644 index 748703f770..0000000000 --- a/akka-samples/akka-sample-cluster-scala/project/build.properties +++ /dev/null @@ -1 +0,0 @@ -sbt.version=0.13.7 diff --git a/akka-samples/akka-sample-cluster-scala/project/plugins.sbt b/akka-samples/akka-sample-cluster-scala/project/plugins.sbt deleted file mode 100644 index c3e7d797de..0000000000 --- a/akka-samples/akka-sample-cluster-scala/project/plugins.sbt +++ /dev/null @@ -1,4 +0,0 @@ - -resolvers += Classpaths.typesafeResolver - -addSbtPlugin("com.typesafe.sbt" % "sbt-multi-jvm" % "0.3.8") diff --git a/akka-samples/akka-sample-cluster-scala/src/main/resources/application.conf b/akka-samples/akka-sample-cluster-scala/src/main/resources/application.conf deleted file mode 100644 index 56b8106004..0000000000 --- a/akka-samples/akka-sample-cluster-scala/src/main/resources/application.conf +++ /dev/null @@ 
-1,36 +0,0 @@ -#//#snippet -akka { - actor { - provider = cluster - } - remote { - log-remote-lifecycle-events = off - netty.tcp { - hostname = "127.0.0.1" - port = 0 - } - } - - cluster { - seed-nodes = [ - "akka.tcp://ClusterSystem@127.0.0.1:2551", - "akka.tcp://ClusterSystem@127.0.0.1:2552"] - - #//#snippet - # excluded from snippet - auto-down-unreachable-after = 10s - #//#snippet - # auto downing is NOT safe for production deployments. - # you may want to use it during development, read more about it in the docs. - # - # auto-down-unreachable-after = 10s - } -} - -# Enable metrics extension in akka-cluster-metrics. -akka.extensions=["akka.cluster.metrics.ClusterMetricsExtension"] - -# Sigar native library extract location during tests. -# Note: use per-jvm-instance folder when running multiple jvm on one host. -akka.cluster.metrics.native-library-extract-folder=${user.dir}/target/native -#//#snippet diff --git a/akka-samples/akka-sample-cluster-scala/src/main/resources/factorial.conf b/akka-samples/akka-sample-cluster-scala/src/main/resources/factorial.conf deleted file mode 100644 index a3240a6aed..0000000000 --- a/akka-samples/akka-sample-cluster-scala/src/main/resources/factorial.conf +++ /dev/null @@ -1,33 +0,0 @@ -include "application" - -# //#min-nr-of-members -akka.cluster.min-nr-of-members = 3 -# //#min-nr-of-members - -# //#role-min-nr-of-members -akka.cluster.role { - frontend.min-nr-of-members = 1 - backend.min-nr-of-members = 2 -} -# //#role-min-nr-of-members - -# //#adaptive-router -akka.actor.deployment { - /factorialFrontend/factorialBackendRouter = { - # Router type provided by metrics extension. - router = cluster-metrics-adaptive-group - # Router parameter specific for metrics extension. 
- # metrics-selector = heap - # metrics-selector = load - # metrics-selector = cpu - metrics-selector = mix - # - routees.paths = ["/user/factorialBackend"] - cluster { - enabled = on - use-role = backend - allow-local-routees = off - } - } -} -# //#adaptive-router diff --git a/akka-samples/akka-sample-cluster-scala/src/main/resources/stats1.conf b/akka-samples/akka-sample-cluster-scala/src/main/resources/stats1.conf deleted file mode 100644 index 4ae4ed091d..0000000000 --- a/akka-samples/akka-sample-cluster-scala/src/main/resources/stats1.conf +++ /dev/null @@ -1,15 +0,0 @@ -include "application" - -# //#config-router-lookup -akka.actor.deployment { - /statsService/workerRouter { - router = consistent-hashing-group - routees.paths = ["/user/statsWorker"] - cluster { - enabled = on - allow-local-routees = on - use-role = compute - } - } -} -# //#config-router-lookup diff --git a/akka-samples/akka-sample-cluster-scala/src/main/resources/stats2.conf b/akka-samples/akka-sample-cluster-scala/src/main/resources/stats2.conf deleted file mode 100644 index 40c3d487ef..0000000000 --- a/akka-samples/akka-sample-cluster-scala/src/main/resources/stats2.conf +++ /dev/null @@ -1,16 +0,0 @@ -include "application" - -# //#config-router-deploy -akka.actor.deployment { - /statsService/singleton/workerRouter { - router = consistent-hashing-pool - cluster { - enabled = on - max-nr-of-instances-per-node = 3 - allow-local-routees = on - use-role = compute - } - } -} -# //#config-router-deploy - diff --git a/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/factorial/Extra.scala b/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/factorial/Extra.scala deleted file mode 100644 index e26b9258d1..0000000000 --- a/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/factorial/Extra.scala +++ /dev/null @@ -1,38 +0,0 @@ -package sample.cluster.factorial - -import akka.actor.Props -import akka.actor.Actor - -// not used, only for 
documentation -abstract class FactorialFrontend2 extends Actor { - //#router-lookup-in-code - import akka.cluster.routing.ClusterRouterGroup - import akka.cluster.routing.ClusterRouterGroupSettings - import akka.cluster.metrics.AdaptiveLoadBalancingGroup - import akka.cluster.metrics.HeapMetricsSelector - - val backend = context.actorOf( - ClusterRouterGroup(AdaptiveLoadBalancingGroup(HeapMetricsSelector), - ClusterRouterGroupSettings( - totalInstances = 100, routeesPaths = List("/user/factorialBackend"), - allowLocalRoutees = true, useRole = Some("backend"))).props(), - name = "factorialBackendRouter2") - //#router-lookup-in-code -} - -// not used, only for documentation -abstract class FactorialFrontend3 extends Actor { - //#router-deploy-in-code - import akka.cluster.routing.ClusterRouterPool - import akka.cluster.routing.ClusterRouterPoolSettings - import akka.cluster.metrics.AdaptiveLoadBalancingPool - import akka.cluster.metrics.SystemLoadAverageMetricsSelector - - val backend = context.actorOf( - ClusterRouterPool(AdaptiveLoadBalancingPool( - SystemLoadAverageMetricsSelector), ClusterRouterPoolSettings( - totalInstances = 100, maxInstancesPerNode = 3, - allowLocalRoutees = false, useRole = Some("backend"))).props(Props[FactorialBackend]), - name = "factorialBackendRouter3") - //#router-deploy-in-code -} \ No newline at end of file diff --git a/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/factorial/FactorialApp.scala b/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/factorial/FactorialApp.scala deleted file mode 100644 index 69fb39c4ad..0000000000 --- a/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/factorial/FactorialApp.scala +++ /dev/null @@ -1,11 +0,0 @@ -package sample.cluster.factorial - -object FactorialApp { - def main(args: Array[String]): Unit = { - // starting 3 backend nodes and 1 frontend node - FactorialBackend.main(Seq("2551").toArray) - 
FactorialBackend.main(Seq("2552").toArray) - FactorialBackend.main(Array.empty) - FactorialFrontend.main(Array.empty) - } -} \ No newline at end of file diff --git a/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/factorial/FactorialFrontend.scala b/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/factorial/FactorialFrontend.scala deleted file mode 100644 index 61bc5c7dc0..0000000000 --- a/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/factorial/FactorialFrontend.scala +++ /dev/null @@ -1,64 +0,0 @@ -package sample.cluster.factorial - -import scala.concurrent.duration._ -import com.typesafe.config.ConfigFactory -import akka.actor.Actor -import akka.actor.ActorLogging -import akka.actor.ActorSystem -import akka.actor.Props -import akka.cluster.Cluster -import akka.routing.FromConfig -import akka.actor.ReceiveTimeout -import scala.util.Try -import scala.concurrent.Await - -//#frontend -class FactorialFrontend(upToN: Int, repeat: Boolean) extends Actor with ActorLogging { - - val backend = context.actorOf(FromConfig.props(), - name = "factorialBackendRouter") - - override def preStart(): Unit = { - sendJobs() - if (repeat) { - context.setReceiveTimeout(10.seconds) - } - } - - def receive = { - case (n: Int, factorial: BigInt) => - if (n == upToN) { - log.debug("{}! = {}", n, factorial) - if (repeat) sendJobs() - else context.stop(self) - } - case ReceiveTimeout => - log.info("Timeout") - sendJobs() - } - - def sendJobs(): Unit = { - log.info("Starting batch of factorials up to [{}]", upToN) - 1 to upToN foreach { backend ! _ } - } -} -//#frontend - -object FactorialFrontend { - def main(args: Array[String]): Unit = { - val upToN = 200 - - val config = ConfigFactory.parseString("akka.cluster.roles = [frontend]"). 
- withFallback(ConfigFactory.load("factorial")) - - val system = ActorSystem("ClusterSystem", config) - system.log.info("Factorials will start when 2 backend members in the cluster.") - //#registerOnUp - Cluster(system) registerOnMemberUp { - system.actorOf(Props(classOf[FactorialFrontend], upToN, true), - name = "factorialFrontend") - } - //#registerOnUp - - } -} diff --git a/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/simple/SimpleClusterApp.scala b/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/simple/SimpleClusterApp.scala deleted file mode 100644 index 1e87f49d25..0000000000 --- a/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/simple/SimpleClusterApp.scala +++ /dev/null @@ -1,29 +0,0 @@ -package sample.cluster.simple - -import com.typesafe.config.ConfigFactory -import akka.actor.ActorSystem -import akka.actor.Props - -object SimpleClusterApp { - def main(args: Array[String]): Unit = { - if (args.isEmpty) - startup(Seq("2551", "2552", "0")) - else - startup(args) - } - - def startup(ports: Seq[String]): Unit = { - ports foreach { port => - // Override the configuration of the port - val config = ConfigFactory.parseString("akka.remote.netty.tcp.port=" + port). 
- withFallback(ConfigFactory.load()) - - // Create an Akka system - val system = ActorSystem("ClusterSystem", config) - // Create an actor that handles cluster domain events - system.actorOf(Props[SimpleClusterListener], name = "clusterListener") - } - } - -} - diff --git a/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/stats/Extra.scala b/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/stats/Extra.scala deleted file mode 100644 index f5163c84db..0000000000 --- a/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/stats/Extra.scala +++ /dev/null @@ -1,34 +0,0 @@ -package sample.cluster.stats - -import akka.actor.Actor -import akka.actor.Props - -// not used, only for documentation -abstract class StatsService2 extends Actor { - //#router-lookup-in-code - import akka.cluster.routing.ClusterRouterGroup - import akka.cluster.routing.ClusterRouterGroupSettings - import akka.routing.ConsistentHashingGroup - - val workerRouter = context.actorOf( - ClusterRouterGroup(ConsistentHashingGroup(Nil), ClusterRouterGroupSettings( - totalInstances = 100, routeesPaths = List("/user/statsWorker"), - allowLocalRoutees = true, useRole = Some("compute"))).props(), - name = "workerRouter2") - //#router-lookup-in-code -} - -// not used, only for documentation -abstract class StatsService3 extends Actor { - //#router-deploy-in-code - import akka.cluster.routing.ClusterRouterPool - import akka.cluster.routing.ClusterRouterPoolSettings - import akka.routing.ConsistentHashingPool - - val workerRouter = context.actorOf( - ClusterRouterPool(ConsistentHashingPool(0), ClusterRouterPoolSettings( - totalInstances = 100, maxInstancesPerNode = 3, - allowLocalRoutees = false, useRole = None)).props(Props[StatsWorker]), - name = "workerRouter3") - //#router-deploy-in-code -} diff --git a/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/stats/StatsSample.scala 
b/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/stats/StatsSample.scala deleted file mode 100644 index b32dd961a8..0000000000 --- a/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/stats/StatsSample.scala +++ /dev/null @@ -1,91 +0,0 @@ -package sample.cluster.stats - -import scala.concurrent.duration._ -import java.util.concurrent.ThreadLocalRandom -import com.typesafe.config.ConfigFactory -import akka.actor.Actor -import akka.actor.ActorSystem -import akka.actor.Address -import akka.actor.PoisonPill -import akka.actor.Props -import akka.actor.RelativeActorPath -import akka.actor.RootActorPath -import akka.cluster.Cluster -import akka.cluster.ClusterEvent._ -import akka.cluster.MemberStatus - -object StatsSample { - def main(args: Array[String]): Unit = { - if (args.isEmpty) { - startup(Seq("2551", "2552", "0")) - StatsSampleClient.main(Array.empty) - } else { - startup(args) - } - } - - def startup(ports: Seq[String]): Unit = { - ports foreach { port => - // Override the configuration of the port when specified as program argument - val config = - ConfigFactory.parseString(s"akka.remote.netty.tcp.port=" + port).withFallback( - ConfigFactory.parseString("akka.cluster.roles = [compute]")). 
- withFallback(ConfigFactory.load("stats1")) - - val system = ActorSystem("ClusterSystem", config) - - system.actorOf(Props[StatsWorker], name = "statsWorker") - system.actorOf(Props[StatsService], name = "statsService") - } - } -} - -object StatsSampleClient { - def main(args: Array[String]): Unit = { - // note that client is not a compute node, role not defined - val system = ActorSystem("ClusterSystem") - system.actorOf(Props(classOf[StatsSampleClient], "/user/statsService"), "client") - } -} - -class StatsSampleClient(servicePath: String) extends Actor { - val cluster = Cluster(context.system) - val servicePathElements = servicePath match { - case RelativeActorPath(elements) => elements - case _ => throw new IllegalArgumentException( - "servicePath [%s] is not a valid relative actor path" format servicePath) - } - import context.dispatcher - val tickTask = context.system.scheduler.schedule(2.seconds, 2.seconds, self, "tick") - - var nodes = Set.empty[Address] - - override def preStart(): Unit = { - cluster.subscribe(self, classOf[MemberEvent], classOf[ReachabilityEvent]) - } - override def postStop(): Unit = { - cluster.unsubscribe(self) - tickTask.cancel() - } - - def receive = { - case "tick" if nodes.nonEmpty => - // just pick any one - val address = nodes.toIndexedSeq(ThreadLocalRandom.current.nextInt(nodes.size)) - val service = context.actorSelection(RootActorPath(address) / servicePathElements) - service ! 
StatsJob("this is the text that will be analyzed") - case result: StatsResult => - println(result) - case failed: JobFailed => - println(failed) - case state: CurrentClusterState => - nodes = state.members.collect { - case m if m.hasRole("compute") && m.status == MemberStatus.Up => m.address - } - case MemberUp(m) if m.hasRole("compute") => nodes += m.address - case other: MemberEvent => nodes -= other.member.address - case UnreachableMember(m) => nodes -= m.address - case ReachableMember(m) if m.hasRole("compute") => nodes += m.address - } - -} diff --git a/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/stats/StatsSampleOneMaster.scala b/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/stats/StatsSampleOneMaster.scala deleted file mode 100644 index f4db5403be..0000000000 --- a/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/stats/StatsSampleOneMaster.scala +++ /dev/null @@ -1,56 +0,0 @@ -package sample.cluster.stats - -import com.typesafe.config.ConfigFactory -import akka.actor.ActorSystem -import akka.actor.PoisonPill -import akka.actor.Props -import akka.cluster.singleton.ClusterSingletonManager -import akka.cluster.singleton.ClusterSingletonManagerSettings -import akka.cluster.singleton.ClusterSingletonProxy -import akka.cluster.singleton.ClusterSingletonProxySettings - -object StatsSampleOneMaster { - def main(args: Array[String]): Unit = { - if (args.isEmpty) { - startup(Seq("2551", "2552", "0")) - StatsSampleOneMasterClient.main(Array.empty) - } else { - startup(args) - } - } - - def startup(ports: Seq[String]): Unit = { - ports foreach { port => - // Override the configuration of the port when specified as program argument - val config = - ConfigFactory.parseString(s"akka.remote.netty.tcp.port=" + port).withFallback( - ConfigFactory.parseString("akka.cluster.roles = [compute]")). 
- withFallback(ConfigFactory.load("stats2")) - - val system = ActorSystem("ClusterSystem", config) - - //#create-singleton-manager - system.actorOf(ClusterSingletonManager.props( - singletonProps = Props[StatsService], - terminationMessage = PoisonPill, - settings = ClusterSingletonManagerSettings(system).withRole("compute")), - name = "statsService") - //#create-singleton-manager - - //#singleton-proxy - system.actorOf(ClusterSingletonProxy.props(singletonManagerPath = "/user/statsService", - settings = ClusterSingletonProxySettings(system).withRole("compute")), - name = "statsServiceProxy") - //#singleton-proxy - } - } -} - -object StatsSampleOneMasterClient { - def main(args: Array[String]): Unit = { - // note that client is not a compute node, role not defined - val system = ActorSystem("ClusterSystem") - system.actorOf(Props(classOf[StatsSampleClient], "/user/statsServiceProxy"), "client") - } -} - diff --git a/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/stats/StatsService.scala b/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/stats/StatsService.scala deleted file mode 100644 index 4852a5541a..0000000000 --- a/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/stats/StatsService.scala +++ /dev/null @@ -1,50 +0,0 @@ -package sample.cluster.stats - -import scala.concurrent.duration._ -import akka.actor.Actor -import akka.actor.ActorRef -import akka.actor.Props -import akka.actor.ReceiveTimeout -import akka.routing.ConsistentHashingRouter.ConsistentHashableEnvelope -import akka.routing.FromConfig - -//#service -class StatsService extends Actor { - // This router is used both with lookup and deploy of routees. If you - // have a router with only lookup of routees you can use Props.empty - // instead of Props[StatsWorker.class]. 
- val workerRouter = context.actorOf(FromConfig.props(Props[StatsWorker]), - name = "workerRouter") - - def receive = { - case StatsJob(text) if text != "" => - val words = text.split(" ") - val replyTo = sender() // important to not close over sender() - // create actor that collects replies from workers - val aggregator = context.actorOf(Props( - classOf[StatsAggregator], words.size, replyTo)) - words foreach { word => - workerRouter.tell( - ConsistentHashableEnvelope(word, word), aggregator) - } - } -} - -class StatsAggregator(expectedResults: Int, replyTo: ActorRef) extends Actor { - var results = IndexedSeq.empty[Int] - context.setReceiveTimeout(3.seconds) - - def receive = { - case wordCount: Int => - results = results :+ wordCount - if (results.size == expectedResults) { - val meanWordLength = results.sum.toDouble / results.size - replyTo ! StatsResult(meanWordLength) - context.stop(self) - } - case ReceiveTimeout => - replyTo ! JobFailed("Service unavailable, try again later") - context.stop(self) - } -} -//#service diff --git a/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/transformation/TransformationApp.scala b/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/transformation/TransformationApp.scala deleted file mode 100644 index e1ea6f1518..0000000000 --- a/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/transformation/TransformationApp.scala +++ /dev/null @@ -1,14 +0,0 @@ -package sample.cluster.transformation - -object TransformationApp { - - def main(args: Array[String]): Unit = { - // starting 2 frontend nodes and 3 backend nodes - TransformationFrontend.main(Seq("2551").toArray) - TransformationBackend.main(Seq("2552").toArray) - TransformationBackend.main(Array.empty) - TransformationBackend.main(Array.empty) - TransformationFrontend.main(Array.empty) - } - -} \ No newline at end of file diff --git 
a/akka-samples/akka-sample-cluster-scala/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSingleMasterSpec.scala b/akka-samples/akka-sample-cluster-scala/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSingleMasterSpec.scala deleted file mode 100644 index 5b818528f5..0000000000 --- a/akka-samples/akka-sample-cluster-scala/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSingleMasterSpec.scala +++ /dev/null @@ -1,128 +0,0 @@ -package sample.cluster.stats - -import language.postfixOps -import scala.concurrent.duration._ -import com.typesafe.config.ConfigFactory -import org.scalatest.BeforeAndAfterAll -import org.scalatest.WordSpecLike -import org.scalatest.Matchers -import akka.actor.PoisonPill -import akka.actor.Props -import akka.actor.RootActorPath -import akka.cluster.singleton.ClusterSingletonManager -import akka.cluster.singleton.ClusterSingletonManagerSettings -import akka.cluster.singleton.ClusterSingletonProxy -import akka.cluster.Cluster -import akka.cluster.Member -import akka.cluster.MemberStatus -import akka.cluster.ClusterEvent.CurrentClusterState -import akka.cluster.ClusterEvent.MemberUp -import akka.remote.testkit.MultiNodeConfig -import akka.remote.testkit.MultiNodeSpec -import akka.testkit.ImplicitSender -import akka.cluster.singleton.ClusterSingletonProxySettings - -object StatsSampleSingleMasterSpecConfig extends MultiNodeConfig { - // register the named roles (nodes) of the test - val first = role("first") - val second = role("second") - val third = role("third") - - def nodeList = Seq(first, second, third) - - // Extract individual sigar library for every node. - nodeList foreach { role => - nodeConfig(role) { - ConfigFactory.parseString(s""" - # Enable metrics extension in akka-cluster-metrics. - akka.extensions=["akka.cluster.metrics.ClusterMetricsExtension"] - # Sigar native library extract location during tests. 
- akka.cluster.metrics.native-library-extract-folder=target/native/${role.name} - """) - } - } - - // this configuration will be used for all nodes - // note that no fixed host names and ports are used - commonConfig(ConfigFactory.parseString(""" - akka.loglevel = INFO - akka.actor.provider = "akka.cluster.ClusterActorRefProvider" - akka.remote.log-remote-lifecycle-events = off - akka.cluster.roles = [compute] - #//#router-deploy-config - akka.actor.deployment { - /statsService/singleton/workerRouter { - router = consistent-hashing-pool - cluster { - enabled = on - max-nr-of-instances-per-node = 3 - allow-local-routees = on - use-role = compute - } - } - } - #//#router-deploy-config - """)) - -} - -// need one concrete test class per node -class StatsSampleSingleMasterSpecMultiJvmNode1 extends StatsSampleSingleMasterSpec -class StatsSampleSingleMasterSpecMultiJvmNode2 extends StatsSampleSingleMasterSpec -class StatsSampleSingleMasterSpecMultiJvmNode3 extends StatsSampleSingleMasterSpec - -abstract class StatsSampleSingleMasterSpec extends MultiNodeSpec(StatsSampleSingleMasterSpecConfig) - with WordSpecLike with Matchers with BeforeAndAfterAll with ImplicitSender { - - import StatsSampleSingleMasterSpecConfig._ - - override def initialParticipants = roles.size - - override def beforeAll() = multiNodeSpecBeforeAll() - - override def afterAll() = multiNodeSpecAfterAll() - - "The stats sample with single master" must { - "illustrate how to startup cluster" in within(15 seconds) { - Cluster(system).subscribe(testActor, classOf[MemberUp]) - expectMsgClass(classOf[CurrentClusterState]) - - val firstAddress = node(first).address - val secondAddress = node(second).address - val thirdAddress = node(third).address - - Cluster(system) join firstAddress - - receiveN(3).collect { case MemberUp(m) => m.address }.toSet should be( - Set(firstAddress, secondAddress, thirdAddress)) - - Cluster(system).unsubscribe(testActor) - - system.actorOf(ClusterSingletonManager.props( - 
singletonProps = Props[StatsService], terminationMessage = PoisonPill, - settings = ClusterSingletonManagerSettings(system).withRole("compute")), - name = "statsService") - - system.actorOf(ClusterSingletonProxy.props(singletonManagerPath = "/user/statsService", - ClusterSingletonProxySettings(system).withRole("compute")), - name = "statsServiceProxy") - - testConductor.enter("all-up") - } - - "show usage of the statsServiceProxy" in within(40 seconds) { - val proxy = system.actorSelection(RootActorPath(node(third).address) / "user" / "statsServiceProxy") - - // eventually the service should be ok, - // service and worker nodes might not be up yet - awaitAssert { - proxy ! StatsJob("this is the text that will be analyzed") - expectMsgType[StatsResult](1.second).meanWordLength should be( - 3.875 +- 0.001) - } - - testConductor.enter("done") - } - } - -} diff --git a/akka-samples/akka-sample-cluster-scala/src/multi-jvm/scala/sample/cluster/transformation/TransformationSampleSpec.scala b/akka-samples/akka-sample-cluster-scala/src/multi-jvm/scala/sample/cluster/transformation/TransformationSampleSpec.scala deleted file mode 100644 index 10024c2eac..0000000000 --- a/akka-samples/akka-sample-cluster-scala/src/multi-jvm/scala/sample/cluster/transformation/TransformationSampleSpec.scala +++ /dev/null @@ -1,138 +0,0 @@ -package sample.cluster.transformation - -import language.postfixOps -import scala.concurrent.duration._ - -import com.typesafe.config.ConfigFactory - -import org.scalatest.BeforeAndAfterAll -import org.scalatest.WordSpecLike -import org.scalatest.Matchers - -import akka.actor.Props -import akka.cluster.Cluster -import akka.remote.testkit.MultiNodeConfig -import akka.remote.testkit.MultiNodeSpec -import akka.testkit.ImplicitSender - -object TransformationSampleSpecConfig extends MultiNodeConfig { - // register the named roles (nodes) of the test - val frontend1 = role("frontend1") - val frontend2 = role("frontend2") - val backend1 = role("backend1") - val 
backend2 = role("backend2") - val backend3 = role("backend3") - - def nodeList = Seq(frontend1, frontend2, backend1, backend2, backend3) - - // Extract individual sigar library for every node. - nodeList foreach { role => - nodeConfig(role) { - ConfigFactory.parseString(s""" - # Enable metrics extension in akka-cluster-metrics. - akka.extensions=["akka.cluster.metrics.ClusterMetricsExtension"] - # Sigar native library extract location during tests. - akka.cluster.metrics.native-library-extract-folder=target/native/${role.name} - """) - } - } - - // this configuration will be used for all nodes - // note that no fixed host names and ports are used - commonConfig(ConfigFactory.parseString(""" - akka.actor.provider = cluster - akka.remote.log-remote-lifecycle-events = off - """)) - - nodeConfig(frontend1, frontend2)( - ConfigFactory.parseString("akka.cluster.roles =[frontend]")) - - nodeConfig(backend1, backend2, backend3)( - ConfigFactory.parseString("akka.cluster.roles =[backend]")) -} - -// need one concrete test class per node -class TransformationSampleSpecMultiJvmNode1 extends TransformationSampleSpec -class TransformationSampleSpecMultiJvmNode2 extends TransformationSampleSpec -class TransformationSampleSpecMultiJvmNode3 extends TransformationSampleSpec -class TransformationSampleSpecMultiJvmNode4 extends TransformationSampleSpec -class TransformationSampleSpecMultiJvmNode5 extends TransformationSampleSpec - -abstract class TransformationSampleSpec extends MultiNodeSpec(TransformationSampleSpecConfig) - with WordSpecLike with Matchers with BeforeAndAfterAll with ImplicitSender { - - import TransformationSampleSpecConfig._ - - override def initialParticipants = roles.size - - override def beforeAll() = multiNodeSpecBeforeAll() - - override def afterAll() = multiNodeSpecAfterAll() - - "The transformation sample" must { - "illustrate how to start first frontend" in within(15 seconds) { - runOn(frontend1) { - // this will only run on the 'first' node - 
Cluster(system) join node(frontend1).address - val transformationFrontend = system.actorOf(Props[TransformationFrontend], name = "frontend") - transformationFrontend ! TransformationJob("hello") - expectMsgPF() { - // no backends yet, service unavailable - case JobFailed(_, TransformationJob("hello")) => - } - } - - // this will run on all nodes - // use barrier to coordinate test steps - testConductor.enter("frontend1-started") - } - - "illustrate how a backend automatically registers" in within(15 seconds) { - runOn(backend1) { - Cluster(system) join node(frontend1).address - system.actorOf(Props[TransformationBackend], name = "backend") - } - testConductor.enter("backend1-started") - - runOn(frontend1) { - assertServiceOk() - } - - testConductor.enter("frontend1-backend1-ok") - } - - "illustrate how more nodes registers" in within(20 seconds) { - runOn(frontend2) { - Cluster(system) join node(frontend1).address - system.actorOf(Props[TransformationFrontend], name = "frontend") - } - testConductor.enter("frontend2-started") - - runOn(backend2, backend3) { - Cluster(system) join node(backend1).address - system.actorOf(Props[TransformationBackend], name = "backend") - } - - testConductor.enter("all-started") - - runOn(frontend1, frontend2) { - assertServiceOk() - } - - testConductor.enter("all-ok") - - } - - } - - def assertServiceOk(): Unit = { - val transformationFrontend = system.actorSelection("akka://" + system.name + "/user/frontend") - // eventually the service should be ok, - // backends might not have registered initially - awaitAssert { - transformationFrontend ! 
TransformationJob("hello") - expectMsgType[TransformationResult](1.second).text should be("HELLO") - } - } - -} diff --git a/akka-samples/akka-sample-cluster-scala/src/test/resources/reference.conf b/akka-samples/akka-sample-cluster-scala/src/test/resources/reference.conf deleted file mode 100644 index 90492329b7..0000000000 --- a/akka-samples/akka-sample-cluster-scala/src/test/resources/reference.conf +++ /dev/null @@ -1,4 +0,0 @@ -# Don't terminate ActorSystem in tests -akka.coordinated-shutdown.run-by-jvm-shutdown-hook = off -akka.coordinated-shutdown.terminate-actor-system = off -akka.cluster.run-coordinated-shutdown-when-down = off diff --git a/akka-samples/akka-sample-cluster-scala/tutorial/index.html b/akka-samples/akka-sample-cluster-scala/tutorial/index.html deleted file mode 100644 index 74af9fe198..0000000000 --- a/akka-samples/akka-sample-cluster-scala/tutorial/index.html +++ /dev/null @@ -1,489 +0,0 @@ - - -Akka Cluster Samples with Scala - - - - -
-

-This tutorial contains 4 samples illustrating different -Akka cluster features. -

-
    -
  • Subscribe to cluster membership events
  • -
  • Sending messages to actors running on nodes in the cluster
  • -
  • Cluster aware routers
  • -
  • Cluster metrics
  • -
-
- -
-

A Simple Cluster Example

- -

-Open application.conf -

- -

-To enable cluster capabilities in your Akka project you should, at a minimum, add the remote settings, -and use akka.cluster.ClusterActorRefProvider. The akka.cluster.seed-nodes should -normally also be added to your application.conf file. -

- -

-The seed nodes are configured contact points which newly started nodes will try to connect with in order to join the cluster. -

- -

-Note that if you are going to start the nodes on different machines you need to specify the -ip-addresses or host names of the machines in application.conf instead of 127.0.0.1. -

- -

-Open SimpleClusterApp.scala. -

- -

-The small program together with its configuration starts an ActorSystem with the Cluster enabled. -It joins the cluster and starts an actor that logs some membership events. -Take a look at the -SimpleClusterListener.scala -actor. -

- -

-You can read more about the cluster concepts in the -documentation. -

- -

-To run this sample, go to the Run -tab, and start the application main class sample.cluster.simple.SimpleClusterApp -if it is not already started. -

- -

-SimpleClusterApp starts three actor systems (cluster members) in the same JVM process. It can be more -interesting to run them in separate processes. Stop the application in the -Run tab and then open three terminal windows. -

- -

-In the first terminal window, start the first seed node with the following command (on one line): -

- -

-<path to activator dir>/activator 
-  "runMain sample.cluster.simple.SimpleClusterApp 2551"		
-
- -

-2551 corresponds to the port of the first seed-nodes element in the configuration. In the log -output you see that the cluster node has been started and changed status to 'Up'. -

- -

-In the second terminal window, start the second seed node with the following command (on one line): -

- -

-<path to activator dir>/activator 
-  "runMain sample.cluster.simple.SimpleClusterApp 2552"		
-
- -

-2552 corresponds to the port of the second seed-nodes element in the configuration. In the -log output you see that the cluster node has been started and joins the other seed node and -becomes a member of the cluster. Its status changed to 'Up'. -

- -

-Switch over to the first terminal window and see in the log output that the member joined. -

- -

-Start another node in the third terminal window with the following command (on one line): -

- -

-<path to activator dir>/activator 
-  "runMain sample.cluster.simple.SimpleClusterApp 0"		
-
- -

-Now you don't need to specify the port number, 0 means that it will use a random available port. -It joins one of the configured seed nodes. Look at the log output in the different terminal -windows. -

- -

-Start even more nodes in the same way, if you like. -

- -

-Shut down one of the nodes by pressing 'ctrl-c' in one of the terminal windows. -The other nodes will detect the failure after a while, which you can see in the log -output in the other terminals. -

- -

-Look at the -source code -of the actor again. It registers itself as subscriber of certain cluster events. It gets notified with an snapshot event, -CurrentClusterState that holds full state information of the cluster. After that it receives events for changes -that happen in the cluster. -

- -
- -
-

Worker Dial-in Example

- -

-In the previous sample we saw how to subscribe to cluster membership events. -You can read more about it in the -documentation. -How can cluster membership events be used? -

- -

-Let's take a look at an example that illustrates how workers, here named backend, -can detect and register to new master nodes, here named frontend. -

- -

-The example application provides a service to transform text. When some text -is sent to one of the frontend services, it will be delegated to one of the -backend workers, which performs the transformation job, and sends the result back to -the original client. New backend nodes, as well as new frontend nodes, can be -added or removed to the cluster dynamically. -

- -

-Open TransformationMessages.scala. -It defines the messages that are sent between the actors. -

- -

-The backend worker that performs the transformation job is defined in -TransformationBackend.scala -

- -

-Note that the TransformationBackend actor subscribes to cluster events to detect new, -potential, frontend nodes, and send them a registration message so that they know -that they can use the backend worker. -

- -

-The frontend that receives user jobs and delegates to one of the registered backend workers is defined in -TransformationFrontend.scala -

- -

-Note that the TransformationFrontend actor watch the registered backend -to be able to remove it from its list of available backend workers. -Death watch uses the cluster failure detector for nodes in the cluster, i.e. it detects -network failures and JVM crashes, in addition to graceful termination of watched -actor. -

- -

-To run this sample, go to the Run -tab, and start the application main class sample.cluster.transformation.TransformationApp -if it is not already started. -

- -

-TransformationApp starts -5 actor systems (cluster members) in the same JVM process. It can be more -interesting to run them in separate processes. Stop the application in the -Run tab and run the following commands in separate terminal windows. -

- -

-<path to activator dir>/activator 
-  "runMain sample.cluster.transformation.TransformationFrontend 2551"		
-
- -

-<path to activator dir>/activator 
-  "runMain sample.cluster.transformation.TransformationBackend 2552"		
-
- -

-<path to activator dir>/activator 
-  "runMain sample.cluster.transformation.TransformationBackend 0"		
-
- -

-<path to activator dir>/activator 
-  "runMain sample.cluster.transformation.TransformationBackend 0"		
-
- -

-<path to activator dir>/activator 
-  "runMain sample.cluster.transformation.TransformationFrontend 0"		
-
- -
- -
-

Cluster Aware Routers

- -

-All routers -can be made aware of member nodes in the cluster, i.e. deploying new routees or looking up routees -on nodes in the cluster. -When a node becomes unreachable or leaves the cluster the routees of that node are -automatically unregistered from the router. When new nodes join the cluster additional -routees are added to the router, according to the configuration. Routees are also added -when a node becomes reachable again, after having been unreachable. -

- -

-You can read more about cluster aware routers in the -documentation. -

- -

-Let's take a look at a few samples that make use of cluster aware routers. -

- -
- -
-

Router Example with Group of Routees

- -

-Let's take a look at how to use a cluster aware router with a group of routees, -i.e. a router which does not create its routees but instead forwards incoming messages to a given -set of actors created elsewhere. -

- -

-The example application provides a service to calculate statistics for a text. -When some text is sent to the service it splits it into words, and delegates the task -to count number of characters in each word to a separate worker, a routee of a router. -The character count for each word is sent back to an aggregator that calculates -the average number of characters per word when all results have been collected. -

- -

-Open StatsMessages.scala. -It defines the messages that are sent between the actors. -

- -

-The worker that counts number of characters in each word is defined in -StatsWorker.scala. -

- -

-The service that receives text from users and splits it up into words, delegates to workers and aggregates -is defined in StatsService.scala. -

- -

-Note, nothing cluster specific so far, just plain actors. -

- -

-All nodes start StatsService and StatsWorker actors. Remember, routees are the workers in this case. -

- -

-Open stats1.conf -The router is configured with routees.paths. -This means that user requests can be sent to StatsService on any node and it will use -StatsWorker on all nodes. -

- -

-To run this sample, go to the Run -tab, and start the application main class sample.cluster.stats.StatsSample -if it is not already started. -

- -

-StatsSample starts -4 actor systems (cluster members) in the same JVM process. It can be more -interesting to run them in separate processes. Stop the application in the -Run tab and run the following commands in separate terminal windows. -

- -

-<path to activator dir>/activator 
-  "runMain sample.cluster.stats.StatsSample 2551"		
-
- -

-<path to activator dir>/activator 
-  "runMain sample.cluster.stats.StatsSample 2552"		
-
- -

-<path to activator dir>/activator 
-  "runMain sample.cluster.stats.StatsSampleClient"		
-
- -

-<path to activator dir>/activator 
-  "runMain sample.cluster.stats.StatsSample 0"		
-
- -
- -
-

Router Example with Pool of Remote Deployed Routees

- -

-Let's take a look at how to use a cluster aware router on single master node that creates -and deploys workers instead of looking them up. -

- -

-Open StatsSampleOneMaster.scala. -To keep track of a single master we use the Cluster Singleton -in the contrib module. The ClusterSingletonManager is started on each node. -

- -

-We also need an actor on each node that keeps track of where current single master exists and -delegates jobs to the StatsService. That is provided by the ClusterSingletonProxy. -

- -

-The ClusterSingletonProxy receives text from users and delegates to the current StatsService, the single -master. It listens to cluster events to lookup the StatsService on the oldest node. -

- -

-All nodes start ClusterSingletonProxy and the ClusterSingletonManager. The router is now configured in -stats2.conf -

- -

-To run this sample, go to the Run -tab, and start the application main class sample.cluster.stats.StatsSampleOneMaster -if it is not already started. -

- -

-StatsSampleOneMaster starts -4 actor systems (cluster members) in the same JVM process. It can be more -interesting to run them in separate processes. Stop the application in the -Run tab and run the following commands in separate terminal windows. -

- -

-<path to activator dir>/activator 
-  "runMain sample.cluster.stats.StatsSampleOneMaster 2551"		
-
- -

-<path to activator dir>/activator 
-  "runMain sample.cluster.stats.StatsSampleOneMaster 2552"		
-
- -

-<path to activator dir>/activator 
-  "runMain sample.cluster.stats.StatsSampleOneMasterClient"		
-
- -

-<path to activator dir>/activator 
-  "runMain sample.cluster.stats.StatsSampleOneMaster 0"		
-
- -
- -
-

Adaptive Load Balancing

- -

-The member nodes of the cluster collects system health metrics and publishes that to other nodes and to -registered subscribers. This information is primarily used for load-balancing routers, such as -the AdaptiveLoadBalancingPool and AdaptiveLoadBalancingGroup routers. -

- -

-You can read more about cluster metrics in the -documentation. -

- -

-Let's take a look at this router in action. What can be more demanding than calculating factorials? -

- -

-The backend worker that performs the factorial calculation: -FactorialBackend -

- -

-The frontend that receives user jobs and delegates to the backends via the router: -FactorialFrontend -

- -

-As you can see, the router is defined in the same way as other routers, and in this case it is configured in: -factorial.conf -

- -

-It is only router type adaptive and the metrics-selector that is specific to this router, -other things work in the same way as other routers. -

- -

-To run this sample, go to the Run -tab, and start the application main class sample.cluster.factorial.FactorialApp -if it is not already started. -

- -

-FactorialApp starts -4 actor systems (cluster members) in the same JVM process. It can be more -interesting to run them in separate processes. Stop the application in the -Run tab and run the following commands in separate terminal windows. -

- -

-<path to activator dir>/activator 
-  "runMain sample.cluster.factorial.FactorialBackend 2551"		
-
- -

-<path to activator dir>/activator 
-  "runMain sample.cluster.factorial.FactorialBackend 2552"		
-
- -

-<path to activator dir>/activator 
-  "runMain sample.cluster.factorial.FactorialBackend 0"		
-
- -

-<path to activator dir>/activator 
-  "runMain sample.cluster.factorial.FactorialFrontend 0"		
-
- -

-Press ctrl-c in the terminal window of the frontend to stop the factorial calculations. -

- -
-
-

Tests

- -

-Tests can be found in src/multi-jvm. -You can run them from the Test tab. -

- -
- - - diff --git a/akka-samples/akka-sample-distributed-data-java/.gitignore b/akka-samples/akka-sample-distributed-data-java/.gitignore deleted file mode 100644 index 660c959e44..0000000000 --- a/akka-samples/akka-sample-distributed-data-java/.gitignore +++ /dev/null @@ -1,17 +0,0 @@ -*# -*.iml -*.ipr -*.iws -*.pyc -*.tm.epoch -*.vim -*-shim.sbt -.idea/ -/project/plugins/project -project/boot -target/ -/logs -.cache -.classpath -.project -.settings \ No newline at end of file diff --git a/akka-samples/akka-sample-distributed-data-java/COPYING b/akka-samples/akka-sample-distributed-data-java/COPYING deleted file mode 100644 index 0e259d42c9..0000000000 --- a/akka-samples/akka-sample-distributed-data-java/COPYING +++ /dev/null @@ -1,121 +0,0 @@ -Creative Commons Legal Code - -CC0 1.0 Universal - - CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE - LEGAL SERVICES. DISTRIBUTION OF THIS DOCUMENT DOES NOT CREATE AN - ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS - INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES - REGARDING THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS - PROVIDED HEREUNDER, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM - THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS PROVIDED - HEREUNDER. - -Statement of Purpose - -The laws of most jurisdictions throughout the world automatically confer -exclusive Copyright and Related Rights (defined below) upon the creator -and subsequent owner(s) (each and all, an "owner") of an original work of -authorship and/or a database (each, a "Work"). 
- -Certain owners wish to permanently relinquish those rights to a Work for -the purpose of contributing to a commons of creative, cultural and -scientific works ("Commons") that the public can reliably and without fear -of later claims of infringement build upon, modify, incorporate in other -works, reuse and redistribute as freely as possible in any form whatsoever -and for any purposes, including without limitation commercial purposes. -These owners may contribute to the Commons to promote the ideal of a free -culture and the further production of creative, cultural and scientific -works, or to gain reputation or greater distribution for their Work in -part through the use and efforts of others. - -For these and/or other purposes and motivations, and without any -expectation of additional consideration or compensation, the person -associating CC0 with a Work (the "Affirmer"), to the extent that he or she -is an owner of Copyright and Related Rights in the Work, voluntarily -elects to apply CC0 to the Work and publicly distribute the Work under its -terms, with knowledge of his or her Copyright and Related Rights in the -Work and the meaning and intended legal effect of CC0 on those rights. - -1. Copyright and Related Rights. A Work made available under CC0 may be -protected by copyright and related or neighboring rights ("Copyright and -Related Rights"). Copyright and Related Rights include, but are not -limited to, the following: - - i. the right to reproduce, adapt, distribute, perform, display, - communicate, and translate a Work; - ii. moral rights retained by the original author(s) and/or performer(s); -iii. publicity and privacy rights pertaining to a person's image or - likeness depicted in a Work; - iv. rights protecting against unfair competition in regards to a Work, - subject to the limitations in paragraph 4(a), below; - v. rights protecting the extraction, dissemination, use and reuse of data - in a Work; - vi. 
database rights (such as those arising under Directive 96/9/EC of the - European Parliament and of the Council of 11 March 1996 on the legal - protection of databases, and under any national implementation - thereof, including any amended or successor version of such - directive); and -vii. other similar, equivalent or corresponding rights throughout the - world based on applicable law or treaty, and any national - implementations thereof. - -2. Waiver. To the greatest extent permitted by, but not in contravention -of, applicable law, Affirmer hereby overtly, fully, permanently, -irrevocably and unconditionally waives, abandons, and surrenders all of -Affirmer's Copyright and Related Rights and associated claims and causes -of action, whether now known or unknown (including existing as well as -future claims and causes of action), in the Work (i) in all territories -worldwide, (ii) for the maximum duration provided by applicable law or -treaty (including future time extensions), (iii) in any current or future -medium and for any number of copies, and (iv) for any purpose whatsoever, -including without limitation commercial, advertising or promotional -purposes (the "Waiver"). Affirmer makes the Waiver for the benefit of each -member of the public at large and to the detriment of Affirmer's heirs and -successors, fully intending that such Waiver shall not be subject to -revocation, rescission, cancellation, termination, or any other legal or -equitable action to disrupt the quiet enjoyment of the Work by the public -as contemplated by Affirmer's express Statement of Purpose. - -3. Public License Fallback. Should any part of the Waiver for any reason -be judged legally invalid or ineffective under applicable law, then the -Waiver shall be preserved to the maximum extent permitted taking into -account Affirmer's express Statement of Purpose. 
In addition, to the -extent the Waiver is so judged Affirmer hereby grants to each affected -person a royalty-free, non transferable, non sublicensable, non exclusive, -irrevocable and unconditional license to exercise Affirmer's Copyright and -Related Rights in the Work (i) in all territories worldwide, (ii) for the -maximum duration provided by applicable law or treaty (including future -time extensions), (iii) in any current or future medium and for any number -of copies, and (iv) for any purpose whatsoever, including without -limitation commercial, advertising or promotional purposes (the -"License"). The License shall be deemed effective as of the date CC0 was -applied by Affirmer to the Work. Should any part of the License for any -reason be judged legally invalid or ineffective under applicable law, such -partial invalidity or ineffectiveness shall not invalidate the remainder -of the License, and in such case Affirmer hereby affirms that he or she -will not (i) exercise any of his or her remaining Copyright and Related -Rights in the Work or (ii) assert any associated claims and causes of -action with respect to the Work, in either case contrary to Affirmer's -express Statement of Purpose. - -4. Limitations and Disclaimers. - - a. No trademark or patent rights held by Affirmer are waived, abandoned, - surrendered, licensed or otherwise affected by this document. - b. Affirmer offers the Work as-is and makes no representations or - warranties of any kind concerning the Work, express, implied, - statutory or otherwise, including without limitation warranties of - title, merchantability, fitness for a particular purpose, non - infringement, or the absence of latent or other defects, accuracy, or - the present or absence of errors, whether or not discoverable, all to - the greatest extent permissible under applicable law. - c. 
Affirmer disclaims responsibility for clearing rights of other persons - that may apply to the Work or any use thereof, including without - limitation any person's Copyright and Related Rights in the Work. - Further, Affirmer disclaims responsibility for obtaining any necessary - consents, permissions or other rights required for any use of the - Work. - d. Affirmer understands and acknowledges that Creative Commons is not a - party to this document and has no duty or obligation with respect to - this CC0 or use of the Work. diff --git a/akka-samples/akka-sample-distributed-data-java/LICENSE b/akka-samples/akka-sample-distributed-data-java/LICENSE deleted file mode 100644 index 2a18b45589..0000000000 --- a/akka-samples/akka-sample-distributed-data-java/LICENSE +++ /dev/null @@ -1,10 +0,0 @@ -Activator Template by Lightbend - -Licensed under Public Domain (CC0) - -To the extent possible under law, the person who associated CC0 with -this Activator Tempate has waived all copyright and related or neighboring -rights to this Activator Template. - -You should have received a copy of the CC0 legalcode along with this -work. If not, see . 
diff --git a/akka-samples/akka-sample-distributed-data-java/activator.properties b/akka-samples/akka-sample-distributed-data-java/activator.properties deleted file mode 100644 index edde6809fb..0000000000 --- a/akka-samples/akka-sample-distributed-data-java/activator.properties +++ /dev/null @@ -1,7 +0,0 @@ -name=akka-sample-distributed-data-java -title=Akka Distributed Data Samples with Java -description=Akka Distributed Data Samples with Java -tags=akka,cluster,java,sample,distributed-data -authorName=Akka Team -authorLink=http://akka.io/ -sourceLink=https://github.com/akka/akka diff --git a/akka-samples/akka-sample-distributed-data-java/build.sbt b/akka-samples/akka-sample-distributed-data-java/build.sbt deleted file mode 100644 index a716164e6b..0000000000 --- a/akka-samples/akka-sample-distributed-data-java/build.sbt +++ /dev/null @@ -1,50 +0,0 @@ -import com.typesafe.sbt.SbtMultiJvm -import com.typesafe.sbt.SbtMultiJvm.MultiJvmKeys.MultiJvm - -val akkaVersion = "2.5-SNAPSHOT" - -val project = Project( - id = "akka-sample-distributed-data-java", - base = file(".") - ) - .settings(SbtMultiJvm.multiJvmSettings: _*) - .settings( - name := "akka-sample-distributed-data-java", - version := "2.5-SNAPSHOT", - scalaVersion := "2.11.8", - scalacOptions in Compile ++= Seq("-encoding", "UTF-8", "-target:jvm-1.8", "-deprecation", "-feature", "-unchecked", "-Xlog-reflective-calls", "-Xlint"), - javacOptions in Compile ++= Seq("-source", "1.8", "-target", "1.8", "-Xlint:unchecked", "-Xlint:deprecation", "-Xdiags:verbose"), - javacOptions in doc in Compile := Seq("-source", "1.8"), - libraryDependencies ++= Seq( - "com.typesafe.akka" %% "akka-actor" % akkaVersion, - "com.typesafe.akka" %% "akka-remote" % akkaVersion, - "com.typesafe.akka" %% "akka-cluster" % akkaVersion, - "com.typesafe.akka" %% "akka-distributed-data-experimental" % akkaVersion, - "com.typesafe.akka" %% "akka-multi-node-testkit" % akkaVersion, - "org.scalatest" %% "scalatest" % "2.2.1" % "test"), - 
javaOptions in run ++= Seq( - "-Xms128m", "-Xmx1024m"), - Keys.fork in run := true, - // make sure that MultiJvm test are compiled by the default test compilation - compile in MultiJvm <<= (compile in MultiJvm) triggeredBy (compile in Test), - // disable parallel tests - parallelExecution in Test := false, - // make sure that MultiJvm tests are executed by the default test target, - // and combine the results from ordinary test and multi-jvm tests - executeTests in Test <<= (executeTests in Test, executeTests in MultiJvm) map { - case (testResults, multiNodeResults) => - val overall = - if (testResults.overall.id < multiNodeResults.overall.id) - multiNodeResults.overall - else - testResults.overall - Tests.Output(overall, - testResults.events ++ multiNodeResults.events, - testResults.summaries ++ multiNodeResults.summaries) - }, - licenses := Seq(("CC0", url("http://creativecommons.org/publicdomain/zero/1.0"))) - ) - .configs (MultiJvm) - - -fork in run := true diff --git a/akka-samples/akka-sample-distributed-data-java/project/build.properties b/akka-samples/akka-sample-distributed-data-java/project/build.properties deleted file mode 100644 index 748703f770..0000000000 --- a/akka-samples/akka-sample-distributed-data-java/project/build.properties +++ /dev/null @@ -1 +0,0 @@ -sbt.version=0.13.7 diff --git a/akka-samples/akka-sample-distributed-data-java/project/plugins.sbt b/akka-samples/akka-sample-distributed-data-java/project/plugins.sbt deleted file mode 100644 index c3e7d797de..0000000000 --- a/akka-samples/akka-sample-distributed-data-java/project/plugins.sbt +++ /dev/null @@ -1,4 +0,0 @@ - -resolvers += Classpaths.typesafeResolver - -addSbtPlugin("com.typesafe.sbt" % "sbt-multi-jvm" % "0.3.8") diff --git a/akka-samples/akka-sample-distributed-data-java/project/sbt-ui.sbt b/akka-samples/akka-sample-distributed-data-java/project/sbt-ui.sbt deleted file mode 100644 index 7c28b97b34..0000000000 --- 
a/akka-samples/akka-sample-distributed-data-java/project/sbt-ui.sbt +++ /dev/null @@ -1,3 +0,0 @@ -// This plugin represents functionality that is to be added to sbt in the future - -addSbtPlugin("org.scala-sbt" % "sbt-core-next" % "0.1.1") \ No newline at end of file diff --git a/akka-samples/akka-sample-distributed-data-java/src/main/java/sample/distributeddata/ReplicatedCache.java b/akka-samples/akka-sample-distributed-data-java/src/main/java/sample/distributeddata/ReplicatedCache.java deleted file mode 100644 index 9387384064..0000000000 --- a/akka-samples/akka-sample-distributed-data-java/src/main/java/sample/distributeddata/ReplicatedCache.java +++ /dev/null @@ -1,163 +0,0 @@ -package sample.distributeddata; - -import static akka.cluster.ddata.Replicator.readLocal; -import static akka.cluster.ddata.Replicator.writeLocal; - -import java.util.Optional; -import scala.Option; - -import akka.actor.AbstractActor; -import akka.actor.ActorRef; -import akka.actor.Props; -import akka.cluster.Cluster; -import akka.cluster.ddata.DistributedData; -import akka.cluster.ddata.Key; -import akka.cluster.ddata.LWWMap; -import akka.cluster.ddata.LWWMapKey; -import akka.cluster.ddata.Replicator.Get; -import akka.cluster.ddata.Replicator.GetSuccess; -import akka.cluster.ddata.Replicator.NotFound; -import akka.cluster.ddata.Replicator.Update; -import akka.cluster.ddata.Replicator.UpdateResponse; -import akka.japi.pf.ReceiveBuilder; - -@SuppressWarnings("unchecked") -public class ReplicatedCache extends AbstractActor { - - static class Request { - public final String key; - public final ActorRef replyTo; - - public Request(String key, ActorRef replyTo) { - this.key = key; - this.replyTo = replyTo; - } - } - - public static class PutInCache { - public final String key; - public final Object value; - - public PutInCache(String key, Object value) { - this.key = key; - this.value = value; - } - } - - public static class GetFromCache { - public final String key; - - public 
GetFromCache(String key) { - this.key = key; - } - } - - public static class Cached { - public final String key; - public final Optional value; - - public Cached(String key, Optional value) { - this.key = key; - this.value = value; - } - - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + ((key == null) ? 0 : key.hashCode()); - result = prime * result + ((value == null) ? 0 : value.hashCode()); - return result; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) - return true; - if (obj == null) - return false; - if (getClass() != obj.getClass()) - return false; - Cached other = (Cached) obj; - if (key == null) { - if (other.key != null) - return false; - } else if (!key.equals(other.key)) - return false; - if (value == null) { - if (other.value != null) - return false; - } else if (!value.equals(other.value)) - return false; - return true; - } - - @Override - public String toString() { - return "Cached [key=" + key + ", value=" + value + "]"; - } - - } - - public static class Evict { - public final String key; - - public Evict(String key) { - this.key = key; - } - } - - public static Props props() { - return Props.create(ReplicatedCache.class); - } - - private final ActorRef replicator = DistributedData.get(context().system()).replicator(); - private final Cluster node = Cluster.get(context().system()); - - public ReplicatedCache() { - receive(ReceiveBuilder - .match(PutInCache.class, cmd -> receivePutInCache(cmd.key, cmd.value)) - .match(Evict.class, cmd -> receiveEvict(cmd.key)) - .match(GetFromCache.class, cmd -> receiveGetFromCache(cmd.key)) - .match(GetSuccess.class, g -> receiveGetSuccess((GetSuccess>) g)) - .match(NotFound.class, n -> receiveNotFound((NotFound>) n)) - .match(UpdateResponse.class, u -> {}) - .build()); - } - - private void receivePutInCache(String key, Object value) { - Update> update = new Update<>(dataKey(key), LWWMap.create(), writeLocal(), - curr -> 
curr.put(node, key, value)); - replicator.tell(update, self()); - } - - private void receiveEvict(String key) { - Update> update = new Update<>(dataKey(key), LWWMap.create(), writeLocal(), - curr -> curr.remove(node, key)); - replicator.tell(update, self()); - } - - private void receiveGetFromCache(String key) { - Optional ctx = Optional.of(new Request(key, sender())); - Get> get = new Get<>(dataKey(key), readLocal(), ctx); - replicator.tell(get, self()); - } - - private void receiveGetSuccess(GetSuccess> g) { - Request req = (Request) g.getRequest().get(); - Option valueOption = g.dataValue().get(req.key); - Optional valueOptional = Optional.ofNullable(valueOption.isDefined() ? valueOption.get() : null); - req.replyTo.tell(new Cached(req.key, valueOptional), self()); - } - - private void receiveNotFound(NotFound> n) { - Request req = (Request) n.getRequest().get(); - req.replyTo.tell(new Cached(req.key, Optional.empty()), self()); - } - - private Key> dataKey(String entryKey) { - return LWWMapKey.create("cache-" + Math.abs(entryKey.hashCode()) % 100); - } - - -} \ No newline at end of file diff --git a/akka-samples/akka-sample-distributed-data-java/src/main/java/sample/distributeddata/ReplicatedMetrics.java b/akka-samples/akka-sample-distributed-data-java/src/main/java/sample/distributeddata/ReplicatedMetrics.java deleted file mode 100644 index 649b160e58..0000000000 --- a/akka-samples/akka-sample-distributed-data-java/src/main/java/sample/distributeddata/ReplicatedMetrics.java +++ /dev/null @@ -1,167 +0,0 @@ -package sample.distributeddata; - -import static akka.cluster.ddata.Replicator.writeLocal; -import java.lang.management.ManagementFactory; -import java.lang.management.MemoryMXBean; -import java.lang.management.MemoryUsage; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Map; -import java.util.Set; -import scala.concurrent.duration.FiniteDuration; - -import akka.actor.AbstractActor; -import akka.actor.ActorRef; -import 
akka.actor.Address; -import akka.actor.Cancellable; -import akka.actor.Props; -import akka.cluster.Cluster; -import akka.cluster.ClusterEvent; -import akka.cluster.ClusterEvent.MemberRemoved; -import akka.cluster.ClusterEvent.MemberUp; -import akka.cluster.ddata.DistributedData; -import akka.cluster.ddata.Key; -import akka.cluster.ddata.LWWMap; -import akka.cluster.ddata.LWWMapKey; -import akka.cluster.ddata.Replicator.Changed; -import akka.cluster.ddata.Replicator.Subscribe; -import akka.cluster.ddata.Replicator.Update; -import akka.cluster.ddata.Replicator.UpdateResponse; -import akka.event.Logging; -import akka.event.LoggingAdapter; -import akka.japi.pf.ReceiveBuilder; - -@SuppressWarnings("unchecked") -public class ReplicatedMetrics extends AbstractActor { - - public static Props props(FiniteDuration measureInterval, FiniteDuration cleanupInterval) { - return Props.create(ReplicatedMetrics.class, measureInterval, cleanupInterval); - } - - public static class UsedHeap { - public Map percentPerNode; - - public UsedHeap(Map percentPerNode) { - this.percentPerNode = percentPerNode; - } - } - - private static final String TICK = "tick"; - private static final String CLEANUP = "cleanup"; - - public static String nodeKey(Address address) { - return address.host().get() + ":" + address.port().get(); - } - - private final ActorRef replicator = DistributedData.get(context().system()).replicator(); - private final Cluster node = Cluster.get(context().system()); - private final String selfNodeKey = nodeKey(node.selfAddress()); - private final MemoryMXBean memoryMBean = ManagementFactory.getMemoryMXBean(); - private final LoggingAdapter log = Logging.getLogger(context().system(), this); - - private final Key> usedHeapKey = LWWMapKey.create("usedHeap"); - private final Key> maxHeapKey = LWWMapKey.create("maxHeap"); - - private final Cancellable tickTask; - private final Cancellable cleanupTask; - - private Map maxHeap = new HashMap<>(); - private final Set nodesInCluster = 
new HashSet<>(); - - @Override - public void preStart() { - replicator.tell(new Subscribe<>(maxHeapKey, self()), ActorRef.noSender()); - replicator.tell(new Subscribe<>(usedHeapKey, self()), ActorRef.noSender()); - node.subscribe(self(), ClusterEvent.initialStateAsEvents(), - MemberUp.class, MemberRemoved.class); - } - - @Override - public void postStop() throws Exception { - tickTask.cancel(); - cleanupTask.cancel(); - node.unsubscribe(self()); - super.postStop(); - } - - public ReplicatedMetrics(FiniteDuration measureInterval, FiniteDuration cleanupInterval) { - tickTask = context().system().scheduler().schedule(measureInterval, measureInterval, - self(), TICK, context().dispatcher(), self()); - cleanupTask = context().system().scheduler().schedule(cleanupInterval, cleanupInterval, - self(), CLEANUP, context().dispatcher(), self()); - - receive(ReceiveBuilder - .matchEquals(TICK, t -> receiveTick()) - .match(Changed.class, c -> c.key().equals(maxHeapKey), c -> receiveMaxHeapChanged((Changed>) c)) - .match(Changed.class, c -> c.key().equals(usedHeapKey), c -> receiveUsedHeapChanged((Changed>) c)) - .match(UpdateResponse.class, u -> {}) - .match(MemberUp.class, m -> receiveMemberUp(m.member().address())) - .match(MemberRemoved.class, m -> receiveMemberRemoved(m.member().address())) - .matchEquals(CLEANUP, c -> receiveCleanup()) - .build()); - } - - private void receiveTick() { - MemoryUsage heap = memoryMBean.getHeapMemoryUsage(); - long used = heap.getUsed(); - long max = heap.getMax(); - - Update> update1 = new Update<>(usedHeapKey, LWWMap.create(), writeLocal(), - curr -> curr.put(node, selfNodeKey, used)); - replicator.tell(update1, self()); - - Update> update2 = new Update<>(maxHeapKey, LWWMap.create(), writeLocal(), curr -> { - if (curr.contains(selfNodeKey) && curr.get(selfNodeKey).get().longValue() == max) - return curr; // unchanged - else - return curr.put(node, selfNodeKey, max); - }); - replicator.tell(update2, self()); - } - - private void 
receiveMaxHeapChanged(Changed> c) { - maxHeap = c.dataValue().getEntries(); - } - - private void receiveUsedHeapChanged(Changed> c) { - Map percentPerNode = new HashMap<>(); - for (Map.Entry entry : c.dataValue().getEntries().entrySet()) { - if (maxHeap.containsKey(entry.getKey())) { - double percent = (entry.getValue().doubleValue() / maxHeap.get(entry.getKey())) * 100.0; - percentPerNode.put(entry.getKey(), percent); - } - } - UsedHeap usedHeap = new UsedHeap(percentPerNode); - log.debug("Node {} observed:\n{}", node, usedHeap); - context().system().eventStream().publish(usedHeap); - } - - private void receiveMemberUp(Address address) { - nodesInCluster.add(nodeKey(address)); - } - - private void receiveMemberRemoved(Address address) { - nodesInCluster.remove(nodeKey(address)); - if (address.equals(node.selfAddress())) - context().stop(self()); - } - - private void receiveCleanup() { - Update> update1 = new Update<>(usedHeapKey, LWWMap.create(), writeLocal(), curr -> cleanup(curr)); - replicator.tell(update1, self()); - Update> update2 = new Update<>(maxHeapKey, LWWMap.create(), writeLocal(), curr -> cleanup(curr)); - replicator.tell(update2, self()); - } - - private LWWMap cleanup(LWWMap data) { - LWWMap result = data; - log.info("Cleanup " + nodesInCluster + " -- " + data.getEntries().keySet()); - for (String k : data.getEntries().keySet()) { - if (!nodesInCluster.contains(k)) { - result = result.remove(node, k); - } - } - return result; - } - -} \ No newline at end of file diff --git a/akka-samples/akka-sample-distributed-data-java/src/main/java/sample/distributeddata/ServiceRegistry.java b/akka-samples/akka-sample-distributed-data-java/src/main/java/sample/distributeddata/ServiceRegistry.java deleted file mode 100644 index 8d0dd71912..0000000000 --- a/akka-samples/akka-sample-distributed-data-java/src/main/java/sample/distributeddata/ServiceRegistry.java +++ /dev/null @@ -1,248 +0,0 @@ -package sample.distributeddata; - -import java.util.Collections; -import 
java.util.HashMap; -import java.util.HashSet; -import java.util.Map; -import java.util.Set; -import scala.PartialFunction; -import scala.runtime.BoxedUnit; - -import akka.actor.AbstractActor; -import akka.actor.ActorRef; -import akka.actor.Address; -import akka.actor.Props; -import akka.actor.Terminated; -import akka.cluster.Cluster; -import akka.cluster.ClusterEvent; -import akka.cluster.ddata.DistributedData; -import akka.cluster.ddata.GSet; -import akka.cluster.ddata.GSetKey; -import akka.cluster.ddata.Key; -import akka.cluster.ddata.ORSet; -import akka.cluster.ddata.Replicator; -import akka.cluster.ddata.Replicator.Changed; -import akka.cluster.ddata.Replicator.Subscribe; -import akka.cluster.ddata.Replicator.Update; -import akka.cluster.ddata.Replicator.UpdateResponse; -import akka.event.Logging; -import akka.event.LoggingAdapter; -import akka.japi.pf.ReceiveBuilder; - -@SuppressWarnings("unchecked") -public class ServiceRegistry extends AbstractActor { - - /** - * Register a `service` with a `name`. Several services can be registered with - * the same `name`. It will be removed when it is terminated. - */ - public static class Register { - public final String name; - public final ActorRef service; - - public Register(String name, ActorRef service) { - this.name = name; - this.service = service; - } - } - - /** - * Lookup services registered for a `name`. {@link Bindings} will be sent to - * `sender()`. - */ - public static class Lookup { - public final String name; - - public Lookup(String name) { - this.name = name; - } - } - - /** - * Reply for {@link Lookup} - */ - public static class Bindings { - public final String name; - public final Set services; - - public Bindings(String name, Set services) { - this.name = name; - this.services = services; - } - } - - /** - * Published to `ActorSystem.eventStream` when services are changed. 
- */ - public static class BindingChanged { - public final String name; - public final Set services; - - public BindingChanged(String name, Set services) { - this.name = name; - this.services = services; - } - } - - public static class ServiceKey extends Key> { - private static final long serialVersionUID = 1L; - - public ServiceKey(String serviceName) { - super(serviceName); - } - } - - public static Props props() { - return Props.create(ServiceRegistry.class); - } - - private final LoggingAdapter log = Logging.getLogger(context().system(), this); - private final ActorRef replicator = DistributedData.get(context().system()).replicator(); - private final Cluster node = Cluster.get(context().system()); - - - private final Key> allServicesKey = GSetKey.create("service-keys"); - - private Set keys = new HashSet<>(); - private final Map> services = new HashMap<>(); - private boolean leader = false; - - public ServiceRegistry() { - receive(matchCommands() - .orElse(matchChanged()) - .orElse(matchWatch()) - .orElse(matchOther())); - } - - @Override - public void preStart() { - replicator.tell(new Subscribe<>(allServicesKey, self()), ActorRef.noSender()); - node.subscribe(self(), ClusterEvent.initialStateAsEvents(), ClusterEvent.LeaderChanged.class); - } - - @Override - public void postStop() throws Exception { - node.unsubscribe(self()); - super.postStop(); - } - - private PartialFunction matchCommands() { - return ReceiveBuilder - .match(Register.class, r -> receiveRegister(r)) - .match(Lookup.class, l -> receiveLookup(l)) - .build(); - } - - private ServiceKey serviceKey(String serviceName) { - return new ServiceKey("service:" + serviceName); - } - - - private void receiveRegister(Register r) { - ServiceKey dKey = serviceKey(r.name); - // store the service names in a separate GSet to be able to - // get notifications of new names - if (!keys.contains(dKey)) { - Update> update1 = new Update<>(allServicesKey, GSet.create(), Replicator.writeLocal(), - curr -> 
curr.add(dKey)); - replicator.tell(update1, self()); - } - - Update> update2 = new Update<>(dKey, ORSet.create(), Replicator.writeLocal(), - curr -> curr.add(node, r.service)); - replicator.tell(update2, self()); - } - - private void receiveLookup(Lookup l) { - sender().tell(new Bindings(l.name, services.getOrDefault(l.name, Collections.emptySet())), self()); - } - - private PartialFunction matchChanged() { - return ReceiveBuilder - .match(Changed.class, c -> { - if (c.key().equals(allServicesKey)) - receiveAllServicesKeysChanged((Changed>) c); - else if (c.key() instanceof ServiceKey) - receiveServiceChanged((Changed>) c); - }) - .build(); - } - - private void receiveAllServicesKeysChanged(Changed> c) { - Set newKeys = c.dataValue().getElements(); - Set diff = new HashSet<>(newKeys); - diff.removeAll(keys); - log.debug("Services changed, added: {}, all: {}", diff, newKeys); - diff.forEach(dKey -> { - // subscribe to get notifications of when services with this name are added or removed - replicator.tell(new Subscribe>(dKey, self()), self()); - }); - keys = newKeys; - - } - - private void receiveServiceChanged(Changed> c) { - String name = c.key().id().split(":")[1]; - Set newServices = c.get(serviceKey(name)).getElements(); - log.debug("Services changed for name [{}]: {}", name, newServices); - services.put(name, newServices); - context().system().eventStream().publish(new BindingChanged(name, newServices)); - if (leader) { - newServices.forEach(ref -> context().watch(ref)); // watch is idempotent - } - } - - private PartialFunction matchWatch() { - return ReceiveBuilder - .match(ClusterEvent.LeaderChanged.class, c -> c.getLeader() != null, - c -> receiveLeaderChanged(c.getLeader())) - .match(Terminated.class, t -> receiveTerminated(t.actor())) - .build(); - } - - private void receiveLeaderChanged(Address newLeader) { - // Let one node (the leader) be responsible for removal of terminated services - // to avoid redundant work and too many death watch 
notifications. - // It is not critical to only do it from one node. - boolean wasLeader = leader; - leader = newLeader.equals(node.selfAddress()); - // when used with many (> 500) services you must increase the system message buffer - // `akka.remote.system-message-buffer-size` - if (!wasLeader && leader) { - for (Set refs : services.values()) { - for (ActorRef ref : refs) { - context().watch(ref); - } - } - } else if (wasLeader && !leader) { - for (Set refs : services.values()) { - for (ActorRef ref : refs) { - context().unwatch(ref); - } - } - } - } - - private void receiveTerminated(ActorRef ref) { - for (Map.Entry> entry : services.entrySet()) { - if (entry.getValue().contains(ref)) { - log.debug("Service with name [{}] terminated: {}", entry.getKey(), ref); - ServiceKey dKey = serviceKey(entry.getKey()); - Update> update = new Update<>(dKey, ORSet.create(), Replicator.writeLocal(), - curr -> curr.remove(node, ref)); - replicator.tell(update, self()); - } - } - } - - private PartialFunction matchOther() { - return ReceiveBuilder - .match(UpdateResponse.class, u -> { - // ok - }) - .build(); - } - - - -} \ No newline at end of file diff --git a/akka-samples/akka-sample-distributed-data-java/src/main/java/sample/distributeddata/VotingService.java b/akka-samples/akka-sample-distributed-data-java/src/main/java/sample/distributeddata/VotingService.java deleted file mode 100644 index 0ca6747680..0000000000 --- a/akka-samples/akka-sample-distributed-data-java/src/main/java/sample/distributeddata/VotingService.java +++ /dev/null @@ -1,149 +0,0 @@ -package sample.distributeddata; - -import java.util.Optional; -import java.util.HashMap; -import java.math.BigInteger; -import java.util.Map; -import scala.PartialFunction; -import scala.runtime.BoxedUnit; -import scala.concurrent.duration.Duration; - -import akka.actor.AbstractActor; -import akka.actor.ActorRef; -import akka.cluster.Cluster; -import akka.cluster.ddata.*; -import akka.japi.pf.ReceiveBuilder; - -import static 
akka.cluster.ddata.Replicator.*; -import static java.util.concurrent.TimeUnit.SECONDS; - -@SuppressWarnings("unchecked") -public class VotingService extends AbstractActor { - - public static final String OPEN = "open"; - public static final String CLOSE = "close"; - public static final String GET_VOTES = "getVotes"; - - public static class Votes { - public final Map result; - public final boolean open; - - public Votes(Map result, boolean open) { - this.result = result; - this.open = open; - } - } - - public static class Vote { - public final String participant; - - public Vote(String participant) { - this.participant = participant; - } - } - - private final ActorRef replicator = DistributedData.get(context().system()).replicator(); - private final Cluster node = Cluster.get(context().system()); - - private final Key openedKey = FlagKey.create("contestOpened"); - private final Key closedKey = FlagKey.create("contestClosed"); - private final Key> countersKey = PNCounterMapKey.create("contestCounters"); - private final WriteConsistency writeAll = new WriteAll(Duration.create(5, SECONDS)); - private final ReadConsistency readAll = new ReadAll(Duration.create(3, SECONDS)); - - @Override - public void preStart() { - replicator.tell(new Subscribe<>(openedKey, self()), ActorRef.noSender()); - } - - public VotingService() { - receive(ReceiveBuilder - .matchEquals(OPEN, cmd -> receiveOpen()) - .match(Changed.class, c -> c.key().equals(openedKey), c -> receiveOpenedChanged((Changed) c)) - .matchEquals(GET_VOTES, cmd -> receiveGetVotesEmpty()) - .build()); - } - - - private void receiveOpen() { - Update update = new Update<>(openedKey, Flag.create(), writeAll, curr -> curr.switchOn()); - replicator.tell(update, self()); - becomeOpen(); - } - - private void becomeOpen() { - replicator.tell(new Unsubscribe<>(openedKey, self()), ActorRef.noSender()); - replicator.tell(new Subscribe<>(closedKey, self()), ActorRef.noSender()); - 
context().become(matchOpen().orElse(matchGetVotes(true))); - } - - private void receiveOpenedChanged(Changed c) { - if (c.dataValue().enabled()) - becomeOpen(); - } - - private void receiveGetVotesEmpty() { - sender().tell(new Votes(new HashMap<>(), false), self()); - } - - private PartialFunction matchOpen() { - return ReceiveBuilder - .match(Vote.class, vote -> receiveVote(vote)) - .match(UpdateSuccess.class, u -> receiveUpdateSuccess()) - .matchEquals(CLOSE, cmd -> receiveClose()) - .match(Changed.class, c -> c.key().equals(closedKey), c -> receiveClosedChanged((Changed) c)) - .build(); - } - - private void receiveVote(Vote vote) { - Update> update = new Update<>(countersKey, PNCounterMap.create(), Replicator.writeLocal(), - curr -> curr.increment(node, vote.participant, 1)); - replicator.tell(update, self()); - } - - private void receiveUpdateSuccess() { - // ok - } - - private void receiveClose() { - Update update = new Update<>(closedKey, Flag.create(), writeAll, curr -> curr.switchOn()); - replicator.tell(update, self()); - context().become(matchGetVotes(false)); - } - - private void receiveClosedChanged(Changed c) { - if (c.dataValue().enabled()) - context().become(matchGetVotes(false)); - } - - private PartialFunction matchGetVotes(boolean open) { - return ReceiveBuilder - .matchEquals(GET_VOTES, s -> receiveGetVotes()) - .match(NotFound.class, n -> n.key().equals(countersKey), n -> receiveNotFound(open, (NotFound>) n)) - .match(GetSuccess.class, g -> g.key().equals(countersKey), - g -> receiveGetSuccess(open, (GetSuccess>) g)) - .match(GetFailure.class, f -> f.key().equals(countersKey), f -> receiveGetFailure()) - .match(UpdateSuccess.class, u -> receiveUpdateSuccess()).build(); - } - - private void receiveGetVotes() { - Optional ctx = Optional.of(sender()); - replicator.tell(new Replicator.Get>(countersKey, readAll, ctx), self()); - } - - - private void receiveGetSuccess(boolean open, GetSuccess> g) { - Map result = g.dataValue().getEntries(); - ActorRef 
replyTo = (ActorRef) g.getRequest().get(); - replyTo.tell(new Votes(result, open), self()); - } - - private void receiveNotFound(boolean open, NotFound> n) { - ActorRef replyTo = (ActorRef) n.getRequest().get(); - replyTo.tell(new Votes(new HashMap<>(), open), self()); - } - - private void receiveGetFailure() { - // skip - } -} \ No newline at end of file diff --git a/akka-samples/akka-sample-distributed-data-java/src/main/resources/application.conf b/akka-samples/akka-sample-distributed-data-java/src/main/resources/application.conf deleted file mode 100644 index b7f09dfe48..0000000000 --- a/akka-samples/akka-sample-distributed-data-java/src/main/resources/application.conf +++ /dev/null @@ -1,21 +0,0 @@ -akka { - actor { - provider = "cluster" - } - remote { - log-remote-lifecycle-events = off - netty.tcp { - hostname = "127.0.0.1" - port = 0 - } - } - - cluster { - seed-nodes = [ - "akka.tcp://ClusterSystem@127.0.0.1:2551", - "akka.tcp://ClusterSystem@127.0.0.1:2552"] - - auto-down-unreachable-after = 10s - } -} - diff --git a/akka-samples/akka-sample-distributed-data-java/src/multi-jvm/scala/sample/distributeddata/ReplicatedCacheSpec.scala b/akka-samples/akka-sample-distributed-data-java/src/multi-jvm/scala/sample/distributeddata/ReplicatedCacheSpec.scala deleted file mode 100644 index b5d2980b15..0000000000 --- a/akka-samples/akka-sample-distributed-data-java/src/multi-jvm/scala/sample/distributeddata/ReplicatedCacheSpec.scala +++ /dev/null @@ -1,135 +0,0 @@ -package sample.distributeddata - -import java.util.Optional; -import scala.concurrent.duration._ -import akka.cluster.Cluster -import akka.cluster.ddata.DistributedData -import akka.cluster.ddata.Replicator.GetReplicaCount -import akka.cluster.ddata.Replicator.ReplicaCount -import akka.remote.testconductor.RoleName -import akka.remote.testkit.MultiNodeConfig -import akka.remote.testkit.MultiNodeSpec -import akka.testkit._ -import com.typesafe.config.ConfigFactory - -object ReplicatedCacheSpec extends 
MultiNodeConfig { - val node1 = role("node-1") - val node2 = role("node-2") - val node3 = role("node-3") - - commonConfig(ConfigFactory.parseString(""" - akka.loglevel = INFO - akka.actor.provider = "cluster" - akka.log-dead-letters-during-shutdown = off - """)) - -} - -class ReplicatedCacheSpecMultiJvmNode1 extends ReplicatedCacheSpec -class ReplicatedCacheSpecMultiJvmNode2 extends ReplicatedCacheSpec -class ReplicatedCacheSpecMultiJvmNode3 extends ReplicatedCacheSpec - -class ReplicatedCacheSpec extends MultiNodeSpec(ReplicatedCacheSpec) with STMultiNodeSpec with ImplicitSender { - import ReplicatedCacheSpec._ - import ReplicatedCache._ - - override def initialParticipants = roles.size - - val cluster = Cluster(system) - val replicatedCache = system.actorOf(ReplicatedCache.props) - - def join(from: RoleName, to: RoleName): Unit = { - runOn(from) { - cluster join node(to).address - } - enterBarrier(from.name + "-joined") - } - - "Demo of a replicated cache" must { - "join cluster" in within(20.seconds) { - join(node1, node1) - join(node2, node1) - join(node3, node1) - - awaitAssert { - DistributedData(system).replicator ! GetReplicaCount - expectMsg(ReplicaCount(roles.size)) - } - enterBarrier("after-1") - } - - "replicate cached entry" in within(10.seconds) { - runOn(node1) { - replicatedCache ! new PutInCache("key1", "A") - } - - awaitAssert { - val probe = TestProbe() - replicatedCache.tell(new GetFromCache("key1"), probe.ref) - probe.expectMsg(new Cached("key1", Optional.of("A"))) - } - - enterBarrier("after-2") - } - - "replicate many cached entries" in within(10.seconds) { - runOn(node1) { - for (i ← 100 to 200) - replicatedCache ! 
new PutInCache("key" + i, i) - } - - awaitAssert { - val probe = TestProbe() - for (i ← 100 to 200) { - replicatedCache.tell(new GetFromCache("key" + i), probe.ref) - probe.expectMsg(new Cached("key" + i, Optional.of(Integer.valueOf(i)))) - } - } - - enterBarrier("after-3") - } - - "replicate evicted entry" in within(15.seconds) { - runOn(node1) { - replicatedCache ! new PutInCache("key2", "B") - } - - awaitAssert { - val probe = TestProbe() - replicatedCache.tell(new GetFromCache("key2"), probe.ref) - probe.expectMsg(new Cached("key2", Optional.of("B"))) - } - enterBarrier("key2-replicated") - - runOn(node3) { - replicatedCache ! new Evict("key2") - } - - awaitAssert { - val probe = TestProbe() - replicatedCache.tell(new GetFromCache("key2"), probe.ref) - probe.expectMsg(new Cached("key2", Optional.empty())) - } - - enterBarrier("after-4") - } - - "replicate updated cached entry" in within(10.seconds) { - runOn(node2) { - replicatedCache ! new PutInCache("key1", "A2") - replicatedCache ! 
new PutInCache("key1", "A3") - } - - awaitAssert { - val probe = TestProbe() - replicatedCache.tell(new GetFromCache("key1"), probe.ref) - probe.expectMsg(new Cached("key1", Optional.of("A3"))) - } - - enterBarrier("after-5") - } - - } - -} - diff --git a/akka-samples/akka-sample-distributed-data-java/src/multi-jvm/scala/sample/distributeddata/ReplicatedMetricsSpec.scala b/akka-samples/akka-sample-distributed-data-java/src/multi-jvm/scala/sample/distributeddata/ReplicatedMetricsSpec.scala deleted file mode 100644 index 61049162b1..0000000000 --- a/akka-samples/akka-sample-distributed-data-java/src/multi-jvm/scala/sample/distributeddata/ReplicatedMetricsSpec.scala +++ /dev/null @@ -1,92 +0,0 @@ -package sample.distributeddata - -import scala.concurrent.duration._ -import akka.cluster.Cluster -import akka.cluster.ddata.DistributedData -import akka.cluster.ddata.Replicator.GetReplicaCount -import akka.cluster.ddata.Replicator.ReplicaCount -import akka.remote.testconductor.RoleName -import akka.remote.testkit.MultiNodeConfig -import akka.remote.testkit.MultiNodeSpec -import akka.testkit._ -import com.typesafe.config.ConfigFactory -import scala.collection.JavaConverters._ - -object ReplicatedMetricsSpec extends MultiNodeConfig { - val node1 = role("node-1") - val node2 = role("node-2") - val node3 = role("node-3") - - commonConfig(ConfigFactory.parseString(""" - akka.loglevel = INFO - akka.actor.provider = "cluster" - akka.log-dead-letters-during-shutdown = off - """)) - -} - -class ReplicatedMetricsSpecMultiJvmNode1 extends ReplicatedMetricsSpec -class ReplicatedMetricsSpecMultiJvmNode2 extends ReplicatedMetricsSpec -class ReplicatedMetricsSpecMultiJvmNode3 extends ReplicatedMetricsSpec - -class ReplicatedMetricsSpec extends MultiNodeSpec(ReplicatedMetricsSpec) with STMultiNodeSpec with ImplicitSender { - import ReplicatedMetricsSpec._ - import ReplicatedMetrics._ - - override def initialParticipants = roles.size - - val cluster = Cluster(system) - val 
replicatedMetrics = system.actorOf(ReplicatedMetrics.props(1.second, 3.seconds)) - - def join(from: RoleName, to: RoleName): Unit = { - runOn(from) { - cluster join node(to).address - } - enterBarrier(from.name + "-joined") - } - - "Demo of a replicated metrics" must { - "join cluster" in within(20.seconds) { - join(node1, node1) - join(node2, node1) - join(node3, node1) - - awaitAssert { - DistributedData(system).replicator ! GetReplicaCount - expectMsg(ReplicaCount(roles.size)) - } - enterBarrier("after-1") - } - - "replicate metrics" in within(10.seconds) { - val probe = TestProbe() - system.eventStream.subscribe(probe.ref, classOf[UsedHeap]) - awaitAssert { - probe.expectMsgType[UsedHeap](1.second).percentPerNode.size should be(3) - } - probe.expectMsgType[UsedHeap].percentPerNode.size should be(3) - probe.expectMsgType[UsedHeap].percentPerNode.size should be(3) - enterBarrier("after-2") - } - - "cleanup removed node" in within(25.seconds) { - val node3Address = node(node3).address - runOn(node1) { - cluster.leave(node3Address) - } - runOn(node1, node2) { - val probe = TestProbe() - system.eventStream.subscribe(probe.ref, classOf[UsedHeap]) - awaitAssert { - probe.expectMsgType[UsedHeap](1.second).percentPerNode.size should be(2) - } - probe.expectMsgType[UsedHeap].percentPerNode.asScala.toMap should not contain ( - nodeKey(node3Address)) - } - enterBarrier("after-3") - } - - } - -} - diff --git a/akka-samples/akka-sample-distributed-data-java/src/multi-jvm/scala/sample/distributeddata/STMultiNodeSpec.scala b/akka-samples/akka-sample-distributed-data-java/src/multi-jvm/scala/sample/distributeddata/STMultiNodeSpec.scala deleted file mode 100644 index 0daad1df58..0000000000 --- a/akka-samples/akka-sample-distributed-data-java/src/multi-jvm/scala/sample/distributeddata/STMultiNodeSpec.scala +++ /dev/null @@ -1,17 +0,0 @@ -package sample.distributeddata - -import akka.remote.testkit.MultiNodeSpecCallbacks - -import org.scalatest.{ BeforeAndAfterAll, WordSpecLike } 
-import org.scalatest.Matchers - -/** - * Hooks up MultiNodeSpec with ScalaTest - */ -trait STMultiNodeSpec extends MultiNodeSpecCallbacks - with WordSpecLike with Matchers with BeforeAndAfterAll { - - override def beforeAll() = multiNodeSpecBeforeAll() - - override def afterAll() = multiNodeSpecAfterAll() -} diff --git a/akka-samples/akka-sample-distributed-data-java/src/multi-jvm/scala/sample/distributeddata/ServiceRegistrySpec.scala b/akka-samples/akka-sample-distributed-data-java/src/multi-jvm/scala/sample/distributeddata/ServiceRegistrySpec.scala deleted file mode 100644 index 2cf248ca25..0000000000 --- a/akka-samples/akka-sample-distributed-data-java/src/multi-jvm/scala/sample/distributeddata/ServiceRegistrySpec.scala +++ /dev/null @@ -1,142 +0,0 @@ -package sample.distributeddata - -import scala.concurrent.duration._ -import akka.actor.Actor -import akka.actor.PoisonPill -import akka.actor.Props -import akka.cluster.Cluster -import akka.cluster.ddata.DistributedData -import akka.cluster.ddata.Replicator.GetReplicaCount -import akka.cluster.ddata.Replicator.ReplicaCount -import akka.remote.testconductor.RoleName -import akka.remote.testkit.MultiNodeConfig -import akka.remote.testkit.MultiNodeSpec -import akka.testkit._ -import com.typesafe.config.ConfigFactory -import scala.collection.JavaConverters._ - -object ServiceRegistrySpec extends MultiNodeConfig { - val node1 = role("node-1") - val node2 = role("node-2") - val node3 = role("node-3") - - commonConfig(ConfigFactory.parseString(""" - akka.loglevel = INFO - akka.actor.provider = "cluster" - akka.log-dead-letters-during-shutdown = off - """)) - - class Service extends Actor { - def receive = { - case s: String => sender() ! 
self.path.name + ": " + s - } - } - -} - -class ServiceRegistrySpecMultiJvmNode1 extends ServiceRegistrySpec -class ServiceRegistrySpecMultiJvmNode2 extends ServiceRegistrySpec -class ServiceRegistrySpecMultiJvmNode3 extends ServiceRegistrySpec - -class ServiceRegistrySpec extends MultiNodeSpec(ServiceRegistrySpec) with STMultiNodeSpec with ImplicitSender { - import ServiceRegistrySpec._ - import ServiceRegistry._ - - override def initialParticipants = roles.size - - val cluster = Cluster(system) - val registry = system.actorOf(ServiceRegistry.props) - - def join(from: RoleName, to: RoleName): Unit = { - runOn(from) { - cluster join node(to).address - } - enterBarrier(from.name + "-joined") - } - - "Demo of a replicated service registry" must { - "join cluster" in within(20.seconds) { - join(node1, node1) - join(node2, node1) - join(node3, node1) - - awaitAssert { - DistributedData(system).replicator ! GetReplicaCount - expectMsg(ReplicaCount(roles.size)) - } - enterBarrier("after-1") - } - - "replicate service entry" in within(10.seconds) { - runOn(node1) { - val a1 = system.actorOf(Props[Service], name = "a1") - registry ! new Register("a", a1) - } - - awaitAssert { - val probe = TestProbe() - registry.tell(new Lookup("a"), probe.ref) - probe.expectMsgType[Bindings].services.asScala.map(_.path.name).toSet should be(Set("a1")) - } - - enterBarrier("after-2") - } - - "replicate updated service entry, and publish to even bus" in { - val probe = TestProbe() - system.eventStream.subscribe(probe.ref, classOf[BindingChanged]) - - runOn(node2) { - val a2 = system.actorOf(Props[Service], name = "a2") - registry ! 
new Register("a", a2) - } - - probe.within(10.seconds) { - probe.expectMsgType[BindingChanged].services.asScala.map(_.path.name).toSet should be(Set("a1", "a2")) - registry.tell(new Lookup("a"), probe.ref) - probe.expectMsgType[Bindings].services.asScala.map(_.path.name).toSet should be(Set("a1", "a2")) - } - - enterBarrier("after-4") - } - - "remove terminated service" in { - val probe = TestProbe() - system.eventStream.subscribe(probe.ref, classOf[BindingChanged]) - - runOn(node2) { - registry.tell(new Lookup("a"), probe.ref) - val a2 = probe.expectMsgType[Bindings].services.asScala.find(_.path.name == "a2").get - a2 ! PoisonPill - } - - probe.within(10.seconds) { - probe.expectMsgType[BindingChanged].services.asScala.map(_.path.name).toSet should be(Set("a1")) - registry.tell(new Lookup("a"), probe.ref) - probe.expectMsgType[Bindings].services.asScala.map(_.path.name).toSet should be(Set("a1")) - } - - enterBarrier("after-5") - } - - "replicate many service entries" in within(10.seconds) { - for (i ← 100 until 200) { - val service = system.actorOf(Props[Service], name = myself.name + "_" + i) - registry ! 
new Register("a" + i, service) - } - - awaitAssert { - val probe = TestProbe() - for (i ← 100 until 200) { - registry.tell(new Lookup("a" + i), probe.ref) - probe.expectMsgType[Bindings].services.asScala.map(_.path.name).toSet should be(roles.map(_.name + "_" + i).toSet) - } - } - - enterBarrier("after-6") - } - - } - -} - diff --git a/akka-samples/akka-sample-distributed-data-java/src/multi-jvm/scala/sample/distributeddata/ShoppingCartSpec.scala b/akka-samples/akka-sample-distributed-data-java/src/multi-jvm/scala/sample/distributeddata/ShoppingCartSpec.scala deleted file mode 100644 index ef5e302405..0000000000 --- a/akka-samples/akka-sample-distributed-data-java/src/multi-jvm/scala/sample/distributeddata/ShoppingCartSpec.scala +++ /dev/null @@ -1,101 +0,0 @@ -package sample.distributeddata - -import scala.concurrent.duration._ -import akka.cluster.Cluster -import akka.cluster.ddata.DistributedData -import akka.cluster.ddata.Replicator.GetReplicaCount -import akka.cluster.ddata.Replicator.ReplicaCount -import akka.remote.testconductor.RoleName -import akka.remote.testkit.MultiNodeConfig -import akka.remote.testkit.MultiNodeSpec -import akka.testkit._ -import com.typesafe.config.ConfigFactory -import scala.collection.JavaConverters._ - -object ShoppingCartSpec extends MultiNodeConfig { - val node1 = role("node-1") - val node2 = role("node-2") - val node3 = role("node-3") - - commonConfig(ConfigFactory.parseString(""" - akka.loglevel = INFO - akka.actor.provider = "cluster" - akka.log-dead-letters-during-shutdown = off - """)) - -} - -class ShoppingCartSpecMultiJvmNode1 extends ShoppingCartSpec -class ShoppingCartSpecMultiJvmNode2 extends ShoppingCartSpec -class ShoppingCartSpecMultiJvmNode3 extends ShoppingCartSpec - -class ShoppingCartSpec extends MultiNodeSpec(ShoppingCartSpec) with STMultiNodeSpec with ImplicitSender { - import ShoppingCartSpec._ - import ShoppingCart._ - - override def initialParticipants = roles.size - - val cluster = Cluster(system) - val 
shoppingCart = system.actorOf(ShoppingCart.props("user-1")) - - def join(from: RoleName, to: RoleName): Unit = { - runOn(from) { - cluster join node(to).address - } - enterBarrier(from.name + "-joined") - } - - "Demo of a replicated shopping cart" must { - "join cluster" in within(20.seconds) { - join(node1, node1) - join(node2, node1) - join(node3, node1) - - awaitAssert { - DistributedData(system).replicator ! GetReplicaCount - expectMsg(ReplicaCount(roles.size)) - } - enterBarrier("after-1") - } - - "handle updates directly after start" in within(15.seconds) { - runOn(node2) { - shoppingCart ! new ShoppingCart.AddItem(new LineItem("1", "Apples", 2)) - shoppingCart ! new ShoppingCart.AddItem(new LineItem("2", "Oranges", 3)) - } - enterBarrier("updates-done") - - awaitAssert { - shoppingCart ! ShoppingCart.GET_CART - val cart = expectMsgType[Cart] - cart.items.asScala.toSet should be(Set( - new LineItem("1", "Apples", 2), new LineItem("2", "Oranges", 3))) - } - - enterBarrier("after-2") - } - - "handle updates from different nodes" in within(5.seconds) { - runOn(node2) { - shoppingCart ! new ShoppingCart.AddItem(new LineItem("1", "Apples", 5)) - shoppingCart ! new ShoppingCart.RemoveItem("2") - } - runOn(node3) { - shoppingCart ! new ShoppingCart.AddItem(new LineItem("3", "Bananas", 4)) - } - enterBarrier("updates-done") - - awaitAssert { - shoppingCart ! 
ShoppingCart.GET_CART - val cart = expectMsgType[Cart] - cart.items.asScala.toSet should be( - Set(new LineItem("1", "Apples", 7), new LineItem("3", "Bananas", 4))) - } - - enterBarrier("after-3") - } - - } - -} - diff --git a/akka-samples/akka-sample-distributed-data-java/src/multi-jvm/scala/sample/distributeddata/VotingServiceSpec.scala b/akka-samples/akka-sample-distributed-data-java/src/multi-jvm/scala/sample/distributeddata/VotingServiceSpec.scala deleted file mode 100644 index 8f16dfbe86..0000000000 --- a/akka-samples/akka-sample-distributed-data-java/src/multi-jvm/scala/sample/distributeddata/VotingServiceSpec.scala +++ /dev/null @@ -1,101 +0,0 @@ -package sample.distributeddata - -import java.math.BigInteger -import scala.concurrent.duration._ -import akka.actor.Props -import akka.cluster.Cluster -import akka.cluster.ddata.DistributedData -import akka.cluster.ddata.Replicator.GetReplicaCount -import akka.cluster.ddata.Replicator.ReplicaCount -import akka.remote.testconductor.RoleName -import akka.remote.testkit.MultiNodeConfig -import akka.remote.testkit.MultiNodeSpec -import akka.testkit._ -import com.typesafe.config.ConfigFactory - -object VotingServiceSpec extends MultiNodeConfig { - val node1 = role("node-1") - val node2 = role("node-2") - val node3 = role("node-3") - - commonConfig(ConfigFactory.parseString(""" - akka.loglevel = INFO - akka.actor.provider = "cluster" - akka.log-dead-letters-during-shutdown = off - """)) - -} - -class VotingServiceSpecMultiJvmNode1 extends VotingServiceSpec -class VotingServiceSpecMultiJvmNode2 extends VotingServiceSpec -class VotingServiceSpecMultiJvmNode3 extends VotingServiceSpec - -class VotingServiceSpec extends MultiNodeSpec(VotingServiceSpec) with STMultiNodeSpec with ImplicitSender { - import VotingServiceSpec._ - - override def initialParticipants = roles.size - - val cluster = Cluster(system) - - def join(from: RoleName, to: RoleName): Unit = { - runOn(from) { - cluster join node(to).address - } - 
enterBarrier(from.name + "-joined") - } - - "Demo of a replicated voting" must { - - "join cluster" in within(20.seconds) { - join(node1, node1) - join(node2, node1) - join(node3, node1) - - awaitAssert { - DistributedData(system).replicator ! GetReplicaCount - expectMsg(ReplicaCount(roles.size)) - } - enterBarrier("after-1") - } - - "count votes correctly" in within(15.seconds) { - import VotingService._ - val votingService = system.actorOf(Props[VotingService], "votingService") - val N = 1000 - runOn(node1) { - votingService ! VotingService.OPEN - for (n ← 1 to N) { - votingService ! new Vote("#" + ((n % 20) + 1)) - } - } - runOn(node2, node3) { - // wait for it to open - val p = TestProbe() - awaitAssert { - votingService.tell(VotingService.GET_VOTES, p.ref) - p.expectMsgType[Votes](3.seconds).open should be(true) - } - for (n ← 1 to N) { - votingService ! new Vote("#" + ((n % 20) + 1)) - } - } - enterBarrier("voting-done") - runOn(node3) { - votingService ! VotingService.CLOSE - } - - val expected = (1 to 20).map(n => "#" + n -> BigInteger.valueOf(3L * N / 20)).toMap - awaitAssert { - votingService ! 
VotingService.GET_VOTES - val votes = expectMsgType[Votes](3.seconds) - votes.open should be (false) - import scala.collection.JavaConverters._ - votes.result.asScala.toMap should be (expected) - } - - enterBarrier("after-2") - } - } - -} - diff --git a/akka-samples/akka-sample-distributed-data-java/src/test/resources/reference.conf b/akka-samples/akka-sample-distributed-data-java/src/test/resources/reference.conf deleted file mode 100644 index 90492329b7..0000000000 --- a/akka-samples/akka-sample-distributed-data-java/src/test/resources/reference.conf +++ /dev/null @@ -1,4 +0,0 @@ -# Don't terminate ActorSystem in tests -akka.coordinated-shutdown.run-by-jvm-shutdown-hook = off -akka.coordinated-shutdown.terminate-actor-system = off -akka.cluster.run-coordinated-shutdown-when-down = off diff --git a/akka-samples/akka-sample-distributed-data-java/tutorial/index.html b/akka-samples/akka-sample-distributed-data-java/tutorial/index.html deleted file mode 100644 index 7b18760709..0000000000 --- a/akka-samples/akka-sample-distributed-data-java/tutorial/index.html +++ /dev/null @@ -1,306 +0,0 @@ - - -Akka Distributed Data Samples with Java - - - - -
-

-This tutorial contains 5 samples illustrating how to use -Akka Distributed Data. -

-
    -
  • Low Latency Voting Service
  • -
  • Highly Available Shopping Cart
  • -
  • Distributed Service Registry
  • -
  • Replicated Cache
  • -
  • Replicated Metrics
  • -
- -

-Akka Distributed Data is useful when you need to share data between nodes in an -Akka Cluster. The data is accessed with an actor providing a key-value store like API. -The keys are unique identifiers with type information of the data values. The values -are Conflict Free Replicated Data Types (CRDTs). -

- -

-All data entries are spread to all nodes, or nodes with a certain role, in the cluster -via direct replication and gossip based dissemination. You have fine grained control -of the consistency level for reads and writes. -

- -

-The nature CRDTs makes it possible to perform updates from any node without coordination. -Concurrent updates from different nodes will automatically be resolved by the monotonic -merge function, which all data types must provide. The state changes always converge. -Several useful data types for counters, sets, maps and registers are provided and -you can also implement your own custom data types. -

- -

-It is eventually consistent and geared toward providing high read and write availability -(partition tolerance), with low latency. Note that in an eventually consistent system a read may return an -out-of-date value. -

- -

-Note that there are some -Limitations -that you should be aware of. For example, Akka Distributed Data is not intended for Big Data. -

- -
- -
- -

Low Latency Voting Service

- -

-Distributed Data is great for low latency services, since you can update or get data from the local replica -without immediate communication with other nodes. -

- -

-Open VotingService.java. -

- -

-VotingService is an actor for low latency counting of votes on several cluster nodes and aggregation -of the grand total number of votes. The actor is started on each cluster node. First it expects an -OPEN message on one or several nodes. After that the counting can begin. The open -signal is immediately replicated to all nodes with a boolean -Flag. -Note writeAll. -

- -

-Update<Flag> update = new Update<>(openedKey, Flag.create(), writeAll, curr -> curr.switchOn());
-
- -

-The actor is subscribing to changes of the OpenedKey and other instances of this actor, -also on other nodes, will be notified when the flag is changed. -

- -

-replicator.tell(new Subscribe<>(openedKey, self()), ActorRef.noSender());
-
- -

-.match(Changed.class, c -> c.key().equals(openedKey), c -> receiveOpenedChanged((Changed<Flag>) c))
-
- -

-The counters are kept in a -PNCounterMap -and updated with: -

- -

-Update<PNCounterMap> update = new Update<>(countersKey, PNCounterMap.create(), Replicator.writeLocal(),
-        curr -> curr.increment(node, vote.participant, 1));
- replicator.tell(update, self());
-
- -

-Incrementing the counter is very fast, since it only involves communication with the local -Replicator actor. Note writeLocal. Those updates are also spread -to other nodes, but that is performed in the background. -

- -

-The total number of votes is retrieved with: -

- -

-Optional<Object> ctx = Optional.of(sender());
-replicator.tell(new Replicator.Get<PNCounterMap>(countersKey, readAll, ctx), self());
-
- -

-.match(GetSuccess.class, g -> g.key().equals(countersKey),
-   g -> receiveGetSuccess(open, (GetSuccess<PNCounterMap>) g))
-
- -

-private void receiveGetSuccess(boolean open, GetSuccess<PNCounterMap> g) {
-  Map<String, BigInteger> result = g.dataValue().getEntries();
-  ActorRef replyTo = (ActorRef) g.getRequest().get();
-  replyTo.tell(new Votes(result, open), self());
-}
-
- -

-The multi-node test for the VotingService can be found in -VotingServiceSpec.scala. -

- -

-Read the -Using the Replicator -documentation for more details of how to use Get, Update, and Subscribe. -

- -
- -
-

Highly Available Shopping Cart

- -

-Distributed Data is great for highly available services, since it is possible to perform -updates to the local node (or currently available nodes) during a network partition. -

- -

-Open ShoppingCart.java. -

- -

-ShoppingCart is an actor that holds the selected items to buy for a user. -The actor instance for a specific user may be started where ever needed in the cluster, i.e. several -instances may be started on different nodes and used at the same time. -

- -

-Each product in the cart is represented by a LineItem and all items in the cart -is collected in a LWWMap. -

- -

-The actor handles the commands GET_CART, AddItem and RemoveItem. -To get the latest updates in case the same shopping cart is used from several nodes it is using -consistency level of readMajority and writeMajority, but that is only -done to reduce the risk of seeing old data. If such reads and writes cannot be completed due to a -network partition it falls back to reading/writing from the local replica (see GetFailure). -Local reads and writes will always be successful and when the network partition heals the updated -shopping carts will be be disseminated by the -gossip protocol -and the LWWMap CRDTs are merged, i.e. it is a highly available shopping cart. -

- -

-The multi-node test for the ShoppingCart can be found in -ShoppingCartSpec.scala. -

- -

-Read the -Consistency -section in the documentation to understand the consistency considerations. -

- -
- -
-

Distributed Service Registry

- -

-Have you ever had the need to lookup actors by name in an Akka Cluster? -This example illustrates how you could implement such a registry. It is probably not -feature complete, but should be a good starting point. -

- -

-Open ServiceRegistry.java. -

- -

-ServiceRegistry is an actor that is started on each node in the cluster. -It supports two basic commands: -

-
    -
  • Register to bind an ActorRef to a name, - several actors can be bound to the same name
  • -
  • Lookup get currently bound services of a given name
  • -
- -

-For each named service it is using an -ORSet. -Here we are using top level ORSet entries. An alternative would have been to use a -ORMultiMap holding all services. That would have a disadvantage if we have many services. -When a data entry is changed the full state of that entry is replicated to other nodes, i.e. when you -update a map the whole map is replicated. -

- -

-The ServiceRegistry is subscribing to changes of a GSet where we add -the names of all services. It is also subscribing to all such service keys to get notifications when -actors are added or removed to a named service. -

- -

-The multi-node test for the ServiceRegistry can be found in -ServiceRegistrySpec.scala. -

- -
- -
-

Replicated Cache

- -

-This example illustrates a simple key-value cache. -

- -

-Open ReplicatedCache.scala. -

- -

-ReplicatedCache is an actor that is started on each node in the cluster. -It supports three commands: PutInCache, GetFromCache and Evict. -

- -

-It is splitting up the key space in 100 top level keys, each with a LWWMap. -When a data entry is changed the full state of that entry is replicated to other nodes, i.e. when you -update a map the whole map is replicated. Therefore, instead of using one ORMap with 1000 elements it -is more efficient to split that up in 100 top level ORMap entries with 10 elements each. Top level -entries are replicated individually, which has the trade-off that different entries may not be -replicated at the same time and you may see inconsistencies between related entries. -Separate top level entries cannot be updated atomically together. -

- -

-The multi-node test for the ReplicatedCache can be found in -ReplicatedCacheSpec.scala. -

- -
- -
-

Replicated Metrics

- -

-This example illustrates to spread metrics data to all nodes in an Akka cluster. -

- -

-Open ReplicatedMetrics.java. -

- -

-ReplicatedMetrics is an actor that is started on each node in the cluster. -Periodically it collects some metrics, in this case used and max heap size. -Each metrics type is stored in a LWWMap where the key in the map is the address of -the node. The values are disseminated to other nodes with the gossip protocol. -

- -

-The multi-node test for the ReplicatedCache can be found in -ReplicatedMetricsSpec.scala. -

- -

-Note that there are some -Limitations -that you should be aware of. For example, Akka Distributed Data is not intended for Big Data. -

- -
- - - diff --git a/akka-samples/akka-sample-distributed-data-scala/.gitignore b/akka-samples/akka-sample-distributed-data-scala/.gitignore deleted file mode 100644 index 660c959e44..0000000000 --- a/akka-samples/akka-sample-distributed-data-scala/.gitignore +++ /dev/null @@ -1,17 +0,0 @@ -*# -*.iml -*.ipr -*.iws -*.pyc -*.tm.epoch -*.vim -*-shim.sbt -.idea/ -/project/plugins/project -project/boot -target/ -/logs -.cache -.classpath -.project -.settings \ No newline at end of file diff --git a/akka-samples/akka-sample-distributed-data-scala/COPYING b/akka-samples/akka-sample-distributed-data-scala/COPYING deleted file mode 100644 index 0e259d42c9..0000000000 --- a/akka-samples/akka-sample-distributed-data-scala/COPYING +++ /dev/null @@ -1,121 +0,0 @@ -Creative Commons Legal Code - -CC0 1.0 Universal - - CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE - LEGAL SERVICES. DISTRIBUTION OF THIS DOCUMENT DOES NOT CREATE AN - ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS - INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES - REGARDING THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS - PROVIDED HEREUNDER, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM - THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS PROVIDED - HEREUNDER. - -Statement of Purpose - -The laws of most jurisdictions throughout the world automatically confer -exclusive Copyright and Related Rights (defined below) upon the creator -and subsequent owner(s) (each and all, an "owner") of an original work of -authorship and/or a database (each, a "Work"). 
- -Certain owners wish to permanently relinquish those rights to a Work for -the purpose of contributing to a commons of creative, cultural and -scientific works ("Commons") that the public can reliably and without fear -of later claims of infringement build upon, modify, incorporate in other -works, reuse and redistribute as freely as possible in any form whatsoever -and for any purposes, including without limitation commercial purposes. -These owners may contribute to the Commons to promote the ideal of a free -culture and the further production of creative, cultural and scientific -works, or to gain reputation or greater distribution for their Work in -part through the use and efforts of others. - -For these and/or other purposes and motivations, and without any -expectation of additional consideration or compensation, the person -associating CC0 with a Work (the "Affirmer"), to the extent that he or she -is an owner of Copyright and Related Rights in the Work, voluntarily -elects to apply CC0 to the Work and publicly distribute the Work under its -terms, with knowledge of his or her Copyright and Related Rights in the -Work and the meaning and intended legal effect of CC0 on those rights. - -1. Copyright and Related Rights. A Work made available under CC0 may be -protected by copyright and related or neighboring rights ("Copyright and -Related Rights"). Copyright and Related Rights include, but are not -limited to, the following: - - i. the right to reproduce, adapt, distribute, perform, display, - communicate, and translate a Work; - ii. moral rights retained by the original author(s) and/or performer(s); -iii. publicity and privacy rights pertaining to a person's image or - likeness depicted in a Work; - iv. rights protecting against unfair competition in regards to a Work, - subject to the limitations in paragraph 4(a), below; - v. rights protecting the extraction, dissemination, use and reuse of data - in a Work; - vi. 
database rights (such as those arising under Directive 96/9/EC of the - European Parliament and of the Council of 11 March 1996 on the legal - protection of databases, and under any national implementation - thereof, including any amended or successor version of such - directive); and -vii. other similar, equivalent or corresponding rights throughout the - world based on applicable law or treaty, and any national - implementations thereof. - -2. Waiver. To the greatest extent permitted by, but not in contravention -of, applicable law, Affirmer hereby overtly, fully, permanently, -irrevocably and unconditionally waives, abandons, and surrenders all of -Affirmer's Copyright and Related Rights and associated claims and causes -of action, whether now known or unknown (including existing as well as -future claims and causes of action), in the Work (i) in all territories -worldwide, (ii) for the maximum duration provided by applicable law or -treaty (including future time extensions), (iii) in any current or future -medium and for any number of copies, and (iv) for any purpose whatsoever, -including without limitation commercial, advertising or promotional -purposes (the "Waiver"). Affirmer makes the Waiver for the benefit of each -member of the public at large and to the detriment of Affirmer's heirs and -successors, fully intending that such Waiver shall not be subject to -revocation, rescission, cancellation, termination, or any other legal or -equitable action to disrupt the quiet enjoyment of the Work by the public -as contemplated by Affirmer's express Statement of Purpose. - -3. Public License Fallback. Should any part of the Waiver for any reason -be judged legally invalid or ineffective under applicable law, then the -Waiver shall be preserved to the maximum extent permitted taking into -account Affirmer's express Statement of Purpose. 
In addition, to the -extent the Waiver is so judged Affirmer hereby grants to each affected -person a royalty-free, non transferable, non sublicensable, non exclusive, -irrevocable and unconditional license to exercise Affirmer's Copyright and -Related Rights in the Work (i) in all territories worldwide, (ii) for the -maximum duration provided by applicable law or treaty (including future -time extensions), (iii) in any current or future medium and for any number -of copies, and (iv) for any purpose whatsoever, including without -limitation commercial, advertising or promotional purposes (the -"License"). The License shall be deemed effective as of the date CC0 was -applied by Affirmer to the Work. Should any part of the License for any -reason be judged legally invalid or ineffective under applicable law, such -partial invalidity or ineffectiveness shall not invalidate the remainder -of the License, and in such case Affirmer hereby affirms that he or she -will not (i) exercise any of his or her remaining Copyright and Related -Rights in the Work or (ii) assert any associated claims and causes of -action with respect to the Work, in either case contrary to Affirmer's -express Statement of Purpose. - -4. Limitations and Disclaimers. - - a. No trademark or patent rights held by Affirmer are waived, abandoned, - surrendered, licensed or otherwise affected by this document. - b. Affirmer offers the Work as-is and makes no representations or - warranties of any kind concerning the Work, express, implied, - statutory or otherwise, including without limitation warranties of - title, merchantability, fitness for a particular purpose, non - infringement, or the absence of latent or other defects, accuracy, or - the present or absence of errors, whether or not discoverable, all to - the greatest extent permissible under applicable law. - c. 
Affirmer disclaims responsibility for clearing rights of other persons - that may apply to the Work or any use thereof, including without - limitation any person's Copyright and Related Rights in the Work. - Further, Affirmer disclaims responsibility for obtaining any necessary - consents, permissions or other rights required for any use of the - Work. - d. Affirmer understands and acknowledges that Creative Commons is not a - party to this document and has no duty or obligation with respect to - this CC0 or use of the Work. diff --git a/akka-samples/akka-sample-distributed-data-scala/LICENSE b/akka-samples/akka-sample-distributed-data-scala/LICENSE deleted file mode 100644 index 2a18b45589..0000000000 --- a/akka-samples/akka-sample-distributed-data-scala/LICENSE +++ /dev/null @@ -1,10 +0,0 @@ -Activator Template by Lightbend - -Licensed under Public Domain (CC0) - -To the extent possible under law, the person who associated CC0 with -this Activator Tempate has waived all copyright and related or neighboring -rights to this Activator Template. - -You should have received a copy of the CC0 legalcode along with this -work. If not, see . 
diff --git a/akka-samples/akka-sample-distributed-data-scala/activator.properties b/akka-samples/akka-sample-distributed-data-scala/activator.properties deleted file mode 100644 index 55ffb489d7..0000000000 --- a/akka-samples/akka-sample-distributed-data-scala/activator.properties +++ /dev/null @@ -1,7 +0,0 @@ -name=akka-sample-distributed-data-scala -title=Akka Distributed Data Samples with Scala -description=Akka Distributed Data Samples with Scala -tags=akka,cluster,scala,sample,distributed-data -authorName=Akka Team -authorLink=http://akka.io/ -sourceLink=https://github.com/akka/akka diff --git a/akka-samples/akka-sample-distributed-data-scala/build.sbt b/akka-samples/akka-sample-distributed-data-scala/build.sbt deleted file mode 100644 index 32cc9bc4e5..0000000000 --- a/akka-samples/akka-sample-distributed-data-scala/build.sbt +++ /dev/null @@ -1,49 +0,0 @@ -import com.typesafe.sbt.SbtMultiJvm -import com.typesafe.sbt.SbtMultiJvm.MultiJvmKeys.MultiJvm - -val akkaVersion = "2.5-SNAPSHOT" - -val project = Project( - id = "akka-sample-distributed-data-scala", - base = file(".") - ) - .settings(SbtMultiJvm.multiJvmSettings: _*) - .settings( - name := "akka-sample-distributed-data-scala", - version := "2.5-SNAPSHOT", - scalaVersion := "2.11.8", - scalacOptions in Compile ++= Seq("-encoding", "UTF-8", "-target:jvm-1.8", "-deprecation", "-feature", "-unchecked", "-Xlog-reflective-calls", "-Xlint"), - javacOptions in Compile ++= Seq("-source", "1.8", "-target", "1.8", "-Xlint:unchecked", "-Xlint:deprecation"), - libraryDependencies ++= Seq( - "com.typesafe.akka" %% "akka-actor" % akkaVersion, - "com.typesafe.akka" %% "akka-remote" % akkaVersion, - "com.typesafe.akka" %% "akka-cluster" % akkaVersion, - "com.typesafe.akka" %% "akka-distributed-data-experimental" % akkaVersion, - "com.typesafe.akka" %% "akka-multi-node-testkit" % akkaVersion, - "org.scalatest" %% "scalatest" % "2.2.1" % "test"), - javaOptions in run ++= Seq( - "-Xms128m", "-Xmx1024m"), - Keys.fork in run 
:= true, - // make sure that MultiJvm test are compiled by the default test compilation - compile in MultiJvm <<= (compile in MultiJvm) triggeredBy (compile in Test), - // disable parallel tests - parallelExecution in Test := false, - // make sure that MultiJvm tests are executed by the default test target, - // and combine the results from ordinary test and multi-jvm tests - executeTests in Test <<= (executeTests in Test, executeTests in MultiJvm) map { - case (testResults, multiNodeResults) => - val overall = - if (testResults.overall.id < multiNodeResults.overall.id) - multiNodeResults.overall - else - testResults.overall - Tests.Output(overall, - testResults.events ++ multiNodeResults.events, - testResults.summaries ++ multiNodeResults.summaries) - }, - licenses := Seq(("CC0", url("http://creativecommons.org/publicdomain/zero/1.0"))) - ) - .configs (MultiJvm) - - -fork in run := true diff --git a/akka-samples/akka-sample-distributed-data-scala/project/build.properties b/akka-samples/akka-sample-distributed-data-scala/project/build.properties deleted file mode 100644 index 748703f770..0000000000 --- a/akka-samples/akka-sample-distributed-data-scala/project/build.properties +++ /dev/null @@ -1 +0,0 @@ -sbt.version=0.13.7 diff --git a/akka-samples/akka-sample-distributed-data-scala/project/plugins.sbt b/akka-samples/akka-sample-distributed-data-scala/project/plugins.sbt deleted file mode 100644 index c3e7d797de..0000000000 --- a/akka-samples/akka-sample-distributed-data-scala/project/plugins.sbt +++ /dev/null @@ -1,4 +0,0 @@ - -resolvers += Classpaths.typesafeResolver - -addSbtPlugin("com.typesafe.sbt" % "sbt-multi-jvm" % "0.3.8") diff --git a/akka-samples/akka-sample-distributed-data-scala/project/sbt-ui.sbt b/akka-samples/akka-sample-distributed-data-scala/project/sbt-ui.sbt deleted file mode 100644 index 7c28b97b34..0000000000 --- a/akka-samples/akka-sample-distributed-data-scala/project/sbt-ui.sbt +++ /dev/null @@ -1,3 +0,0 @@ -// This plugin represents 
functionality that is to be added to sbt in the future - -addSbtPlugin("org.scala-sbt" % "sbt-core-next" % "0.1.1") \ No newline at end of file diff --git a/akka-samples/akka-sample-distributed-data-scala/src/main/resources/application.conf b/akka-samples/akka-sample-distributed-data-scala/src/main/resources/application.conf deleted file mode 100644 index b7f09dfe48..0000000000 --- a/akka-samples/akka-sample-distributed-data-scala/src/main/resources/application.conf +++ /dev/null @@ -1,21 +0,0 @@ -akka { - actor { - provider = "cluster" - } - remote { - log-remote-lifecycle-events = off - netty.tcp { - hostname = "127.0.0.1" - port = 0 - } - } - - cluster { - seed-nodes = [ - "akka.tcp://ClusterSystem@127.0.0.1:2551", - "akka.tcp://ClusterSystem@127.0.0.1:2552"] - - auto-down-unreachable-after = 10s - } -} - diff --git a/akka-samples/akka-sample-distributed-data-scala/src/main/scala/sample/distributeddata/ReplicatedCache.scala b/akka-samples/akka-sample-distributed-data-scala/src/main/scala/sample/distributeddata/ReplicatedCache.scala deleted file mode 100644 index 7ff3e9a90a..0000000000 --- a/akka-samples/akka-sample-distributed-data-scala/src/main/scala/sample/distributeddata/ReplicatedCache.scala +++ /dev/null @@ -1,52 +0,0 @@ -package sample.distributeddata - -import akka.actor.Actor -import akka.actor.ActorRef -import akka.actor.Props -import akka.cluster.Cluster -import akka.cluster.ddata.DistributedData -import akka.cluster.ddata.LWWMap -import akka.cluster.ddata.LWWMapKey - -object ReplicatedCache { - import akka.cluster.ddata.Replicator._ - - def props: Props = Props[ReplicatedCache] - - private final case class Request(key: String, replyTo: ActorRef) - - final case class PutInCache(key: String, value: Any) - final case class GetFromCache(key: String) - final case class Cached(key: String, value: Option[Any]) - final case class Evict(key: String) -} - -class ReplicatedCache extends Actor { - import akka.cluster.ddata.Replicator._ - import ReplicatedCache._ 
- - val replicator = DistributedData(context.system).replicator - implicit val cluster = Cluster(context.system) - - def dataKey(entryKey: String): LWWMapKey[String, Any] = - LWWMapKey("cache-" + math.abs(entryKey.hashCode) % 100) - - def receive = { - case PutInCache(key, value) => - replicator ! Update(dataKey(key), LWWMap(), WriteLocal)(_ + (key -> value)) - case Evict(key) => - replicator ! Update(dataKey(key), LWWMap(), WriteLocal)(_ - key) - case GetFromCache(key) => - replicator ! Get(dataKey(key), ReadLocal, Some(Request(key, sender()))) - case g @ GetSuccess(LWWMapKey(_), Some(Request(key, replyTo))) => - g.get(dataKey(key)).get(key) match { - case Some(value) => replyTo ! Cached(key, Some(value)) - case None => replyTo ! Cached(key, None) - } - - case NotFound(_, Some(Request(key, replyTo))) => - replyTo ! Cached(key, None) - case _: UpdateResponse[_] => // ok - } - -} diff --git a/akka-samples/akka-sample-distributed-data-scala/src/main/scala/sample/distributeddata/ReplicatedMetrics.scala b/akka-samples/akka-sample-distributed-data-scala/src/main/scala/sample/distributeddata/ReplicatedMetrics.scala deleted file mode 100644 index c983b23381..0000000000 --- a/akka-samples/akka-sample-distributed-data-scala/src/main/scala/sample/distributeddata/ReplicatedMetrics.scala +++ /dev/null @@ -1,115 +0,0 @@ -package sample.distributeddata - -import java.lang.management.ManagementFactory -import java.lang.management.MemoryMXBean -import scala.concurrent.duration._ -import scala.concurrent.duration.FiniteDuration -import akka.actor.Actor -import akka.actor.ActorLogging -import akka.actor.Address -import akka.actor.Props -import akka.cluster.Cluster -import akka.cluster.ClusterEvent.InitialStateAsEvents -import akka.cluster.ClusterEvent.MemberRemoved -import akka.cluster.ClusterEvent.MemberUp -import akka.cluster.ddata.DistributedData -import akka.cluster.ddata.LWWMap -import akka.cluster.ddata.LWWMapKey - -object ReplicatedMetrics { - import 
akka.cluster.ddata.Replicator._ - - def props(measureInterval: FiniteDuration, cleanupInterval: FiniteDuration): Props = - Props(new ReplicatedMetrics(measureInterval, cleanupInterval)) - - def props: Props = props(1.second, 1.minute) - - private case object Tick - private case object Cleanup - - case class UsedHeap(percentPerNode: Map[String, Double]) { - override def toString = - percentPerNode.toSeq.sortBy(_._1).map { - case (key, value) => key + " --> " + value + " %" - }.mkString("\n") - } - - def nodeKey(address: Address): String = address.host.get + ":" + address.port.get - -} - -class ReplicatedMetrics(measureInterval: FiniteDuration, cleanupInterval: FiniteDuration) - extends Actor with ActorLogging { - import akka.cluster.ddata.Replicator._ - import ReplicatedMetrics._ - - val replicator = DistributedData(context.system).replicator - implicit val cluster = Cluster(context.system) - val node = nodeKey(cluster.selfAddress) - - val tickTask = context.system.scheduler.schedule(measureInterval, measureInterval, - self, Tick)(context.dispatcher) - val cleanupTask = context.system.scheduler.schedule(cleanupInterval, cleanupInterval, - self, Cleanup)(context.dispatcher) - val memoryMBean: MemoryMXBean = ManagementFactory.getMemoryMXBean - - val UsedHeapKey = LWWMapKey[String, Long]("usedHeap") - val MaxHeapKey = LWWMapKey[String, Long]("maxHeap") - - replicator ! Subscribe(UsedHeapKey, self) - replicator ! Subscribe(MaxHeapKey, self) - - cluster.subscribe(self, InitialStateAsEvents, classOf[MemberUp], classOf[MemberRemoved]) - - override def postStop(): Unit = { - tickTask.cancel() - cluster.unsubscribe(self) - super.postStop() - } - - var maxHeap = Map.empty[String, Long] - var nodesInCluster = Set.empty[String] - - def receive = { - case Tick => - val heap = memoryMBean.getHeapMemoryUsage - val used = heap.getUsed - val max = heap.getMax - replicator ! Update(UsedHeapKey, LWWMap.empty[String, Long], WriteLocal)(_ + (node -> used)) - replicator ! 
Update(MaxHeapKey, LWWMap.empty[String, Long], WriteLocal) { data => - data.get(node) match { - case Some(`max`) => data // unchanged - case _ => data + (node -> max) - } - } - - case c @ Changed(MaxHeapKey) => - maxHeap = c.get(MaxHeapKey).entries - - case c @ Changed(UsedHeapKey) => - val usedHeapPercent = UsedHeap(c.get(UsedHeapKey).entries.collect { - case (key, value) if maxHeap.contains(key) => - (key -> (value.toDouble / maxHeap(key)) * 100.0) - }) - log.debug("Node {} observed:\n{}", node, usedHeapPercent) - context.system.eventStream.publish(usedHeapPercent) - - case _: UpdateResponse[_] => // ok - - case MemberUp(m) => - nodesInCluster += nodeKey(m.address) - - case MemberRemoved(m, _) => - nodesInCluster -= nodeKey(m.address) - if (m.address == cluster.selfAddress) - context.stop(self) - - case Cleanup => - def cleanupRemoved(data: LWWMap[String, Long]): LWWMap[String, Long] = - (data.entries.keySet -- nodesInCluster).foldLeft(data) { case (d, key) => d - key } - - replicator ! Update(UsedHeapKey, LWWMap.empty[String, Long], WriteLocal)(cleanupRemoved) - replicator ! 
Update(MaxHeapKey, LWWMap.empty[String, Long], WriteLocal)(cleanupRemoved) - } - -} diff --git a/akka-samples/akka-sample-distributed-data-scala/src/main/scala/sample/distributeddata/ServiceRegistry.scala b/akka-samples/akka-sample-distributed-data-scala/src/main/scala/sample/distributeddata/ServiceRegistry.scala deleted file mode 100644 index dfa3722257..0000000000 --- a/akka-samples/akka-sample-distributed-data-scala/src/main/scala/sample/distributeddata/ServiceRegistry.scala +++ /dev/null @@ -1,127 +0,0 @@ -package sample.distributeddata - -import akka.actor.Actor -import akka.actor.ActorLogging -import akka.actor.ActorRef -import akka.actor.Props -import akka.actor.Terminated -import akka.cluster.Cluster -import akka.cluster.ClusterEvent -import akka.cluster.ClusterEvent.LeaderChanged -import akka.cluster.ddata.DistributedData -import akka.cluster.ddata.GSet -import akka.cluster.ddata.GSetKey -import akka.cluster.ddata.Key -import akka.cluster.ddata.ORSet - -object ServiceRegistry { - import akka.cluster.ddata.Replicator._ - - val props: Props = Props[ServiceRegistry] - - /** - * Register a `service` with a `name`. Several services - * can be registered with the same `name`. - * It will be removed when it is terminated. - */ - final case class Register(name: String, service: ActorRef) - /** - * Lookup services registered for a `name`. [[Bindings]] will - * be sent to `sender()`. - */ - final case class Lookup(name: String) - /** - * Reply for [[Lookup]] - */ - final case class Bindings(name: String, services: Set[ActorRef]) - /** - * Published to `ActorSystem.eventStream` when services are changed. 
- */ - final case class BindingChanged(name: String, services: Set[ActorRef]) - - final case class ServiceKey(serviceName: String) extends Key[ORSet[ActorRef]](serviceName) - - private val AllServicesKey = GSetKey[ServiceKey]("service-keys") - -} - -class ServiceRegistry extends Actor with ActorLogging { - import akka.cluster.ddata.Replicator._ - import ServiceRegistry._ - - val replicator = DistributedData(context.system).replicator - implicit val cluster = Cluster(context.system) - - var keys = Set.empty[ServiceKey] - var services = Map.empty[String, Set[ActorRef]] - var leader = false - - def serviceKey(serviceName: String): ServiceKey = - ServiceKey("service:" + serviceName) - - override def preStart(): Unit = { - replicator ! Subscribe(AllServicesKey, self) - cluster.subscribe(self, ClusterEvent.InitialStateAsEvents, classOf[ClusterEvent.LeaderChanged]) - } - - override def postStop(): Unit = { - cluster.unsubscribe(self) - } - - def receive = { - case Register(name, service) => - val dKey = serviceKey(name) - // store the service names in a separate GSet to be able to - // get notifications of new names - if (!keys(dKey)) - replicator ! Update(AllServicesKey, GSet(), WriteLocal)(_ + dKey) - // add the service - replicator ! Update(dKey, ORSet(), WriteLocal)(_ + service) - - case Lookup(name) => - sender() ! Bindings(name, services.getOrElse(name, Set.empty)) - - case c @ Changed(AllServicesKey) => - val newKeys = c.get(AllServicesKey).elements - log.debug("Services changed, added: {}, all: {}", (newKeys -- keys), newKeys) - (newKeys -- keys).foreach { dKey => - // subscribe to get notifications of when services with this name are added or removed - replicator ! 
Subscribe(dKey, self) - } - keys = newKeys - - case c @ Changed(ServiceKey(serviceName)) => - val name = serviceName.split(":").tail.mkString - val newServices = c.get(serviceKey(name)).elements - log.debug("Services changed for name [{}]: {}", name, newServices) - services = services.updated(name, newServices) - context.system.eventStream.publish(BindingChanged(name, newServices)) - if (leader) - newServices.foreach(context.watch) // watch is idempotent - - case LeaderChanged(node) => - // Let one node (the leader) be responsible for removal of terminated services - // to avoid redundant work and too many death watch notifications. - // It is not critical to only do it from one node. - val wasLeader = leader - leader = node.exists(_ == cluster.selfAddress) - // when used with many (> 500) services you must increase the system message buffer - // `akka.remote.system-message-buffer-size` - if (!wasLeader && leader) - for (refs ← services.valuesIterator; ref ← refs) - context.watch(ref) - else if (wasLeader && !leader) - for (refs ← services.valuesIterator; ref ← refs) - context.unwatch(ref) - - case Terminated(ref) => - val names = services.collect { case (name, refs) if refs.contains(ref) => name } - names.foreach { name => - log.debug("Service with name [{}] terminated: {}", name, ref) - replicator ! 
Update(serviceKey(name), ORSet(), WriteLocal)(_ - ref) - } - - case _: UpdateResponse[_] => // ok - } - -} diff --git a/akka-samples/akka-sample-distributed-data-scala/src/main/scala/sample/distributeddata/VotingService.scala b/akka-samples/akka-sample-distributed-data-scala/src/main/scala/sample/distributeddata/VotingService.scala deleted file mode 100644 index e98b932e55..0000000000 --- a/akka-samples/akka-sample-distributed-data-scala/src/main/scala/sample/distributeddata/VotingService.scala +++ /dev/null @@ -1,88 +0,0 @@ -package sample.distributeddata - -import scala.concurrent.duration._ -import akka.cluster.ddata.DistributedData -import akka.cluster.ddata.FlagKey -import akka.actor.Actor -import akka.cluster.ddata.PNCounterMapKey -import akka.actor.ActorRef -import akka.cluster.Cluster -import akka.cluster.ddata.PNCounterMap -import akka.cluster.ddata.Flag - -object VotingService { - case object Open - case object OpenAck - case object Close - case object CloseAck - final case class Vote(participant: String) - case object GetVotes - final case class Votes(result: Map[String, BigInt], open: Boolean) - - private final case class GetVotesReq(replyTo: ActorRef) -} - -class VotingService extends Actor { - import akka.cluster.ddata.Replicator._ - import VotingService._ - - val replicator = DistributedData(context.system).replicator - implicit val cluster = Cluster(context.system) - val OpenedKey = FlagKey("contestOpened") - val ClosedKey = FlagKey("contestClosed") - val CountersKey = PNCounterMapKey[String]("contestCounters") - - replicator ! Subscribe(OpenedKey, self) - - def receive = { - case Open => - replicator ! Update(OpenedKey, Flag(), WriteAll(5.seconds))(_.switchOn) - becomeOpen() - - case c @ Changed(OpenedKey) if c.get(OpenedKey).enabled => - becomeOpen() - - case GetVotes => - sender() ! Votes(Map.empty, open = false) - } - - def becomeOpen(): Unit = { - replicator ! Unsubscribe(OpenedKey, self) - replicator ! 
Subscribe(ClosedKey, self) - context.become(open orElse getVotes(open = true)) - } - - def open: Receive = { - case v @ Vote(participant) => - val update = Update(CountersKey, PNCounterMap[String](), WriteLocal, request = Some(v)) { - _.increment(participant, 1) - } - replicator ! update - - case _: UpdateSuccess[_] => - - case Close => - replicator ! Update(ClosedKey, Flag(), WriteAll(5.seconds))(_.switchOn) - context.become(getVotes(open = false)) - - case c @ Changed(ClosedKey) if c.get(ClosedKey).enabled => - context.become(getVotes(open = false)) - } - - def getVotes(open: Boolean): Receive = { - case GetVotes => - replicator ! Get(CountersKey, ReadAll(3.seconds), Some(GetVotesReq(sender()))) - - case g @ GetSuccess(CountersKey, Some(GetVotesReq(replyTo))) => - val data = g.get(CountersKey) - replyTo ! Votes(data.entries, open) - - case NotFound(CountersKey, Some(GetVotesReq(replyTo))) => - replyTo ! Votes(Map.empty, open) - - case _: GetFailure[_] => - - case _: UpdateSuccess[_] => - } - -} diff --git a/akka-samples/akka-sample-distributed-data-scala/src/multi-jvm/scala/sample/distributeddata/ReplicatedCacheSpec.scala b/akka-samples/akka-sample-distributed-data-scala/src/multi-jvm/scala/sample/distributeddata/ReplicatedCacheSpec.scala deleted file mode 100644 index 6a61450fc3..0000000000 --- a/akka-samples/akka-sample-distributed-data-scala/src/multi-jvm/scala/sample/distributeddata/ReplicatedCacheSpec.scala +++ /dev/null @@ -1,134 +0,0 @@ -package sample.distributeddata - -import scala.concurrent.duration._ -import akka.cluster.Cluster -import akka.cluster.ddata.DistributedData -import akka.cluster.ddata.Replicator.GetReplicaCount -import akka.cluster.ddata.Replicator.ReplicaCount -import akka.remote.testconductor.RoleName -import akka.remote.testkit.MultiNodeConfig -import akka.remote.testkit.MultiNodeSpec -import akka.testkit._ -import com.typesafe.config.ConfigFactory - -object ReplicatedCacheSpec extends MultiNodeConfig { - val node1 = role("node-1") - 
val node2 = role("node-2") - val node3 = role("node-3") - - commonConfig(ConfigFactory.parseString(""" - akka.loglevel = INFO - akka.actor.provider = "cluster" - akka.log-dead-letters-during-shutdown = off - """)) - -} - -class ReplicatedCacheSpecMultiJvmNode1 extends ReplicatedCacheSpec -class ReplicatedCacheSpecMultiJvmNode2 extends ReplicatedCacheSpec -class ReplicatedCacheSpecMultiJvmNode3 extends ReplicatedCacheSpec - -class ReplicatedCacheSpec extends MultiNodeSpec(ReplicatedCacheSpec) with STMultiNodeSpec with ImplicitSender { - import ReplicatedCacheSpec._ - import ReplicatedCache._ - - override def initialParticipants = roles.size - - val cluster = Cluster(system) - val replicatedCache = system.actorOf(ReplicatedCache.props) - - def join(from: RoleName, to: RoleName): Unit = { - runOn(from) { - cluster join node(to).address - } - enterBarrier(from.name + "-joined") - } - - "Demo of a replicated cache" must { - "join cluster" in within(20.seconds) { - join(node1, node1) - join(node2, node1) - join(node3, node1) - - awaitAssert { - DistributedData(system).replicator ! GetReplicaCount - expectMsg(ReplicaCount(roles.size)) - } - enterBarrier("after-1") - } - - "replicate cached entry" in within(10.seconds) { - runOn(node1) { - replicatedCache ! PutInCache("key1", "A") - } - - awaitAssert { - val probe = TestProbe() - replicatedCache.tell(GetFromCache("key1"), probe.ref) - probe.expectMsg(Cached("key1", Some("A"))) - } - - enterBarrier("after-2") - } - - "replicate many cached entries" in within(10.seconds) { - runOn(node1) { - for (i ← 100 to 200) - replicatedCache ! PutInCache("key" + i, i) - } - - awaitAssert { - val probe = TestProbe() - for (i ← 100 to 200) { - replicatedCache.tell(GetFromCache("key" + i), probe.ref) - probe.expectMsg(Cached("key" + i, Some(i))) - } - } - - enterBarrier("after-3") - } - - "replicate evicted entry" in within(15.seconds) { - runOn(node1) { - replicatedCache ! 
PutInCache("key2", "B") - } - - awaitAssert { - val probe = TestProbe() - replicatedCache.tell(GetFromCache("key2"), probe.ref) - probe.expectMsg(Cached("key2", Some("B"))) - } - enterBarrier("key2-replicated") - - runOn(node3) { - replicatedCache ! Evict("key2") - } - - awaitAssert { - val probe = TestProbe() - replicatedCache.tell(GetFromCache("key2"), probe.ref) - probe.expectMsg(Cached("key2", None)) - } - - enterBarrier("after-4") - } - - "replicate updated cached entry" in within(10.seconds) { - runOn(node2) { - replicatedCache ! PutInCache("key1", "A2") - replicatedCache ! PutInCache("key1", "A3") - } - - awaitAssert { - val probe = TestProbe() - replicatedCache.tell(GetFromCache("key1"), probe.ref) - probe.expectMsg(Cached("key1", Some("A3"))) - } - - enterBarrier("after-5") - } - - } - -} - diff --git a/akka-samples/akka-sample-distributed-data-scala/src/multi-jvm/scala/sample/distributeddata/ReplicatedMetricsSpec.scala b/akka-samples/akka-sample-distributed-data-scala/src/multi-jvm/scala/sample/distributeddata/ReplicatedMetricsSpec.scala deleted file mode 100644 index e590c0cf12..0000000000 --- a/akka-samples/akka-sample-distributed-data-scala/src/multi-jvm/scala/sample/distributeddata/ReplicatedMetricsSpec.scala +++ /dev/null @@ -1,91 +0,0 @@ -package sample.distributeddata - -import scala.concurrent.duration._ -import akka.cluster.Cluster -import akka.cluster.ddata.DistributedData -import akka.cluster.ddata.Replicator.GetReplicaCount -import akka.cluster.ddata.Replicator.ReplicaCount -import akka.remote.testconductor.RoleName -import akka.remote.testkit.MultiNodeConfig -import akka.remote.testkit.MultiNodeSpec -import akka.testkit._ -import com.typesafe.config.ConfigFactory - -object ReplicatedMetricsSpec extends MultiNodeConfig { - val node1 = role("node-1") - val node2 = role("node-2") - val node3 = role("node-3") - - commonConfig(ConfigFactory.parseString(""" - akka.loglevel = INFO - akka.actor.provider = "cluster" - 
akka.log-dead-letters-during-shutdown = off - """)) - -} - -class ReplicatedMetricsSpecMultiJvmNode1 extends ReplicatedMetricsSpec -class ReplicatedMetricsSpecMultiJvmNode2 extends ReplicatedMetricsSpec -class ReplicatedMetricsSpecMultiJvmNode3 extends ReplicatedMetricsSpec - -class ReplicatedMetricsSpec extends MultiNodeSpec(ReplicatedMetricsSpec) with STMultiNodeSpec with ImplicitSender { - import ReplicatedMetricsSpec._ - import ReplicatedMetrics._ - - override def initialParticipants = roles.size - - val cluster = Cluster(system) - val replicatedMetrics = system.actorOf(ReplicatedMetrics.props(1.second, 3.seconds)) - - def join(from: RoleName, to: RoleName): Unit = { - runOn(from) { - cluster join node(to).address - } - enterBarrier(from.name + "-joined") - } - - "Demo of a replicated metrics" must { - "join cluster" in within(20.seconds) { - join(node1, node1) - join(node2, node1) - join(node3, node1) - - awaitAssert { - DistributedData(system).replicator ! GetReplicaCount - expectMsg(ReplicaCount(roles.size)) - } - enterBarrier("after-1") - } - - "replicate metrics" in within(10.seconds) { - val probe = TestProbe() - system.eventStream.subscribe(probe.ref, classOf[UsedHeap]) - awaitAssert { - probe.expectMsgType[UsedHeap](1.second).percentPerNode.size should be(3) - } - probe.expectMsgType[UsedHeap].percentPerNode.size should be(3) - probe.expectMsgType[UsedHeap].percentPerNode.size should be(3) - enterBarrier("after-2") - } - - "cleanup removed node" in within(25.seconds) { - val node3Address = node(node3).address - runOn(node1) { - cluster.leave(node3Address) - } - runOn(node1, node2) { - val probe = TestProbe() - system.eventStream.subscribe(probe.ref, classOf[UsedHeap]) - awaitAssert { - probe.expectMsgType[UsedHeap](1.second).percentPerNode.size should be(2) - } - probe.expectMsgType[UsedHeap].percentPerNode should not contain ( - nodeKey(node3Address)) - } - enterBarrier("after-3") - } - - } - -} - diff --git 
a/akka-samples/akka-sample-distributed-data-scala/src/multi-jvm/scala/sample/distributeddata/STMultiNodeSpec.scala b/akka-samples/akka-sample-distributed-data-scala/src/multi-jvm/scala/sample/distributeddata/STMultiNodeSpec.scala deleted file mode 100644 index 0daad1df58..0000000000 --- a/akka-samples/akka-sample-distributed-data-scala/src/multi-jvm/scala/sample/distributeddata/STMultiNodeSpec.scala +++ /dev/null @@ -1,17 +0,0 @@ -package sample.distributeddata - -import akka.remote.testkit.MultiNodeSpecCallbacks - -import org.scalatest.{ BeforeAndAfterAll, WordSpecLike } -import org.scalatest.Matchers - -/** - * Hooks up MultiNodeSpec with ScalaTest - */ -trait STMultiNodeSpec extends MultiNodeSpecCallbacks - with WordSpecLike with Matchers with BeforeAndAfterAll { - - override def beforeAll() = multiNodeSpecBeforeAll() - - override def afterAll() = multiNodeSpecAfterAll() -} diff --git a/akka-samples/akka-sample-distributed-data-scala/src/multi-jvm/scala/sample/distributeddata/ServiceRegistrySpec.scala b/akka-samples/akka-sample-distributed-data-scala/src/multi-jvm/scala/sample/distributeddata/ServiceRegistrySpec.scala deleted file mode 100644 index dcecbf68c7..0000000000 --- a/akka-samples/akka-sample-distributed-data-scala/src/multi-jvm/scala/sample/distributeddata/ServiceRegistrySpec.scala +++ /dev/null @@ -1,141 +0,0 @@ -package sample.distributeddata - -import scala.concurrent.duration._ -import akka.actor.Actor -import akka.actor.PoisonPill -import akka.actor.Props -import akka.cluster.Cluster -import akka.cluster.ddata.DistributedData -import akka.cluster.ddata.Replicator.GetReplicaCount -import akka.cluster.ddata.Replicator.ReplicaCount -import akka.remote.testconductor.RoleName -import akka.remote.testkit.MultiNodeConfig -import akka.remote.testkit.MultiNodeSpec -import akka.testkit._ -import com.typesafe.config.ConfigFactory - -object ServiceRegistrySpec extends MultiNodeConfig { - val node1 = role("node-1") - val node2 = role("node-2") - val node3 = 
role("node-3") - - commonConfig(ConfigFactory.parseString(""" - akka.loglevel = INFO - akka.actor.provider = "cluster" - akka.log-dead-letters-during-shutdown = off - """)) - - class Service extends Actor { - def receive = { - case s: String => sender() ! self.path.name + ": " + s - } - } - -} - -class ServiceRegistrySpecMultiJvmNode1 extends ServiceRegistrySpec -class ServiceRegistrySpecMultiJvmNode2 extends ServiceRegistrySpec -class ServiceRegistrySpecMultiJvmNode3 extends ServiceRegistrySpec - -class ServiceRegistrySpec extends MultiNodeSpec(ServiceRegistrySpec) with STMultiNodeSpec with ImplicitSender { - import ServiceRegistrySpec._ - import ServiceRegistry._ - - override def initialParticipants = roles.size - - val cluster = Cluster(system) - val registry = system.actorOf(ServiceRegistry.props) - - def join(from: RoleName, to: RoleName): Unit = { - runOn(from) { - cluster join node(to).address - } - enterBarrier(from.name + "-joined") - } - - "Demo of a replicated service registry" must { - "join cluster" in within(20.seconds) { - join(node1, node1) - join(node2, node1) - join(node3, node1) - - awaitAssert { - DistributedData(system).replicator ! GetReplicaCount - expectMsg(ReplicaCount(roles.size)) - } - enterBarrier("after-1") - } - - "replicate service entry" in within(10.seconds) { - runOn(node1) { - val a1 = system.actorOf(Props[Service], name = "a1") - registry ! Register("a", a1) - } - - awaitAssert { - val probe = TestProbe() - registry.tell(Lookup("a"), probe.ref) - probe.expectMsgType[Bindings].services.map(_.path.name) should be(Set("a1")) - } - - enterBarrier("after-2") - } - - "replicate updated service entry, and publish to even bus" in { - val probe = TestProbe() - system.eventStream.subscribe(probe.ref, classOf[BindingChanged]) - - runOn(node2) { - val a2 = system.actorOf(Props[Service], name = "a2") - registry ! 
Register("a", a2) - } - - probe.within(10.seconds) { - probe.expectMsgType[BindingChanged].services.map(_.path.name) should be(Set("a1", "a2")) - registry.tell(Lookup("a"), probe.ref) - probe.expectMsgType[Bindings].services.map(_.path.name) should be(Set("a1", "a2")) - } - - enterBarrier("after-4") - } - - "remove terminated service" in { - val probe = TestProbe() - system.eventStream.subscribe(probe.ref, classOf[BindingChanged]) - - runOn(node2) { - registry.tell(Lookup("a"), probe.ref) - val a2 = probe.expectMsgType[Bindings].services.find(_.path.name == "a2").get - a2 ! PoisonPill - } - - probe.within(10.seconds) { - probe.expectMsgType[BindingChanged].services.map(_.path.name) should be(Set("a1")) - registry.tell(Lookup("a"), probe.ref) - probe.expectMsgType[Bindings].services.map(_.path.name) should be(Set("a1")) - } - - enterBarrier("after-5") - } - - "replicate many service entries" in within(10.seconds) { - for (i ← 100 until 200) { - val service = system.actorOf(Props[Service], name = myself.name + "_" + i) - registry ! 
Register("a" + i, service) - } - - awaitAssert { - val probe = TestProbe() - for (i ← 100 until 200) { - registry.tell(Lookup("a" + i), probe.ref) - probe.expectMsgType[Bindings].services.map(_.path.name) should be(roles.map(_.name + "_" + i).toSet) - } - } - - enterBarrier("after-6") - } - - } - -} - diff --git a/akka-samples/akka-sample-distributed-data-scala/src/multi-jvm/scala/sample/distributeddata/ShoppingCartSpec.scala b/akka-samples/akka-sample-distributed-data-scala/src/multi-jvm/scala/sample/distributeddata/ShoppingCartSpec.scala deleted file mode 100644 index 81f64f8dff..0000000000 --- a/akka-samples/akka-sample-distributed-data-scala/src/multi-jvm/scala/sample/distributeddata/ShoppingCartSpec.scala +++ /dev/null @@ -1,98 +0,0 @@ -package sample.distributeddata - -import scala.concurrent.duration._ -import akka.cluster.Cluster -import akka.cluster.ddata.DistributedData -import akka.cluster.ddata.Replicator.GetReplicaCount -import akka.cluster.ddata.Replicator.ReplicaCount -import akka.remote.testconductor.RoleName -import akka.remote.testkit.MultiNodeConfig -import akka.remote.testkit.MultiNodeSpec -import akka.testkit._ -import com.typesafe.config.ConfigFactory - -object ShoppingCartSpec extends MultiNodeConfig { - val node1 = role("node-1") - val node2 = role("node-2") - val node3 = role("node-3") - - commonConfig(ConfigFactory.parseString(""" - akka.loglevel = INFO - akka.actor.provider = "cluster" - akka.log-dead-letters-during-shutdown = off - """)) - -} - -class ShoppingCartSpecMultiJvmNode1 extends ShoppingCartSpec -class ShoppingCartSpecMultiJvmNode2 extends ShoppingCartSpec -class ShoppingCartSpecMultiJvmNode3 extends ShoppingCartSpec - -class ShoppingCartSpec extends MultiNodeSpec(ShoppingCartSpec) with STMultiNodeSpec with ImplicitSender { - import ShoppingCartSpec._ - import ShoppingCart._ - - override def initialParticipants = roles.size - - val cluster = Cluster(system) - val shoppingCart = system.actorOf(ShoppingCart.props("user-1")) - - 
def join(from: RoleName, to: RoleName): Unit = { - runOn(from) { - cluster join node(to).address - } - enterBarrier(from.name + "-joined") - } - - "Demo of a replicated shopping cart" must { - "join cluster" in within(20.seconds) { - join(node1, node1) - join(node2, node1) - join(node3, node1) - - awaitAssert { - DistributedData(system).replicator ! GetReplicaCount - expectMsg(ReplicaCount(roles.size)) - } - enterBarrier("after-1") - } - - "handle updates directly after start" in within(15.seconds) { - runOn(node2) { - shoppingCart ! ShoppingCart.AddItem(LineItem("1", "Apples", quantity = 2)) - shoppingCart ! ShoppingCart.AddItem(LineItem("2", "Oranges", quantity = 3)) - } - enterBarrier("updates-done") - - awaitAssert { - shoppingCart ! ShoppingCart.GetCart - val cart = expectMsgType[Cart] - cart.items should be(Set(LineItem("1", "Apples", quantity = 2), LineItem("2", "Oranges", quantity = 3))) - } - - enterBarrier("after-2") - } - - "handle updates from different nodes" in within(5.seconds) { - runOn(node2) { - shoppingCart ! ShoppingCart.AddItem(LineItem("1", "Apples", quantity = 5)) - shoppingCart ! ShoppingCart.RemoveItem("2") - } - runOn(node3) { - shoppingCart ! ShoppingCart.AddItem(LineItem("3", "Bananas", quantity = 4)) - } - enterBarrier("updates-done") - - awaitAssert { - shoppingCart ! 
ShoppingCart.GetCart - val cart = expectMsgType[Cart] - cart.items should be(Set(LineItem("1", "Apples", quantity = 7), LineItem("3", "Bananas", quantity = 4))) - } - - enterBarrier("after-3") - } - - } - -} - diff --git a/akka-samples/akka-sample-distributed-data-scala/src/multi-jvm/scala/sample/distributeddata/VotingServiceSpec.scala b/akka-samples/akka-sample-distributed-data-scala/src/multi-jvm/scala/sample/distributeddata/VotingServiceSpec.scala deleted file mode 100644 index 3b0a22894e..0000000000 --- a/akka-samples/akka-sample-distributed-data-scala/src/multi-jvm/scala/sample/distributeddata/VotingServiceSpec.scala +++ /dev/null @@ -1,97 +0,0 @@ -package sample.distributeddata - -import scala.concurrent.duration._ -import akka.actor.Props -import akka.cluster.Cluster -import akka.cluster.ddata.DistributedData -import akka.cluster.ddata.Replicator.GetReplicaCount -import akka.cluster.ddata.Replicator.ReplicaCount -import akka.remote.testconductor.RoleName -import akka.remote.testkit.MultiNodeConfig -import akka.remote.testkit.MultiNodeSpec -import akka.testkit._ -import com.typesafe.config.ConfigFactory - -object VotingServiceSpec extends MultiNodeConfig { - val node1 = role("node-1") - val node2 = role("node-2") - val node3 = role("node-3") - - commonConfig(ConfigFactory.parseString(""" - akka.loglevel = INFO - akka.actor.provider = "cluster" - akka.log-dead-letters-during-shutdown = off - """)) - -} - -class VotingServiceSpecMultiJvmNode1 extends VotingServiceSpec -class VotingServiceSpecMultiJvmNode2 extends VotingServiceSpec -class VotingServiceSpecMultiJvmNode3 extends VotingServiceSpec - -class VotingServiceSpec extends MultiNodeSpec(VotingServiceSpec) with STMultiNodeSpec with ImplicitSender { - import VotingServiceSpec._ - - override def initialParticipants = roles.size - - val cluster = Cluster(system) - - def join(from: RoleName, to: RoleName): Unit = { - runOn(from) { - cluster join node(to).address - } - enterBarrier(from.name + "-joined") - } - - 
"Demo of a replicated voting" must { - - "join cluster" in within(20.seconds) { - join(node1, node1) - join(node2, node1) - join(node3, node1) - - awaitAssert { - DistributedData(system).replicator ! GetReplicaCount - expectMsg(ReplicaCount(roles.size)) - } - enterBarrier("after-1") - } - - "count votes correctly" in within(15.seconds) { - import VotingService._ - val votingService = system.actorOf(Props[VotingService], "votingService") - val N = 1000 - runOn(node1) { - votingService ! Open - for (n ← 1 to N) { - votingService ! Vote("#" + ((n % 20) + 1)) - } - } - runOn(node2, node3) { - // wait for it to open - val p = TestProbe() - awaitAssert { - votingService.tell(GetVotes, p.ref) - p.expectMsgPF(3.seconds) { case Votes(_, true) => true } - } - for (n ← 1 to N) { - votingService ! Vote("#" + ((n % 20) + 1)) - } - } - enterBarrier("voting-done") - runOn(node3) { - votingService ! Close - } - - val expected = (1 to 20).map(n => "#" + n -> BigInt(3L * N / 20)).toMap - awaitAssert { - votingService ! 
GetVotes - expectMsg(3.seconds, Votes(expected, false)) - } - - enterBarrier("after-2") - } - } - -} - diff --git a/akka-samples/akka-sample-distributed-data-scala/src/test/resources/reference.conf b/akka-samples/akka-sample-distributed-data-scala/src/test/resources/reference.conf deleted file mode 100644 index 90492329b7..0000000000 --- a/akka-samples/akka-sample-distributed-data-scala/src/test/resources/reference.conf +++ /dev/null @@ -1,4 +0,0 @@ -# Don't terminate ActorSystem in tests -akka.coordinated-shutdown.run-by-jvm-shutdown-hook = off -akka.coordinated-shutdown.terminate-actor-system = off -akka.cluster.run-coordinated-shutdown-when-down = off diff --git a/akka-samples/akka-sample-distributed-data-scala/tutorial/index.html b/akka-samples/akka-sample-distributed-data-scala/tutorial/index.html deleted file mode 100644 index 1fb6df6200..0000000000 --- a/akka-samples/akka-sample-distributed-data-scala/tutorial/index.html +++ /dev/null @@ -1,292 +0,0 @@ - - -Akka Distributed Data Samples with Scala - - - - -
-

-This tutorial contains 5 samples illustrating how to use -Akka Distributed Data. -

-
    -
  • Low Latency Voting Service
  • -
  • Highly Available Shopping Cart
  • -
  • Distributed Service Registry
  • -
  • Replicated Cache
  • -
  • Replicated Metrics
  • -
- -

-Akka Distributed Data is useful when you need to share data between nodes in an -Akka Cluster. The data is accessed with an actor providing a key-value store like API. -The keys are unique identifiers with type information of the data values. The values -are Conflict Free Replicated Data Types (CRDTs). -

- -

-All data entries are spread to all nodes, or nodes with a certain role, in the cluster -via direct replication and gossip based dissemination. You have fine grained control -of the consistency level for reads and writes. -

- -

-The nature CRDTs makes it possible to perform updates from any node without coordination. -Concurrent updates from different nodes will automatically be resolved by the monotonic -merge function, which all data types must provide. The state changes always converge. -Several useful data types for counters, sets, maps and registers are provided and -you can also implement your own custom data types. -

- -

-It is eventually consistent and geared toward providing high read and write availability -(partition tolerance), with low latency. Note that in an eventually consistent system a read may return an -out-of-date value. -

- -

-Note that there are some -Limitations -that you should be aware of. For example, Akka Distributed Data is not intended for Big Data. -

- -
- -
- -

Low Latency Voting Service

- -

-Distributed Data is great for low latency services, since you can update or get data from the local replica -without immediate communication with other nodes. -

- -

-Open VotingService.scala. -

- -

-VotingService is an actor for low latency counting of votes on several cluster nodes and aggregation -of the grand total number of votes. The actor is started on each cluster node. First it expects an -Open message on one or several nodes. After that the counting can begin. The open -signal is immediately replicated to all nodes with a boolean -Flag. -Note WriteAll. -

- -

-replicator ! Update(OpenedKey, Flag(), WriteAll(5.seconds))(_.switchOn)
-
- -

-The actor is subscribing to changes of the OpenedKey and other instances of this actor, -also on other nodes, will be notified when the flag is changed. -

- -

-replicator ! Subscribe(OpenedKey, self)
-
- -

-case c @ Changed(OpenedKey) if c.get(OpenedKey).enabled
-
- -

-The counters are kept in a -PNCounterMap -and updated with: -

- -

-val update = Update(CountersKey, PNCounterMap(), WriteLocal, request = Some(v)) {
-  _.increment(participant, 1)
-}
-replicator ! update
-
- -

-Incrementing the counter is very fast, since it only involves communication with the local -Replicator actor. Note WriteLocal. Those updates are also spread -to other nodes, but that is performed in the background. -

- -

-The total number of votes is retrieved with: -

- -

-case GetVotes ⇒
-  replicator ! Get(CountersKey, ReadAll(3.seconds), Some(GetVotesReq(sender())))
-
-case g @ GetSuccess(CountersKey, Some(GetVotesReq(replyTo))) ⇒
-  val data = g.get(CountersKey)
-  replyTo ! Votes(data.entries, open)
-
- -

-The multi-node test for the VotingService can be found in -VotingServiceSpec.scala. -

- -

-Read the -Using the Replicator -documentation for more details of how to use Get, Update, and Subscribe. -

- -
- -
-

Highly Available Shopping Cart

- -

-Distributed Data is great for highly available services, since it is possible to perform -updates to the local node (or currently available nodes) during a network partition. -

- -

-Open ShoppingCart.scala. -

- -

-ShoppingCart is an actor that holds the selected items to buy for a user. -The actor instance for a specific user may be started where ever needed in the cluster, i.e. several -instances may be started on different nodes and used at the same time. -

- -

-Each product in the cart is represented by a LineItem and all items in the cart -is collected in a LWWMap. -

- -

-The actor handles the commands GetCart, AddItem and RemoveItem. -To get the latest updates in case the same shopping cart is used from several nodes it is using -consistency level of ReadMajority and WriteMajority, but that is only -done to reduce the risk of seeing old data. If such reads and writes cannot be completed due to a -network partition it falls back to reading/writing from the local replica (see GetFailure). -Local reads and writes will always be successful and when the network partition heals the updated -shopping carts will be be disseminated by the -gossip protocol -and the LWWMap CRDTs are merged, i.e. it is a highly available shopping cart. -

- -

-The multi-node test for the ShoppingCart can be found in -ShoppingCartSpec.scala. -

- -

-Read the -Consistency -section in the documentation to understand the consistency considerations. -

- -
- -
-

Distributed Service Registry

- -

-Have you ever had the need to lookup actors by name in an Akka Cluster? -This example illustrates how you could implement such a registry. It is probably not -feature complete, but should be a good starting point. -

- -

-Open ServiceRegistry.scala. -

- -

-ServiceRegistry is an actor that is started on each node in the cluster. -It supports two basic commands: -

-
    -
  • Register to bind an ActorRef to a name, - several actors can be bound to the same name
  • -
  • Lookup get currently bound services of a given name
  • -
- -

-For each named service it is using an -ORSet. -Here we are using top level ORSet entries. An alternative would have been to use a -ORMultiMap holding all services. That would have a disadvantage if we have many services. -When a data entry is changed the full state of that entry is replicated to other nodes, i.e. when you -update a map the whole map is replicated. -

- -

-The ServiceRegistry is subscribing to changes of a GSet where we add -the names of all services. It is also subscribing to all such service keys to get notifications when -actors are added or removed to a named service. -

- -

-The multi-node test for the ServiceRegistry can be found in -ServiceRegistrySpec.scala. -

- -
- -
-

Replicated Cache

- -

-This example illustrates a simple key-value cache. -

- -

-Open ReplicatedCache.scala. -

- -

-ReplicatedCache is an actor that is started on each node in the cluster. -It supports three commands: PutInCache, GetFromCache and Evict. -

- -

-It is splitting up the key space in 100 top level keys, each with a LWWMap. -When a data entry is changed the full state of that entry is replicated to other nodes, i.e. when you -update a map the whole map is replicated. Therefore, instead of using one ORMap with 1000 elements it -is more efficient to split that up in 100 top level ORMap entries with 10 elements each. Top level -entries are replicated individually, which has the trade-off that different entries may not be -replicated at the same time and you may see inconsistencies between related entries. -Separate top level entries cannot be updated atomically together. -

- -

-The multi-node test for the ReplicatedCache can be found in -ReplicatedCacheSpec.scala. -

- -
- -
-

Replicated Metrics

- -

-This example illustrates to spread metrics data to all nodes in an Akka cluster. -

- -

-Open ReplicatedMetrics.scala. -

- -

-ReplicatedMetrics is an actor that is started on each node in the cluster. -Periodically it collects some metrics, in this case used and max heap size. -Each metrics type is stored in a LWWMap where the key in the map is the address of -the node. The values are disseminated to other nodes with the gossip protocol. -

- -

-The multi-node test for the ReplicatedCache can be found in -ReplicatedMetricsSpec.scala. -

- -
- - - diff --git a/akka-samples/akka-sample-fsm-java-lambda/COPYING b/akka-samples/akka-sample-fsm-java-lambda/COPYING deleted file mode 100644 index 0e259d42c9..0000000000 --- a/akka-samples/akka-sample-fsm-java-lambda/COPYING +++ /dev/null @@ -1,121 +0,0 @@ -Creative Commons Legal Code - -CC0 1.0 Universal - - CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE - LEGAL SERVICES. DISTRIBUTION OF THIS DOCUMENT DOES NOT CREATE AN - ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS - INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES - REGARDING THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS - PROVIDED HEREUNDER, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM - THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS PROVIDED - HEREUNDER. - -Statement of Purpose - -The laws of most jurisdictions throughout the world automatically confer -exclusive Copyright and Related Rights (defined below) upon the creator -and subsequent owner(s) (each and all, an "owner") of an original work of -authorship and/or a database (each, a "Work"). - -Certain owners wish to permanently relinquish those rights to a Work for -the purpose of contributing to a commons of creative, cultural and -scientific works ("Commons") that the public can reliably and without fear -of later claims of infringement build upon, modify, incorporate in other -works, reuse and redistribute as freely as possible in any form whatsoever -and for any purposes, including without limitation commercial purposes. -These owners may contribute to the Commons to promote the ideal of a free -culture and the further production of creative, cultural and scientific -works, or to gain reputation or greater distribution for their Work in -part through the use and efforts of others. 
- -For these and/or other purposes and motivations, and without any -expectation of additional consideration or compensation, the person -associating CC0 with a Work (the "Affirmer"), to the extent that he or she -is an owner of Copyright and Related Rights in the Work, voluntarily -elects to apply CC0 to the Work and publicly distribute the Work under its -terms, with knowledge of his or her Copyright and Related Rights in the -Work and the meaning and intended legal effect of CC0 on those rights. - -1. Copyright and Related Rights. A Work made available under CC0 may be -protected by copyright and related or neighboring rights ("Copyright and -Related Rights"). Copyright and Related Rights include, but are not -limited to, the following: - - i. the right to reproduce, adapt, distribute, perform, display, - communicate, and translate a Work; - ii. moral rights retained by the original author(s) and/or performer(s); -iii. publicity and privacy rights pertaining to a person's image or - likeness depicted in a Work; - iv. rights protecting against unfair competition in regards to a Work, - subject to the limitations in paragraph 4(a), below; - v. rights protecting the extraction, dissemination, use and reuse of data - in a Work; - vi. database rights (such as those arising under Directive 96/9/EC of the - European Parliament and of the Council of 11 March 1996 on the legal - protection of databases, and under any national implementation - thereof, including any amended or successor version of such - directive); and -vii. other similar, equivalent or corresponding rights throughout the - world based on applicable law or treaty, and any national - implementations thereof. - -2. Waiver. 
To the greatest extent permitted by, but not in contravention -of, applicable law, Affirmer hereby overtly, fully, permanently, -irrevocably and unconditionally waives, abandons, and surrenders all of -Affirmer's Copyright and Related Rights and associated claims and causes -of action, whether now known or unknown (including existing as well as -future claims and causes of action), in the Work (i) in all territories -worldwide, (ii) for the maximum duration provided by applicable law or -treaty (including future time extensions), (iii) in any current or future -medium and for any number of copies, and (iv) for any purpose whatsoever, -including without limitation commercial, advertising or promotional -purposes (the "Waiver"). Affirmer makes the Waiver for the benefit of each -member of the public at large and to the detriment of Affirmer's heirs and -successors, fully intending that such Waiver shall not be subject to -revocation, rescission, cancellation, termination, or any other legal or -equitable action to disrupt the quiet enjoyment of the Work by the public -as contemplated by Affirmer's express Statement of Purpose. - -3. Public License Fallback. Should any part of the Waiver for any reason -be judged legally invalid or ineffective under applicable law, then the -Waiver shall be preserved to the maximum extent permitted taking into -account Affirmer's express Statement of Purpose. 
In addition, to the -extent the Waiver is so judged Affirmer hereby grants to each affected -person a royalty-free, non transferable, non sublicensable, non exclusive, -irrevocable and unconditional license to exercise Affirmer's Copyright and -Related Rights in the Work (i) in all territories worldwide, (ii) for the -maximum duration provided by applicable law or treaty (including future -time extensions), (iii) in any current or future medium and for any number -of copies, and (iv) for any purpose whatsoever, including without -limitation commercial, advertising or promotional purposes (the -"License"). The License shall be deemed effective as of the date CC0 was -applied by Affirmer to the Work. Should any part of the License for any -reason be judged legally invalid or ineffective under applicable law, such -partial invalidity or ineffectiveness shall not invalidate the remainder -of the License, and in such case Affirmer hereby affirms that he or she -will not (i) exercise any of his or her remaining Copyright and Related -Rights in the Work or (ii) assert any associated claims and causes of -action with respect to the Work, in either case contrary to Affirmer's -express Statement of Purpose. - -4. Limitations and Disclaimers. - - a. No trademark or patent rights held by Affirmer are waived, abandoned, - surrendered, licensed or otherwise affected by this document. - b. Affirmer offers the Work as-is and makes no representations or - warranties of any kind concerning the Work, express, implied, - statutory or otherwise, including without limitation warranties of - title, merchantability, fitness for a particular purpose, non - infringement, or the absence of latent or other defects, accuracy, or - the present or absence of errors, whether or not discoverable, all to - the greatest extent permissible under applicable law. - c. 
Affirmer disclaims responsibility for clearing rights of other persons - that may apply to the Work or any use thereof, including without - limitation any person's Copyright and Related Rights in the Work. - Further, Affirmer disclaims responsibility for obtaining any necessary - consents, permissions or other rights required for any use of the - Work. - d. Affirmer understands and acknowledges that Creative Commons is not a - party to this document and has no duty or obligation with respect to - this CC0 or use of the Work. diff --git a/akka-samples/akka-sample-fsm-java-lambda/LICENSE b/akka-samples/akka-sample-fsm-java-lambda/LICENSE deleted file mode 100644 index 2a18b45589..0000000000 --- a/akka-samples/akka-sample-fsm-java-lambda/LICENSE +++ /dev/null @@ -1,10 +0,0 @@ -Activator Template by Lightbend - -Licensed under Public Domain (CC0) - -To the extent possible under law, the person who associated CC0 with -this Activator Tempate has waived all copyright and related or neighboring -rights to this Activator Template. - -You should have received a copy of the CC0 legalcode along with this -work. If not, see . diff --git a/akka-samples/akka-sample-fsm-java-lambda/activator.properties b/akka-samples/akka-sample-fsm-java-lambda/activator.properties deleted file mode 100644 index e12c4ed0e3..0000000000 --- a/akka-samples/akka-sample-fsm-java-lambda/activator.properties +++ /dev/null @@ -1,7 +0,0 @@ -name=akka-sample-fsm-java-lambda -title=Akka FSM in Java with Lambdas -description=Illustrating how to implement a finite state machine in actors. 
-tags=akka,java,java8,sample -authorName=Akka Team -authorLink=http://akka.io/ -sourceLink=https://github.com/akka/akka diff --git a/akka-samples/akka-sample-fsm-java-lambda/build.sbt b/akka-samples/akka-sample-fsm-java-lambda/build.sbt deleted file mode 100644 index 607c52f5e5..0000000000 --- a/akka-samples/akka-sample-fsm-java-lambda/build.sbt +++ /dev/null @@ -1,19 +0,0 @@ -name := "akka-sample-fsm-java-lambda" - -version := "2.5-SNAPSHOT" - -scalaVersion := "2.11.8" - -javacOptions in compile ++= Seq("-encoding", "UTF-8", "-source", "1.8", "-target", "1.8", "-Xlint") - -javacOptions in doc ++= Seq("-encoding", "UTF-8", "-source", "1.8") - -testOptions += Tests.Argument(TestFrameworks.JUnit, "-v", "-a") - -libraryDependencies ++= Seq( - "com.typesafe.akka" %% "akka-actor" % "2.5-SNAPSHOT", - "com.typesafe.akka" %% "akka-testkit" % "2.5-SNAPSHOT" % "test", - "junit" % "junit" % "4.11" % "test", - "com.novocode" % "junit-interface" % "0.10" % "test") - -licenses := Seq(("CC0", url("http://creativecommons.org/publicdomain/zero/1.0"))) diff --git a/akka-samples/akka-sample-fsm-java-lambda/pom.xml b/akka-samples/akka-sample-fsm-java-lambda/pom.xml deleted file mode 100644 index e18c9278dd..0000000000 --- a/akka-samples/akka-sample-fsm-java-lambda/pom.xml +++ /dev/null @@ -1,53 +0,0 @@ - - 4.0.0 - - - UTF-8 - - - sample - akka-sample-fsm-java-lambda - jar - 2.5-SNAPSHOT - - - - com.typesafe.akka - akka-actor_2.11 - 2.5-SNAPSHOT - - - com.typesafe.akka - akka-testkit_2.11 - 2.5-SNAPSHOT - - - junit - junit - 4.11 - test - - - - - - - org.apache.maven.plugins - maven-compiler-plugin - 3.1 - - 1.8 - 1.8 - true - - -Xlint - - - - - - - diff --git a/akka-samples/akka-sample-fsm-java-lambda/project/build.properties b/akka-samples/akka-sample-fsm-java-lambda/project/build.properties deleted file mode 100644 index 748703f770..0000000000 --- a/akka-samples/akka-sample-fsm-java-lambda/project/build.properties +++ /dev/null @@ -1 +0,0 @@ -sbt.version=0.13.7 diff --git 
a/akka-samples/akka-sample-fsm-java-lambda/src/main/java/sample/become/DiningHakkersOnBecome.java b/akka-samples/akka-sample-fsm-java-lambda/src/main/java/sample/become/DiningHakkersOnBecome.java deleted file mode 100644 index 8f78cd8f64..0000000000 --- a/akka-samples/akka-sample-fsm-java-lambda/src/main/java/sample/become/DiningHakkersOnBecome.java +++ /dev/null @@ -1,160 +0,0 @@ -package sample.become; - -import akka.actor.*; -import akka.japi.pf.ReceiveBuilder; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; -import scala.concurrent.duration.Duration; -import scala.concurrent.duration.FiniteDuration; -import scala.PartialFunction; -import scala.runtime.BoxedUnit; - -import static sample.become.Messages.*; -import static java.util.concurrent.TimeUnit.*; - -// Akka adaptation of -// http://www.dalnefre.com/wp/2010/08/dining-philosophers-in-humus/ - -public class DiningHakkersOnBecome { - - /* - * A Chopstick is an actor, it can be taken, and put back - */ - public static class Chopstick extends AbstractActor { - - //When a Chopstick is taken by a hakker - //It will refuse to be taken by other hakkers - //But the owning hakker can put it back - PartialFunction takenBy(ActorRef hakker) { - return ReceiveBuilder. - match(Take.class, - t -> t.hakker.tell(new Busy(self()), self())). - match(Put.class, p -> p.hakker == hakker, - p -> context().become(available)). - build(); - } - - //When a Chopstick is available, it can be taken by a hakker - PartialFunction available = ReceiveBuilder. 
- match(Take.class, t -> { - context().become(takenBy(t.hakker)); - t.hakker.tell(new Taken(self()), self()); - }).build(); - - //A Chopstick begins its existence as available - public Chopstick() { - receive(available); - } - } - - /* - * A hakker is an awesome dude or dudette who either thinks about hacking or has to eat ;-) - */ - public static class Hakker extends AbstractActor { - private String name; - private ActorRef left; - private ActorRef right; - - public Hakker(String name, ActorRef left, ActorRef right) { - this.name = name; - this.left = left; - this.right = right; - - //All hakkers start in a non-eating state - receive(ReceiveBuilder.matchEquals(Think, m -> { - System.out.println(String.format("%s starts to think", name)); - startThinking(Duration.create(5, SECONDS)); - }).build()); - } - - //When a hakker is eating, he can decide to start to think, - //then he puts down his chopsticks and starts to think - PartialFunction eating = ReceiveBuilder. - matchEquals(Think, m -> { - left.tell(new Put(self()), self()); - right.tell(new Put(self()), self()); - System.out.println(String.format("%s puts down his chopsticks and starts to think", name)); - startThinking(Duration.create(5, SECONDS)); - }).build(); - - //When a hakker is waiting for the last chopstick it can either obtain it - //and start eating, or the other chopstick was busy, and the hakker goes - //back to think about how he should obtain his chopsticks :-) - PartialFunction waitingFor(ActorRef chopstickToWaitFor, ActorRef otherChopstick) { - return ReceiveBuilder. - match(Taken.class, t -> t.chopstick == chopstickToWaitFor, t -> { - System.out.println(String.format("%s has picked up %s and %s and starts to eat", - name, left.path().name(), right.path().name())); - context().become(eating); - context().system().scheduler().scheduleOnce(Duration.create(5, SECONDS), self(), Think, context().system().dispatcher(), self()); - }). 
- match(Busy.class, b -> { - otherChopstick.tell(new Put(self()), self()); - startThinking(Duration.create(10, MILLISECONDS)); - }). - build(); - } - - //When the results of the other grab comes back, - //he needs to put it back if he got the other one. - //Then go back and think and try to grab the chopsticks again - PartialFunction deniedAChopstick = ReceiveBuilder. - match(Taken.class, t -> { - t.chopstick.tell(new Put(self()), self()); - startThinking(Duration.create(10, MILLISECONDS)); - }). - match(Busy.class, b -> - startThinking(Duration.create(10, MILLISECONDS))). - build(); - - //When a hakker is hungry it tries to pick up its chopsticks and eat - //When it picks one up, it goes into wait for the other - //If the hakkers first attempt at grabbing a chopstick fails, - //it starts to wait for the response of the other grab - PartialFunction hungry = ReceiveBuilder. - match(Taken.class, t -> t.chopstick == left, - t -> context().become(waitingFor(right, left))). - match(Taken.class, t -> t.chopstick == right, - t -> context().become(waitingFor(left, right))). - match(Busy.class, - b -> context().become(deniedAChopstick)). - build(); - - //When a hakker is thinking it can become hungry - //and try to pick up its chopsticks and eat - PartialFunction thinking = ReceiveBuilder. 
- matchEquals(Eat, m -> { - context().become(hungry); - left.tell(new Take(self()), self()); - right.tell(new Take(self()), self()); - }).build(); - - private void startThinking(FiniteDuration duration) { - context().become(thinking); - context().system().scheduler().scheduleOnce(duration, self(), Eat, context().system().dispatcher(), self()); - } - } - - /* - * Alright, here's our test-harness - */ - public static void main(String[] args) { - ActorSystem system = ActorSystem.create(); - //Create 5 chopsticks - ActorRef[] chopsticks = new ActorRef[5]; - for (int i = 0; i < 5; i++) - chopsticks[i] = system.actorOf(Props.create(Chopstick.class), "Chopstick" + i); - - //Create 5 awesome hakkers and assign them their left and right chopstick - List names = Arrays.asList("Ghosh", "Boner", "Klang", "Krasser", "Manie"); - List hakkers = new ArrayList<>(); - int i = 0; - for (String name: names) { - hakkers.add(system.actorOf(Props.create(Hakker.class, name, chopsticks[i], chopsticks[(i + 1) % 5]))); - i++; - } - //Signal all hakkers that they should start thinking, and watch the show - hakkers.stream().forEach(hakker -> hakker.tell(Think, ActorRef.noSender())); - } -} diff --git a/akka-samples/akka-sample-fsm-java-lambda/src/main/java/sample/become/Messages.java b/akka-samples/akka-sample-fsm-java-lambda/src/main/java/sample/become/Messages.java deleted file mode 100644 index c61da59e39..0000000000 --- a/akka-samples/akka-sample-fsm-java-lambda/src/main/java/sample/become/Messages.java +++ /dev/null @@ -1,39 +0,0 @@ -package sample.become; - -import akka.actor.ActorRef; - -public class Messages { - public static final class Busy { - public final ActorRef chopstick; - public Busy(ActorRef chopstick){ - this.chopstick = chopstick; - } - } - - public static final class Put { - public final ActorRef hakker; - public Put(ActorRef hakker){ - this.hakker = hakker; - } - } - - public static final class Take { - public final ActorRef hakker; - public Take(ActorRef hakker){ - 
this.hakker = hakker; - } - } - - public static final class Taken { - public final ActorRef chopstick; - public Taken(ActorRef chopstick){ - this.chopstick = chopstick; - } - } - - private static interface EatMessage {}; - public static final Object Eat = new EatMessage() {}; - - private static interface ThinkMessage {}; - public static final Object Think = new ThinkMessage() {}; -} diff --git a/akka-samples/akka-sample-fsm-java-lambda/src/main/java/sample/fsm/DiningHakkersOnFsm.java b/akka-samples/akka-sample-fsm-java-lambda/src/main/java/sample/fsm/DiningHakkersOnFsm.java deleted file mode 100644 index c61cbbe62d..0000000000 --- a/akka-samples/akka-sample-fsm-java-lambda/src/main/java/sample/fsm/DiningHakkersOnFsm.java +++ /dev/null @@ -1,205 +0,0 @@ -package sample.fsm; - -import akka.actor.*; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; -import scala.concurrent.duration.Duration; -import scala.concurrent.duration.FiniteDuration; - -import static java.util.concurrent.TimeUnit.*; -import static sample.fsm.Messages.*; - -// Akka adaptation of -// http://www.dalnefre.com/wp/2010/08/dining-philosophers-in-humus/ - -public class DiningHakkersOnFsm { - /** - * Some states the chopstick can be in - */ - public static enum CS { - Available, - Taken - } - - /** - * Some state container for the chopstick - */ - public static final class TakenBy { - public final ActorRef hakker; - public TakenBy(ActorRef hakker){ - this.hakker = hakker; - } - } - - /* - * A chopstick is an actor, it can be taken, and put back - */ - public static class Chopstick extends AbstractLoggingFSM { - { - // A chopstick begins its existence as available and taken by no one - startWith(CS.Available, new TakenBy(context().system().deadLetters())); - - // When a chopstick is available, it can be taken by a some hakker - when(CS.Available, - matchEventEquals(Take, (take, data) -> - goTo(CS.Taken).using(new TakenBy(sender())).replying(new Taken(self())))); - - // When a 
chopstick is taken by a hakker - // It will refuse to be taken by other hakkers - // But the owning hakker can put it back - when(CS.Taken, - matchEventEquals(Take, (take, data) -> - stay().replying(new Busy(self()))). - event((event, data) -> (event == Put) && (data.hakker == sender()), (event, data) -> - goTo(CS.Available).using(new TakenBy(context().system().deadLetters())))); - - // Initialize the chopstick - initialize(); - } - } - - /** - * Some fsm hakker states - */ - public static enum HS { - Waiting, - Thinking, - Hungry, - WaitForOtherChopstick, - FirstChopstickDenied, - Eating - } - - /** - * Some state container to keep track of which chopsticks we have - */ - public static final class TakenChopsticks { - public final ActorRef left; - public final ActorRef right; - - public TakenChopsticks(ActorRef left, ActorRef right) { - this.left = left; - this.right = right; - } - } - - /* - * A fsm hakker is an awesome dude or dudette who either thinks about hacking or has to eat ;-) - */ - public static class Hakker extends AbstractLoggingFSM { - private String name; - private ActorRef left; - private ActorRef right; - - public Hakker(String name, ActorRef left, ActorRef right) { - this.name = name; - this.left = left; - this.right = right; - } - - { - //All hakkers start waiting - startWith(HS.Waiting, new TakenChopsticks(null, null)); - - when(HS.Waiting, - matchEventEquals(Think, (think, data) -> { - System.out.println(String.format("%s starts to think", name)); - return startThinking(Duration.create(5, SECONDS)); - })); - - //When a hakker is thinking it can become hungry - //and try to pick up its chopsticks and eat - when(HS.Thinking, - matchEventEquals(StateTimeout(), (event, data) -> { - left.tell(Take, self()); - right.tell(Take, self()); - return goTo(HS.Hungry); - })); - - // When a hakker is hungry it tries to pick up its chopsticks and eat - // When it picks one up, it goes into wait for the other - // If the hakkers first attempt at grabbing a 
chopstick fails, - // it starts to wait for the response of the other grab - when(HS.Hungry, - matchEvent(Taken.class, (taken, data) -> taken.chopstick == left, - (taken, data) -> goTo(HS.WaitForOtherChopstick).using(new TakenChopsticks(left, null))). - event(Taken.class, (taken, data) -> taken.chopstick == right, - (taken, data) -> goTo(HS.WaitForOtherChopstick).using(new TakenChopsticks(null, right))). - event(Busy.class, - (busy, data) -> goTo(HS.FirstChopstickDenied))); - - // When a hakker is waiting for the last chopstick it can either obtain it - // and start eating, or the other chopstick was busy, and the hakker goes - // back to think about how he should obtain his chopsticks :-) - when(HS.WaitForOtherChopstick, - matchEvent(Taken.class, - (taken, data) -> (taken.chopstick == left && data.left == null && data.right != null), - (taken, data) -> startEating(left, right)). - event(Taken.class, - (taken, data) -> (taken.chopstick == right && data.left != null && data.right == null), - (taken, data) -> startEating(left, right)). - event(Busy.class, (busy, data) -> { - if (data.left != null) left.tell(Put, self()); - if (data.right != null) right.tell(Put, self()); - return startThinking(Duration.create(10, MILLISECONDS)); - })); - - // When the results of the other grab comes back, - // he needs to put it back if he got the other one. - // Then go back and think and try to grab the chopsticks again - when(HS.FirstChopstickDenied, - matchEvent(Taken.class, (taken, data) -> { - taken.chopstick.tell(Put, self()); - return startThinking(Duration.create(10, MILLISECONDS)); - }). 
- event(Busy.class, (busy, data) -> - startThinking(Duration.create(10, MILLISECONDS)))); - - // When a hakker is eating, he can decide to start to think, - // then he puts down his chopsticks and starts to think - when(HS.Eating, - matchEventEquals(StateTimeout(), (event, data) -> { - left.tell(Put, self()); - right.tell(Put, self()); - System.out.println(String.format("%s puts down his chopsticks and starts to think", name)); - return startThinking(Duration.create(5, SECONDS)); - })); - - // Initialize the hakker - initialize(); - } - - private FSM.State startEating(ActorRef left, ActorRef right) { - System.out.println(String.format("%s has picked up %s and %s and starts to eat", - name, left.path().name(), right.path().name())); - return goTo(HS.Eating).using(new TakenChopsticks(left, right)).forMax(Duration.create(5, SECONDS)); - } - - private FSM.State startThinking(FiniteDuration duration) { - return goTo(HS.Thinking).using(new TakenChopsticks(null, null)).forMax(duration); - } - } - - /* - * Alright, here's our test-harness - */ - public static void main(String[] args) { - ActorSystem system = ActorSystem.create(); - //Create 5 chopsticks - ActorRef[] chopsticks = new ActorRef[5]; - for (int i = 0; i < 5; i++) - chopsticks[i] = system.actorOf(Props.create(Chopstick.class), "Chopstick" + i); - - //Create 5 awesome hakkers and assign them their left and right chopstick - List names = Arrays.asList("Ghosh", "Boner", "Klang", "Krasser", "Manie"); - List hakkers = new ArrayList<>(); - int i = 0; - for (String name: names) { - hakkers.add(system.actorOf(Props.create(Hakker.class, name, chopsticks[i], chopsticks[(i + 1) % 5]), name)); - i++; - } - //Signal all hakkers that they should start thinking, and watch the show - hakkers.stream().forEach(hakker -> hakker.tell(Think, ActorRef.noSender())); - } -} diff --git a/akka-samples/akka-sample-fsm-java-lambda/src/main/java/sample/fsm/Messages.java 
b/akka-samples/akka-sample-fsm-java-lambda/src/main/java/sample/fsm/Messages.java deleted file mode 100644 index cb321dca96..0000000000 --- a/akka-samples/akka-sample-fsm-java-lambda/src/main/java/sample/fsm/Messages.java +++ /dev/null @@ -1,35 +0,0 @@ -package sample.fsm; - -import akka.actor.ActorRef; - -public class Messages { - - public static final class Busy { - public final ActorRef chopstick; - public Busy(ActorRef chopstick){ - this.chopstick = chopstick; - } - } - - private static interface PutMessage {}; - public static final Object Put = new PutMessage() { - @Override - public String toString() { return "Put"; } - }; - - private static interface TakeMessage {}; - public static final Object Take = new TakeMessage() { - @Override - public String toString() { return "Take"; } - }; - - public static final class Taken { - public final ActorRef chopstick; - public Taken(ActorRef chopstick){ - this.chopstick = chopstick; - } - } - - private static interface ThinkMessage {}; - public static final Object Think = new ThinkMessage() {}; -} diff --git a/akka-samples/akka-sample-fsm-java-lambda/tutorial/index.html b/akka-samples/akka-sample-fsm-java-lambda/tutorial/index.html deleted file mode 100644 index f52005cb18..0000000000 --- a/akka-samples/akka-sample-fsm-java-lambda/tutorial/index.html +++ /dev/null @@ -1,73 +0,0 @@ - - - Akka FSM in Java with Lambdas - - - - -
-

Finite State Machine in Actors

- -

-This sample is an adaptation of -Dining Hakkers. -It illustrates how state and behavior can be managed within -an Actor with two different approaches; using become and using -the AbstractFSM class. -

- - -
-
- -

Dining Hakkers with Become

- -

-Open DiningHakkersOnBecome.java. -

- -

-It illustrates how current behavior can be replaced with context.become. -Note that no var members are used, instead the state is encoded in the current -behavior and its parameters. -

- -

-Go to the Run tab, and start the application main class -sample.become.DiningHakkersOnBecome. -In the log output you can see the actions of the Hakker actors. -

- -

-Read more about become in -the documentation. -

- -
-
- -

Dining Hakkers with FSM

- -

-Open DiningHakkersOnFsm.java. -

- -

-It illustrates how the states and transitions can be defined with the akka.actor.AbstractFSM class. -

- -

-Go to the Run tab, and start the application main class -sample.fsm.DiningHakkersOnFsm. -In the log output you can see the actions of the Hakker actors. -

- -

-Read more about akka.actor.FSM in -the documentation. -

- -
- - - diff --git a/akka-samples/akka-sample-fsm-scala/COPYING b/akka-samples/akka-sample-fsm-scala/COPYING deleted file mode 100644 index 0e259d42c9..0000000000 --- a/akka-samples/akka-sample-fsm-scala/COPYING +++ /dev/null @@ -1,121 +0,0 @@ -Creative Commons Legal Code - -CC0 1.0 Universal - - CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE - LEGAL SERVICES. DISTRIBUTION OF THIS DOCUMENT DOES NOT CREATE AN - ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS - INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES - REGARDING THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS - PROVIDED HEREUNDER, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM - THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS PROVIDED - HEREUNDER. - -Statement of Purpose - -The laws of most jurisdictions throughout the world automatically confer -exclusive Copyright and Related Rights (defined below) upon the creator -and subsequent owner(s) (each and all, an "owner") of an original work of -authorship and/or a database (each, a "Work"). - -Certain owners wish to permanently relinquish those rights to a Work for -the purpose of contributing to a commons of creative, cultural and -scientific works ("Commons") that the public can reliably and without fear -of later claims of infringement build upon, modify, incorporate in other -works, reuse and redistribute as freely as possible in any form whatsoever -and for any purposes, including without limitation commercial purposes. -These owners may contribute to the Commons to promote the ideal of a free -culture and the further production of creative, cultural and scientific -works, or to gain reputation or greater distribution for their Work in -part through the use and efforts of others. 
- -For these and/or other purposes and motivations, and without any -expectation of additional consideration or compensation, the person -associating CC0 with a Work (the "Affirmer"), to the extent that he or she -is an owner of Copyright and Related Rights in the Work, voluntarily -elects to apply CC0 to the Work and publicly distribute the Work under its -terms, with knowledge of his or her Copyright and Related Rights in the -Work and the meaning and intended legal effect of CC0 on those rights. - -1. Copyright and Related Rights. A Work made available under CC0 may be -protected by copyright and related or neighboring rights ("Copyright and -Related Rights"). Copyright and Related Rights include, but are not -limited to, the following: - - i. the right to reproduce, adapt, distribute, perform, display, - communicate, and translate a Work; - ii. moral rights retained by the original author(s) and/or performer(s); -iii. publicity and privacy rights pertaining to a person's image or - likeness depicted in a Work; - iv. rights protecting against unfair competition in regards to a Work, - subject to the limitations in paragraph 4(a), below; - v. rights protecting the extraction, dissemination, use and reuse of data - in a Work; - vi. database rights (such as those arising under Directive 96/9/EC of the - European Parliament and of the Council of 11 March 1996 on the legal - protection of databases, and under any national implementation - thereof, including any amended or successor version of such - directive); and -vii. other similar, equivalent or corresponding rights throughout the - world based on applicable law or treaty, and any national - implementations thereof. - -2. Waiver. 
To the greatest extent permitted by, but not in contravention -of, applicable law, Affirmer hereby overtly, fully, permanently, -irrevocably and unconditionally waives, abandons, and surrenders all of -Affirmer's Copyright and Related Rights and associated claims and causes -of action, whether now known or unknown (including existing as well as -future claims and causes of action), in the Work (i) in all territories -worldwide, (ii) for the maximum duration provided by applicable law or -treaty (including future time extensions), (iii) in any current or future -medium and for any number of copies, and (iv) for any purpose whatsoever, -including without limitation commercial, advertising or promotional -purposes (the "Waiver"). Affirmer makes the Waiver for the benefit of each -member of the public at large and to the detriment of Affirmer's heirs and -successors, fully intending that such Waiver shall not be subject to -revocation, rescission, cancellation, termination, or any other legal or -equitable action to disrupt the quiet enjoyment of the Work by the public -as contemplated by Affirmer's express Statement of Purpose. - -3. Public License Fallback. Should any part of the Waiver for any reason -be judged legally invalid or ineffective under applicable law, then the -Waiver shall be preserved to the maximum extent permitted taking into -account Affirmer's express Statement of Purpose. 
In addition, to the -extent the Waiver is so judged Affirmer hereby grants to each affected -person a royalty-free, non transferable, non sublicensable, non exclusive, -irrevocable and unconditional license to exercise Affirmer's Copyright and -Related Rights in the Work (i) in all territories worldwide, (ii) for the -maximum duration provided by applicable law or treaty (including future -time extensions), (iii) in any current or future medium and for any number -of copies, and (iv) for any purpose whatsoever, including without -limitation commercial, advertising or promotional purposes (the -"License"). The License shall be deemed effective as of the date CC0 was -applied by Affirmer to the Work. Should any part of the License for any -reason be judged legally invalid or ineffective under applicable law, such -partial invalidity or ineffectiveness shall not invalidate the remainder -of the License, and in such case Affirmer hereby affirms that he or she -will not (i) exercise any of his or her remaining Copyright and Related -Rights in the Work or (ii) assert any associated claims and causes of -action with respect to the Work, in either case contrary to Affirmer's -express Statement of Purpose. - -4. Limitations and Disclaimers. - - a. No trademark or patent rights held by Affirmer are waived, abandoned, - surrendered, licensed or otherwise affected by this document. - b. Affirmer offers the Work as-is and makes no representations or - warranties of any kind concerning the Work, express, implied, - statutory or otherwise, including without limitation warranties of - title, merchantability, fitness for a particular purpose, non - infringement, or the absence of latent or other defects, accuracy, or - the present or absence of errors, whether or not discoverable, all to - the greatest extent permissible under applicable law. - c. 
Affirmer disclaims responsibility for clearing rights of other persons - that may apply to the Work or any use thereof, including without - limitation any person's Copyright and Related Rights in the Work. - Further, Affirmer disclaims responsibility for obtaining any necessary - consents, permissions or other rights required for any use of the - Work. - d. Affirmer understands and acknowledges that Creative Commons is not a - party to this document and has no duty or obligation with respect to - this CC0 or use of the Work. diff --git a/akka-samples/akka-sample-fsm-scala/LICENSE b/akka-samples/akka-sample-fsm-scala/LICENSE deleted file mode 100644 index 2a18b45589..0000000000 --- a/akka-samples/akka-sample-fsm-scala/LICENSE +++ /dev/null @@ -1,10 +0,0 @@ -Activator Template by Lightbend - -Licensed under Public Domain (CC0) - -To the extent possible under law, the person who associated CC0 with -this Activator Tempate has waived all copyright and related or neighboring -rights to this Activator Template. - -You should have received a copy of the CC0 legalcode along with this -work. If not, see . diff --git a/akka-samples/akka-sample-fsm-scala/activator.properties b/akka-samples/akka-sample-fsm-scala/activator.properties deleted file mode 100644 index dd3e4429a8..0000000000 --- a/akka-samples/akka-sample-fsm-scala/activator.properties +++ /dev/null @@ -1,7 +0,0 @@ -name=akka-sample-fsm-scala -title=Akka FSM in Scala -description=Illustrating how to implement a finite state machine in actors. 
-tags=akka,scala,sample -authorName=Akka Team -authorLink=http://akka.io/ -sourceLink=https://github.com/akka/akka diff --git a/akka-samples/akka-sample-fsm-scala/build.sbt b/akka-samples/akka-sample-fsm-scala/build.sbt deleted file mode 100644 index e256544d91..0000000000 --- a/akka-samples/akka-sample-fsm-scala/build.sbt +++ /dev/null @@ -1,11 +0,0 @@ -name := "akka-sample-fsm-scala" - -version := "2.5-SNAPSHOT" - -scalaVersion := "2.11.8" - -libraryDependencies ++= Seq( - "com.typesafe.akka" %% "akka-actor" % "2.5-SNAPSHOT" -) - -licenses := Seq(("CC0", url("http://creativecommons.org/publicdomain/zero/1.0"))) diff --git a/akka-samples/akka-sample-fsm-scala/project/build.properties b/akka-samples/akka-sample-fsm-scala/project/build.properties deleted file mode 100644 index 748703f770..0000000000 --- a/akka-samples/akka-sample-fsm-scala/project/build.properties +++ /dev/null @@ -1 +0,0 @@ -sbt.version=0.13.7 diff --git a/akka-samples/akka-sample-fsm-scala/src/main/scala/sample/become/DiningHakkersOnBecome.scala b/akka-samples/akka-sample-fsm-scala/src/main/scala/sample/become/DiningHakkersOnBecome.scala deleted file mode 100644 index 9e5bc17c14..0000000000 --- a/akka-samples/akka-sample-fsm-scala/src/main/scala/sample/become/DiningHakkersOnBecome.scala +++ /dev/null @@ -1,145 +0,0 @@ -package sample.become - -import akka.actor._ -import scala.concurrent.duration._ - -// Akka adaptation of -// http://www.dalnefre.com/wp/2010/08/dining-philosophers-in-humus/ - -/* -* First we define our messages, they basically speak for themselves -*/ -sealed trait DiningHakkerMessage -final case class Busy(chopstick: ActorRef) extends DiningHakkerMessage -final case class Put(hakker: ActorRef) extends DiningHakkerMessage -final case class Take(hakker: ActorRef) extends DiningHakkerMessage -final case class Taken(chopstick: ActorRef) extends DiningHakkerMessage -object Eat extends DiningHakkerMessage -object Think extends DiningHakkerMessage - -/* -* A Chopstick is an actor, it 
can be taken, and put back -*/ -class Chopstick extends Actor { - - import context._ - - //When a Chopstick is taken by a hakker - //It will refuse to be taken by other hakkers - //But the owning hakker can put it back - def takenBy(hakker: ActorRef): Receive = { - case Take(otherHakker) => - otherHakker ! Busy(self) - case Put(`hakker`) => - become(available) - } - - //When a Chopstick is available, it can be taken by a hakker - def available: Receive = { - case Take(hakker) => - become(takenBy(hakker)) - hakker ! Taken(self) - } - - //A Chopstick begins its existence as available - def receive = available -} - -/* -* A hakker is an awesome dude or dudette who either thinks about hacking or has to eat ;-) -*/ -class Hakker(name: String, left: ActorRef, right: ActorRef) extends Actor { - - import context._ - - //When a hakker is thinking it can become hungry - //and try to pick up its chopsticks and eat - def thinking: Receive = { - case Eat => - become(hungry) - left ! Take(self) - right ! 
Take(self) - } - - //When a hakker is hungry it tries to pick up its chopsticks and eat - //When it picks one up, it goes into wait for the other - //If the hakkers first attempt at grabbing a chopstick fails, - //it starts to wait for the response of the other grab - def hungry: Receive = { - case Taken(`left`) => - become(waiting_for(right, left)) - case Taken(`right`) => - become(waiting_for(left, right)) - case Busy(chopstick) => - become(denied_a_chopstick) - } - - //When a hakker is waiting for the last chopstick it can either obtain it - //and start eating, or the other chopstick was busy, and the hakker goes - //back to think about how he should obtain his chopsticks :-) - def waiting_for(chopstickToWaitFor: ActorRef, otherChopstick: ActorRef): Receive = { - case Taken(`chopstickToWaitFor`) => - println("%s has picked up %s and %s and starts to eat".format(name, left.path.name, right.path.name)) - become(eating) - system.scheduler.scheduleOnce(5.seconds, self, Think) - - case Busy(chopstick) => - otherChopstick ! Put(self) - startThinking(10.milliseconds) - } - - //When the results of the other grab comes back, - //he needs to put it back if he got the other one. - //Then go back and think and try to grab the chopsticks again - def denied_a_chopstick: Receive = { - case Taken(chopstick) => - chopstick ! Put(self) - startThinking(10.milliseconds) - case Busy(chopstick) => - startThinking(10.milliseconds) - } - - //When a hakker is eating, he can decide to start to think, - //then he puts down his chopsticks and starts to think - def eating: Receive = { - case Think => - left ! Put(self) - right ! 
Put(self) - println("%s puts down his chopsticks and starts to think".format(name)) - startThinking(5.seconds) - } - - //All hakkers start in a non-eating state - def receive = { - case Think => - println("%s starts to think".format(name)) - startThinking(5.seconds) - } - - private def startThinking(duration: FiniteDuration): Unit = { - become(thinking) - system.scheduler.scheduleOnce(duration, self, Eat) - } -} - -/* -* Alright, here's our test-harness -*/ -object DiningHakkersOnBecome { - val system = ActorSystem() - - def main(args: Array[String]): Unit = run() - - def run(): Unit = { - //Create 5 chopsticks - val chopsticks = for (i <- 1 to 5) yield system.actorOf(Props[Chopstick], "Chopstick" + i) - - //Create 5 awesome hakkers and assign them their left and right chopstick - val hakkers = for { - (name, i) <- List("Ghosh", "Boner", "Klang", "Krasser", "Manie").zipWithIndex - } yield system.actorOf(Props(classOf[Hakker], name, chopsticks(i), chopsticks((i + 1) % 5))) - - //Signal all hakkers that they should start thinking, and watch the show - hakkers.foreach(_ ! 
Think) - } -} diff --git a/akka-samples/akka-sample-fsm-scala/src/main/scala/sample/fsm/DiningHakkersOnFsm.scala b/akka-samples/akka-sample-fsm-scala/src/main/scala/sample/fsm/DiningHakkersOnFsm.scala deleted file mode 100644 index 5df85df01f..0000000000 --- a/akka-samples/akka-sample-fsm-scala/src/main/scala/sample/fsm/DiningHakkersOnFsm.scala +++ /dev/null @@ -1,183 +0,0 @@ -package sample.fsm - -import akka.actor._ -import akka.actor.FSM._ -import scala.concurrent.duration._ - -// Akka adaptation of -// http://www.dalnefre.com/wp/2010/08/dining-philosophers-in-humus/ - -/* -* Some messages for the chopstick -*/ -sealed trait ChopstickMessage -object Take extends ChopstickMessage -object Put extends ChopstickMessage -final case class Taken(chopstick: ActorRef) extends ChopstickMessage -final case class Busy(chopstick: ActorRef) extends ChopstickMessage - -/** - * Some states the chopstick can be in - */ -sealed trait ChopstickState -case object Available extends ChopstickState -case object Taken extends ChopstickState - -/** - * Some state container for the chopstick - */ -final case class TakenBy(hakker: ActorRef) - -/* -* A chopstick is an actor, it can be taken, and put back -*/ -class Chopstick extends Actor with FSM[ChopstickState, TakenBy] { - import context._ - - // A chopstick begins its existence as available and taken by no one - startWith(Available, TakenBy(system.deadLetters)) - - // When a chopstick is available, it can be taken by a some hakker - when(Available) { - case Event(Take, _) => - goto(Taken) using TakenBy(sender()) replying Taken(self) - } - - // When a chopstick is taken by a hakker - // It will refuse to be taken by other hakkers - // But the owning hakker can put it back - when(Taken) { - case Event(Take, currentState) => - stay replying Busy(self) - case Event(Put, TakenBy(hakker)) if sender() == hakker => - goto(Available) using TakenBy(system.deadLetters) - } - - // Initialize the chopstick - initialize() -} - -/** - * Some fsm 
hakker messages - */ -sealed trait FSMHakkerMessage -object Think extends FSMHakkerMessage - -/** - * Some fsm hakker states - */ -sealed trait FSMHakkerState -case object Waiting extends FSMHakkerState -case object Thinking extends FSMHakkerState -case object Hungry extends FSMHakkerState -case object WaitForOtherChopstick extends FSMHakkerState -case object FirstChopstickDenied extends FSMHakkerState -case object Eating extends FSMHakkerState - -/** - * Some state container to keep track of which chopsticks we have - */ -final case class TakenChopsticks(left: Option[ActorRef], right: Option[ActorRef]) - -/* -* A fsm hakker is an awesome dude or dudette who either thinks about hacking or has to eat ;-) -*/ -class FSMHakker(name: String, left: ActorRef, right: ActorRef) extends Actor with FSM[FSMHakkerState, TakenChopsticks] { - - //All hakkers start waiting - startWith(Waiting, TakenChopsticks(None, None)) - - when(Waiting) { - case Event(Think, _) => - println("%s starts to think".format(name)) - startThinking(5.seconds) - } - - //When a hakker is thinking it can become hungry - //and try to pick up its chopsticks and eat - when(Thinking) { - case Event(StateTimeout, _) => - left ! Take - right ! 
Take - goto(Hungry) - } - - // When a hakker is hungry it tries to pick up its chopsticks and eat - // When it picks one up, it goes into wait for the other - // If the hakkers first attempt at grabbing a chopstick fails, - // it starts to wait for the response of the other grab - when(Hungry) { - case Event(Taken(`left`), _) => - goto(WaitForOtherChopstick) using TakenChopsticks(Some(left), None) - case Event(Taken(`right`), _) => - goto(WaitForOtherChopstick) using TakenChopsticks(None, Some(right)) - case Event(Busy(_), _) => - goto(FirstChopstickDenied) - } - - // When a hakker is waiting for the last chopstick it can either obtain it - // and start eating, or the other chopstick was busy, and the hakker goes - // back to think about how he should obtain his chopsticks :-) - when(WaitForOtherChopstick) { - case Event(Taken(`left`), TakenChopsticks(None, Some(right))) => startEating(left, right) - case Event(Taken(`right`), TakenChopsticks(Some(left), None)) => startEating(left, right) - case Event(Busy(chopstick), TakenChopsticks(leftOption, rightOption)) => - leftOption.foreach(_ ! Put) - rightOption.foreach(_ ! Put) - startThinking(10.milliseconds) - } - - private def startEating(left: ActorRef, right: ActorRef): State = { - println("%s has picked up %s and %s and starts to eat".format(name, left.path.name, right.path.name)) - goto(Eating) using TakenChopsticks(Some(left), Some(right)) forMax (5.seconds) - } - - // When the results of the other grab comes back, - // he needs to put it back if he got the other one. - // Then go back and think and try to grab the chopsticks again - when(FirstChopstickDenied) { - case Event(Taken(secondChopstick), _) => - secondChopstick ! 
Put - startThinking(10.milliseconds) - case Event(Busy(chopstick), _) => - startThinking(10.milliseconds) - } - - // When a hakker is eating, he can decide to start to think, - // then he puts down his chopsticks and starts to think - when(Eating) { - case Event(StateTimeout, _) => - println("%s puts down his chopsticks and starts to think".format(name)) - left ! Put - right ! Put - startThinking(5.seconds) - } - - // Initialize the hakker - initialize() - - private def startThinking(duration: FiniteDuration): State = { - goto(Thinking) using TakenChopsticks(None, None) forMax duration - } -} - -/* -* Alright, here's our test-harness -*/ -object DiningHakkersOnFsm { - - val system = ActorSystem() - - def main(args: Array[String]): Unit = run() - - def run(): Unit = { - // Create 5 chopsticks - val chopsticks = for (i <- 1 to 5) yield system.actorOf(Props[Chopstick], "Chopstick" + i) - // Create 5 awesome fsm hakkers and assign them their left and right chopstick - val hakkers = for { - (name, i) <- List("Ghosh", "Boner", "Klang", "Krasser", "Manie").zipWithIndex - } yield system.actorOf(Props(classOf[FSMHakker], name, chopsticks(i), chopsticks((i + 1) % 5))) - - hakkers.foreach(_ ! Think) - } -} diff --git a/akka-samples/akka-sample-fsm-scala/src/main/scala/sample/redelivery/FsmSimpleRedelivery.scala b/akka-samples/akka-sample-fsm-scala/src/main/scala/sample/redelivery/FsmSimpleRedelivery.scala deleted file mode 100644 index 2f58fbed1a..0000000000 --- a/akka-samples/akka-sample-fsm-scala/src/main/scala/sample/redelivery/FsmSimpleRedelivery.scala +++ /dev/null @@ -1,223 +0,0 @@ -/** - * Copyright (C) 2009-2017 Lightbend Inc. - */ - -package sample.redelivery - -import akka.actor._ -import scala.concurrent.duration._ -import java.util.concurrent.ThreadLocalRandom -import java.util.UUID - -object SimpleOrderedRedeliverer { - /** - * Props for creating a [[SimpleOrderedRedeliverer]]. 
- */ - def props(retryTimeout: FiniteDuration) = Props(classOf[SimpleOrderedRedeliverer], retryTimeout) - - /* - * Messages exchanged with the requester of the delivery. - */ - case class Deliver(to: ActorRef, msg: Any, uuid: UUID) - case class Delivered(uuid: UUID) - case class AcceptedForDelivery(uuid: UUID) - case class Busy(refused: UUID, currentlyProcessing: UUID) - - /* - * Messages exchanged with the “deliveree”. - */ - case class Ackable(from: ActorRef, msg: Any, uuid: UUID) - case class Ack(uuid: UUID) - - /* - * Various states the [[SimpleOrderedRedeliverer]] can be in. - */ - sealed trait State - case object Idle extends State - case object AwaitingAck extends State - - sealed trait Data - case object NoData extends Data - - /** - * Keeps track of our last delivery request. - */ - case class LastRequest(last: Deliver, requester: ActorRef) extends Data - - /** - * Private message used only inside of the [[SimpleOrderedRedeliverer]] to signalize a tick of its retry timer. - */ - private case object Retry -} - -/** - * An actor-in-the-middle kind. Takes care of message redelivery between two or more sides. - * - * Works “sequentially”, thus is able to process only one message at a time: - * - *
- *   Delivery-request#1 -> ACK#1 -> Delivery-request#2 -> ACK#2 -> ...
- * 
- * - * A situation like this: - * - *
- *   Delivery-request#1 -> Delivery-request#2 -> ...
- * 
- * - * ... will result in the second requester getting a [[SimpleOrderedRedeliverer.Busy]] message with [[UUID]]s - * of both his request and currently-processed one. - * - * @param retryTimeout how long to wait for the [[SimpleOrderedRedeliverer.Ack]] message - */ -class SimpleOrderedRedeliverer(retryTimeout: FiniteDuration) extends Actor with FSM[SimpleOrderedRedeliverer.State, SimpleOrderedRedeliverer.Data] { - import SimpleOrderedRedeliverer._ - - // So that we don't make a typo when referencing this timer. - val RetryTimer = "retry" - - // Start idle with neither last request, nor most recent requester. - startWith(Idle, NoData) - - /** - * Will process the provided request, sending an [[Ackable]] to its recipient and resetting the inner timer. - * @return a new post-processing state. - */ - def process(request: Deliver, requester: ActorRef): State = { - request.to ! Ackable(requester, request.msg, request.uuid) - setTimer(RetryTimer, Retry, retryTimeout, repeat = false) - goto(AwaitingAck) using LastRequest(request, requester) - } - - /* - * When [[Idle]], accept new requests and process them, replying with [[WillTry]]. - */ - when(Idle) { - case Event(request: Deliver, _) => - process(request, sender()) replying AcceptedForDelivery(request.uuid) - } - - when(AwaitingAck) { - - /* - * When awaiting the [[Ack]] and receiver seems not to have made it, - * resend the message wrapped in [[Ackable]]. This time, however, without - * sending [[WillTry]] to our requester! - */ - case Event(Retry, LastRequest(request, requester)) => - process(request, requester) - - /* - * Fortunately, the receiver made it! It his is an [[Ack]] of correct [[UUID]], - * cancel the retry timer, notify original requester with [[Delivered]] message, - * and turn [[Idle]] again. - */ - case Event(Ack(uuid), LastRequest(request, requester)) if uuid == request.uuid => - cancelTimer(RetryTimer) - requester ! Delivered(uuid) - goto(Idle) using NoData - - /* - * Someone (possibly else!) 
is trying to make the [[SimpleOrderedRedeliverer]] deliver a new message, - * while an [[Ack]] for the last one has not yet been delivered. Reject. - */ - case Event(request: Deliver, LastRequest(current, _)) => - stay() replying Busy(request.uuid, current.uuid) - } - -} - -object Receiver { - /** - * Props for creating a [[Receiver]]. - */ - def props = Props(classOf[Receiver]) -} - -class Receiver extends Actor { - /** - * Simulate losing 75% of all messages on the receiving end. We want to see the redelivery in action! - */ - def shouldSendAck = ThreadLocalRandom.current.nextDouble() < 0.25 - - def receive = { - case SimpleOrderedRedeliverer.Ackable(from, msg, uuid) => - val goingToSendAck = shouldSendAck - println(s""" [Receiver] got "$msg"; ${if (goingToSendAck) "" else " ***NOT***"} going to send Ack this time""") - // Send a [[SimpleOrderedRedeliverer.Ack]] -- if they're lucky! - if (goingToSendAck) sender() ! SimpleOrderedRedeliverer.Ack(uuid) - } -} - -object Requester { - /** - * Props for creating a [[Requester]]. - */ - def props = Props(classOf[Requester]) - - /** - * Requester-private message used to drive the simulation. - */ - private case object Tick -} - -class Requester extends Actor { - import Requester._ - import context.dispatcher - - /* - * Create a [[SimpleOrderedRedeliverer]] and a [[Receiver]]. - */ - val redeliverer = context.actorOf(SimpleOrderedRedeliverer.props(retryTimeout = 3.seconds)) - val receiver = context.actorOf(Receiver.props) - - /* - * One message would be quite boring, let's pick a random of the three! - */ - val messages = List("Hello!", "Ping!", "Howdy!") - - /* - * Start ticking! - */ - self ! Tick - - /** - * Make a new request every anywhere-between-1-and-10 seconds. 
- */ - def nextTickIn: FiniteDuration = (1.0 + ThreadLocalRandom.current.nextDouble() * 9.0).seconds - - def receive = { - case Tick => - val msg = util.Random.shuffle(messages).head - val uuid = UUID.randomUUID() - println(s"""[Requester] requesting ("$msg", $uuid) to be sent to [Receiver]...""") - - /* - * Make the actual request... - */ - redeliverer ! SimpleOrderedRedeliverer.Deliver(receiver, msg, uuid) - - /* - * ... and schedule a new [[Tick]]. - */ - context.system.scheduler.scheduleOnce(nextTickIn, self, Tick) - - /* - * This case is used for displaying [[SimpleOrderedRedeliverer.WillTry]] and [[SimpleOrderedRedeliverer.Delivered]] - * and [[SimpleOrderedRedeliverer.Busy]] messages. - */ - case msg => println(s"[Requester] got $msg") - } - -} - -object FsmSimpleRedelivery extends App { - - val system = ActorSystem() - - /* - * Start a new [[Requester]] actor. - */ - system.actorOf(Requester.props) - -} diff --git a/akka-samples/akka-sample-fsm-scala/tutorial/index.html b/akka-samples/akka-sample-fsm-scala/tutorial/index.html deleted file mode 100644 index 6ad8955fc2..0000000000 --- a/akka-samples/akka-sample-fsm-scala/tutorial/index.html +++ /dev/null @@ -1,94 +0,0 @@ - - - Akka FSM in Scala - - - - -
-

Finite State Machine in Actors

- -

-This sample is an adaptation of -Dining Hakkers. -It illustrates how state and behavior can be managed within -an Actor with two different approaches; using become and using -the FSM trait. -The sample also contains an implementation of a simple redelivering actor implemented as a FSM. -

- - -
-
- -

Dining Hakkers with Become

- -

-Open DiningHakkersOnBecome.scala. -

- -

-It illustrates how current behavior can be replaced with context.become. -Note that no var members are used, instead the state is encoded in the current -behavior and its parameters. -

- -

-Go to the Run tab, and start the application's main class -sample.become.DiningHakkersOnBecome. -In the log output you can see the actions of the Hakker actors. -

- -

-Read more about become in -the documentation. -

- -
-
- -

Dining Hakkers with FSM

- -

-Open DiningHakkersOnFsm.scala. -

- -

-It illustrates how the states and transitions can be defined with the akka.actor.FSM trait. -

- -

-Go to the Run tab, and start the application's main class -sample.fsm.DiningHakkersOnFsm. -In the log output you can see the actions of the Hakker actors. -

- -

-Read more about akka.actor.FSM in -the documentation. -

- -
-
- -

Simple redelivering FSM

- -

-Open FsmSimpleRedelivery.scala. -

- -

-It illustrates how you can take care of message redelivery between two or more sides. -This implementation is able to process only one message at a time. -

- -

-Go to the Run tab, and start the application's main class -sample.redelivery.FsmSimpleRedelivery. -In the log output you can see the actions of the Requester and the Receiver actors. -

- -
- - - diff --git a/akka-samples/akka-sample-main-java-lambda/COPYING b/akka-samples/akka-sample-main-java-lambda/COPYING deleted file mode 100644 index 0e259d42c9..0000000000 --- a/akka-samples/akka-sample-main-java-lambda/COPYING +++ /dev/null @@ -1,121 +0,0 @@ -Creative Commons Legal Code - -CC0 1.0 Universal - - CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE - LEGAL SERVICES. DISTRIBUTION OF THIS DOCUMENT DOES NOT CREATE AN - ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS - INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES - REGARDING THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS - PROVIDED HEREUNDER, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM - THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS PROVIDED - HEREUNDER. - -Statement of Purpose - -The laws of most jurisdictions throughout the world automatically confer -exclusive Copyright and Related Rights (defined below) upon the creator -and subsequent owner(s) (each and all, an "owner") of an original work of -authorship and/or a database (each, a "Work"). - -Certain owners wish to permanently relinquish those rights to a Work for -the purpose of contributing to a commons of creative, cultural and -scientific works ("Commons") that the public can reliably and without fear -of later claims of infringement build upon, modify, incorporate in other -works, reuse and redistribute as freely as possible in any form whatsoever -and for any purposes, including without limitation commercial purposes. -These owners may contribute to the Commons to promote the ideal of a free -culture and the further production of creative, cultural and scientific -works, or to gain reputation or greater distribution for their Work in -part through the use and efforts of others. 
- -For these and/or other purposes and motivations, and without any -expectation of additional consideration or compensation, the person -associating CC0 with a Work (the "Affirmer"), to the extent that he or she -is an owner of Copyright and Related Rights in the Work, voluntarily -elects to apply CC0 to the Work and publicly distribute the Work under its -terms, with knowledge of his or her Copyright and Related Rights in the -Work and the meaning and intended legal effect of CC0 on those rights. - -1. Copyright and Related Rights. A Work made available under CC0 may be -protected by copyright and related or neighboring rights ("Copyright and -Related Rights"). Copyright and Related Rights include, but are not -limited to, the following: - - i. the right to reproduce, adapt, distribute, perform, display, - communicate, and translate a Work; - ii. moral rights retained by the original author(s) and/or performer(s); -iii. publicity and privacy rights pertaining to a person's image or - likeness depicted in a Work; - iv. rights protecting against unfair competition in regards to a Work, - subject to the limitations in paragraph 4(a), below; - v. rights protecting the extraction, dissemination, use and reuse of data - in a Work; - vi. database rights (such as those arising under Directive 96/9/EC of the - European Parliament and of the Council of 11 March 1996 on the legal - protection of databases, and under any national implementation - thereof, including any amended or successor version of such - directive); and -vii. other similar, equivalent or corresponding rights throughout the - world based on applicable law or treaty, and any national - implementations thereof. - -2. Waiver. 
To the greatest extent permitted by, but not in contravention -of, applicable law, Affirmer hereby overtly, fully, permanently, -irrevocably and unconditionally waives, abandons, and surrenders all of -Affirmer's Copyright and Related Rights and associated claims and causes -of action, whether now known or unknown (including existing as well as -future claims and causes of action), in the Work (i) in all territories -worldwide, (ii) for the maximum duration provided by applicable law or -treaty (including future time extensions), (iii) in any current or future -medium and for any number of copies, and (iv) for any purpose whatsoever, -including without limitation commercial, advertising or promotional -purposes (the "Waiver"). Affirmer makes the Waiver for the benefit of each -member of the public at large and to the detriment of Affirmer's heirs and -successors, fully intending that such Waiver shall not be subject to -revocation, rescission, cancellation, termination, or any other legal or -equitable action to disrupt the quiet enjoyment of the Work by the public -as contemplated by Affirmer's express Statement of Purpose. - -3. Public License Fallback. Should any part of the Waiver for any reason -be judged legally invalid or ineffective under applicable law, then the -Waiver shall be preserved to the maximum extent permitted taking into -account Affirmer's express Statement of Purpose. 
In addition, to the -extent the Waiver is so judged Affirmer hereby grants to each affected -person a royalty-free, non transferable, non sublicensable, non exclusive, -irrevocable and unconditional license to exercise Affirmer's Copyright and -Related Rights in the Work (i) in all territories worldwide, (ii) for the -maximum duration provided by applicable law or treaty (including future -time extensions), (iii) in any current or future medium and for any number -of copies, and (iv) for any purpose whatsoever, including without -limitation commercial, advertising or promotional purposes (the -"License"). The License shall be deemed effective as of the date CC0 was -applied by Affirmer to the Work. Should any part of the License for any -reason be judged legally invalid or ineffective under applicable law, such -partial invalidity or ineffectiveness shall not invalidate the remainder -of the License, and in such case Affirmer hereby affirms that he or she -will not (i) exercise any of his or her remaining Copyright and Related -Rights in the Work or (ii) assert any associated claims and causes of -action with respect to the Work, in either case contrary to Affirmer's -express Statement of Purpose. - -4. Limitations and Disclaimers. - - a. No trademark or patent rights held by Affirmer are waived, abandoned, - surrendered, licensed or otherwise affected by this document. - b. Affirmer offers the Work as-is and makes no representations or - warranties of any kind concerning the Work, express, implied, - statutory or otherwise, including without limitation warranties of - title, merchantability, fitness for a particular purpose, non - infringement, or the absence of latent or other defects, accuracy, or - the present or absence of errors, whether or not discoverable, all to - the greatest extent permissible under applicable law. - c. 
Affirmer disclaims responsibility for clearing rights of other persons - that may apply to the Work or any use thereof, including without - limitation any person's Copyright and Related Rights in the Work. - Further, Affirmer disclaims responsibility for obtaining any necessary - consents, permissions or other rights required for any use of the - Work. - d. Affirmer understands and acknowledges that Creative Commons is not a - party to this document and has no duty or obligation with respect to - this CC0 or use of the Work. diff --git a/akka-samples/akka-sample-main-java-lambda/LICENSE b/akka-samples/akka-sample-main-java-lambda/LICENSE deleted file mode 100644 index 2a18b45589..0000000000 --- a/akka-samples/akka-sample-main-java-lambda/LICENSE +++ /dev/null @@ -1,10 +0,0 @@ -Activator Template by Lightbend - -Licensed under Public Domain (CC0) - -To the extent possible under law, the person who associated CC0 with -this Activator Tempate has waived all copyright and related or neighboring -rights to this Activator Template. - -You should have received a copy of the CC0 legalcode along with this -work. If not, see . diff --git a/akka-samples/akka-sample-main-java-lambda/activator.properties b/akka-samples/akka-sample-main-java-lambda/activator.properties deleted file mode 100644 index 4a09a754df..0000000000 --- a/akka-samples/akka-sample-main-java-lambda/activator.properties +++ /dev/null @@ -1,7 +0,0 @@ -name=akka-sample-main-java-lambda -title=Akka Main in Java with Lambdas -description=Actor based version of obligatory Hello World program using the generic launcher class akka.Main. 
-tags=Basics,akka,java,java8,starter -authorName=Akka Team -authorLink=http://akka.io/ -sourceLink=https://github.com/akka/akka diff --git a/akka-samples/akka-sample-main-java-lambda/build.sbt b/akka-samples/akka-sample-main-java-lambda/build.sbt deleted file mode 100644 index 45e61a20d5..0000000000 --- a/akka-samples/akka-sample-main-java-lambda/build.sbt +++ /dev/null @@ -1,11 +0,0 @@ -name := "akka-sample-main-java-lambda" - -version := "2.5-SNAPSHOT" - -scalaVersion := "2.11.8" - -libraryDependencies ++= Seq( - "com.typesafe.akka" %% "akka-actor" % "2.5-SNAPSHOT" -) - -licenses := Seq(("CC0", url("http://creativecommons.org/publicdomain/zero/1.0"))) diff --git a/akka-samples/akka-sample-main-java-lambda/pom.xml b/akka-samples/akka-sample-main-java-lambda/pom.xml deleted file mode 100644 index 5c817f16fb..0000000000 --- a/akka-samples/akka-sample-main-java-lambda/pom.xml +++ /dev/null @@ -1,41 +0,0 @@ - - - - 4.0.0 - akka-sample-main-java-lambda - com.typesafe.akka.samples - Akka Main in Java - 2.5-SNAPSHOT - - - UTF-8 - - - - - com.typesafe.akka - akka-actor_2.11 - 2.5-SNAPSHOT - - - - - - - org.apache.maven.plugins - maven-compiler-plugin - 3.1 - - 1.8 - 1.8 - true - - -Xlint - - - - - - diff --git a/akka-samples/akka-sample-main-java-lambda/project/build.properties b/akka-samples/akka-sample-main-java-lambda/project/build.properties deleted file mode 100644 index 748703f770..0000000000 --- a/akka-samples/akka-sample-main-java-lambda/project/build.properties +++ /dev/null @@ -1 +0,0 @@ -sbt.version=0.13.7 diff --git a/akka-samples/akka-sample-main-java-lambda/src/main/java/sample/hello/Greeter.java b/akka-samples/akka-sample-main-java-lambda/src/main/java/sample/hello/Greeter.java deleted file mode 100644 index 4992a8ea84..0000000000 --- a/akka-samples/akka-sample-main-java-lambda/src/main/java/sample/hello/Greeter.java +++ /dev/null @@ -1,19 +0,0 @@ -package sample.hello; - -import akka.actor.AbstractActor; -import akka.japi.pf.ReceiveBuilder; - -public class 
Greeter extends AbstractActor { - - public static enum Msg { - GREET, DONE; - } - - public Greeter() { - receive(ReceiveBuilder. - matchEquals(Msg.GREET, m -> { - System.out.println("Hello World!"); - sender().tell(Msg.DONE, self()); - }).build()); - } -} diff --git a/akka-samples/akka-sample-main-java-lambda/src/main/java/sample/hello/HelloWorld.java b/akka-samples/akka-sample-main-java-lambda/src/main/java/sample/hello/HelloWorld.java deleted file mode 100644 index 1be9dcc28c..0000000000 --- a/akka-samples/akka-sample-main-java-lambda/src/main/java/sample/hello/HelloWorld.java +++ /dev/null @@ -1,26 +0,0 @@ -package sample.hello; - -import akka.actor.AbstractActor; -import akka.actor.Props; -import akka.actor.ActorRef; -import akka.japi.pf.ReceiveBuilder; -import static sample.hello.Greeter.Msg; - -public class HelloWorld extends AbstractActor { - - public HelloWorld() { - receive(ReceiveBuilder. - matchEquals(Msg.DONE, m -> { - // when the greeter is done, stop this actor and with it the application - context().stop(self()); - }).build()); - } - - @Override - public void preStart() { - // create the greeter actor - final ActorRef greeter = getContext().actorOf(Props.create(Greeter.class), "greeter"); - // tell it to perform the greeting - greeter.tell(Greeter.Msg.GREET, self()); - } -} diff --git a/akka-samples/akka-sample-main-java-lambda/src/main/java/sample/hello/Main.java b/akka-samples/akka-sample-main-java-lambda/src/main/java/sample/hello/Main.java deleted file mode 100644 index f96fbcd486..0000000000 --- a/akka-samples/akka-sample-main-java-lambda/src/main/java/sample/hello/Main.java +++ /dev/null @@ -1,8 +0,0 @@ -package sample.hello; - -public class Main { - - public static void main(String[] args) { - akka.Main.main(new String[] { HelloWorld.class.getName() }); - } -} diff --git a/akka-samples/akka-sample-main-java-lambda/src/main/java/sample/hello/Main2.java b/akka-samples/akka-sample-main-java-lambda/src/main/java/sample/hello/Main2.java deleted 
file mode 100644 index e5618086e7..0000000000 --- a/akka-samples/akka-sample-main-java-lambda/src/main/java/sample/hello/Main2.java +++ /dev/null @@ -1,28 +0,0 @@ -package sample.hello; - -import akka.actor.*; -import akka.japi.pf.ReceiveBuilder; - -public class Main2 { - - public static void main(String[] args) { - ActorSystem system = ActorSystem.create("Hello"); - ActorRef a = system.actorOf(Props.create(HelloWorld.class), "helloWorld"); - system.actorOf(Props.create(Terminator.class, a), "terminator"); - } - - public static class Terminator extends AbstractLoggingActor { - - private final ActorRef ref; - - public Terminator(ActorRef ref) { - this.ref = ref; - getContext().watch(ref); - receive(ReceiveBuilder. - match(Terminated.class, t -> { - log().info("{} has terminated, shutting down system", ref.path()); - context().system().terminate(); - }).build()); - } - } -} diff --git a/akka-samples/akka-sample-main-java-lambda/src/main/resources/application.conf b/akka-samples/akka-sample-main-java-lambda/src/main/resources/application.conf deleted file mode 100644 index 26cc28fc6b..0000000000 --- a/akka-samples/akka-sample-main-java-lambda/src/main/resources/application.conf +++ /dev/null @@ -1,3 +0,0 @@ -akka { - loglevel = INFO -} diff --git a/akka-samples/akka-sample-main-java-lambda/tutorial/index.html b/akka-samples/akka-sample-main-java-lambda/tutorial/index.html deleted file mode 100644 index 809eb33606..0000000000 --- a/akka-samples/akka-sample-main-java-lambda/tutorial/index.html +++ /dev/null @@ -1,119 +0,0 @@ - - - The Obligatory Hello World - - - - -
-

The Obligatory Hello World

- -

-Since every programming paradigm needs to solve the tough problem of printing a -well-known greeting to the console we’ll introduce you to the actor-based -version. -

- -

-Open HelloWorld.java -

- -

-The HelloWorld actor is the application’s “main” class; when it terminates -the application will shut down—more on that later. The main business logic -happens in the preStart method, where a Greeter actor is created -and instructed to issue that greeting we crave for. When the greeter is done it -will tell us so by sending back a message, and when that message has been -received it will be passed into the behavior described by the onReceive -method where we can conclude the demonstration by stopping the HelloWorld -actor. -

- -
-
- -

The Greeter

- -

-You will be very curious to see how the Greeter actor performs the -actual task. Open Greeter.java. -

- -

- -This is extremely simple now: after its creation this actor will not do -anything until someone sends it a message, and if that happens to be an -invitation to greet the world then the Greeter complies and informs the -requester that the deed has been done. -

- -
-
- -

Main class

- -

-Go to the Run tab, and start the application main class -sample.hello.Main. In the log output you can see the "Hello World!" greeting. -

- -

-Main.java -is actually just a small wrapper around the generic launcher class akka.Main, -which expects only one argument: the class name of the application’s main actor. This main -method will then create the infrastructure needed for running the actors, start the -given main actor and arrange for the whole application to shut down once the -main actor terminates. Thus you will be able to run the application with a -command similar to the following: -

- -

-java -classpath  akka.Main sample.hello.HelloWorld
-
- -

-This conveniently assumes placement of the above class definitions in package -sample.hello and it further assumes that you have the required JAR files for -scala-library, typesafe-config and akka-actor available. -The easiest would be to manage these dependencies with a -build tool. -

- -

-If you need more control of the startup code than what is provided by akka.Main -you can easily write your own main class such as -Main2.java -

- -

-Try to run the sample.hello.Main2 class -by selecting it in the 'Main class' menu in the Run tab. -

- -
- -
- -

Run with Maven

- -

-This sample also includes a Maven pom.xml. -

- -

-You can run the main classes with mvn from a terminal window using the -Exec Maven Plugin. -

- -

-mvn compile exec:java -Dexec.mainClass="akka.Main" -Dexec.args="sample.hello.HelloWorld"
-
- -

-mvn compile exec:java -Dexec.mainClass="sample.hello.Main2"
-
- -
- - - diff --git a/akka-samples/akka-sample-main-java/COPYING b/akka-samples/akka-sample-main-java/COPYING deleted file mode 100644 index 0e259d42c9..0000000000 --- a/akka-samples/akka-sample-main-java/COPYING +++ /dev/null @@ -1,121 +0,0 @@ -Creative Commons Legal Code - -CC0 1.0 Universal - - CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE - LEGAL SERVICES. DISTRIBUTION OF THIS DOCUMENT DOES NOT CREATE AN - ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS - INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES - REGARDING THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS - PROVIDED HEREUNDER, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM - THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS PROVIDED - HEREUNDER. - -Statement of Purpose - -The laws of most jurisdictions throughout the world automatically confer -exclusive Copyright and Related Rights (defined below) upon the creator -and subsequent owner(s) (each and all, an "owner") of an original work of -authorship and/or a database (each, a "Work"). - -Certain owners wish to permanently relinquish those rights to a Work for -the purpose of contributing to a commons of creative, cultural and -scientific works ("Commons") that the public can reliably and without fear -of later claims of infringement build upon, modify, incorporate in other -works, reuse and redistribute as freely as possible in any form whatsoever -and for any purposes, including without limitation commercial purposes. -These owners may contribute to the Commons to promote the ideal of a free -culture and the further production of creative, cultural and scientific -works, or to gain reputation or greater distribution for their Work in -part through the use and efforts of others. 
- -For these and/or other purposes and motivations, and without any -expectation of additional consideration or compensation, the person -associating CC0 with a Work (the "Affirmer"), to the extent that he or she -is an owner of Copyright and Related Rights in the Work, voluntarily -elects to apply CC0 to the Work and publicly distribute the Work under its -terms, with knowledge of his or her Copyright and Related Rights in the -Work and the meaning and intended legal effect of CC0 on those rights. - -1. Copyright and Related Rights. A Work made available under CC0 may be -protected by copyright and related or neighboring rights ("Copyright and -Related Rights"). Copyright and Related Rights include, but are not -limited to, the following: - - i. the right to reproduce, adapt, distribute, perform, display, - communicate, and translate a Work; - ii. moral rights retained by the original author(s) and/or performer(s); -iii. publicity and privacy rights pertaining to a person's image or - likeness depicted in a Work; - iv. rights protecting against unfair competition in regards to a Work, - subject to the limitations in paragraph 4(a), below; - v. rights protecting the extraction, dissemination, use and reuse of data - in a Work; - vi. database rights (such as those arising under Directive 96/9/EC of the - European Parliament and of the Council of 11 March 1996 on the legal - protection of databases, and under any national implementation - thereof, including any amended or successor version of such - directive); and -vii. other similar, equivalent or corresponding rights throughout the - world based on applicable law or treaty, and any national - implementations thereof. - -2. Waiver. 
To the greatest extent permitted by, but not in contravention -of, applicable law, Affirmer hereby overtly, fully, permanently, -irrevocably and unconditionally waives, abandons, and surrenders all of -Affirmer's Copyright and Related Rights and associated claims and causes -of action, whether now known or unknown (including existing as well as -future claims and causes of action), in the Work (i) in all territories -worldwide, (ii) for the maximum duration provided by applicable law or -treaty (including future time extensions), (iii) in any current or future -medium and for any number of copies, and (iv) for any purpose whatsoever, -including without limitation commercial, advertising or promotional -purposes (the "Waiver"). Affirmer makes the Waiver for the benefit of each -member of the public at large and to the detriment of Affirmer's heirs and -successors, fully intending that such Waiver shall not be subject to -revocation, rescission, cancellation, termination, or any other legal or -equitable action to disrupt the quiet enjoyment of the Work by the public -as contemplated by Affirmer's express Statement of Purpose. - -3. Public License Fallback. Should any part of the Waiver for any reason -be judged legally invalid or ineffective under applicable law, then the -Waiver shall be preserved to the maximum extent permitted taking into -account Affirmer's express Statement of Purpose. 
In addition, to the -extent the Waiver is so judged Affirmer hereby grants to each affected -person a royalty-free, non transferable, non sublicensable, non exclusive, -irrevocable and unconditional license to exercise Affirmer's Copyright and -Related Rights in the Work (i) in all territories worldwide, (ii) for the -maximum duration provided by applicable law or treaty (including future -time extensions), (iii) in any current or future medium and for any number -of copies, and (iv) for any purpose whatsoever, including without -limitation commercial, advertising or promotional purposes (the -"License"). The License shall be deemed effective as of the date CC0 was -applied by Affirmer to the Work. Should any part of the License for any -reason be judged legally invalid or ineffective under applicable law, such -partial invalidity or ineffectiveness shall not invalidate the remainder -of the License, and in such case Affirmer hereby affirms that he or she -will not (i) exercise any of his or her remaining Copyright and Related -Rights in the Work or (ii) assert any associated claims and causes of -action with respect to the Work, in either case contrary to Affirmer's -express Statement of Purpose. - -4. Limitations and Disclaimers. - - a. No trademark or patent rights held by Affirmer are waived, abandoned, - surrendered, licensed or otherwise affected by this document. - b. Affirmer offers the Work as-is and makes no representations or - warranties of any kind concerning the Work, express, implied, - statutory or otherwise, including without limitation warranties of - title, merchantability, fitness for a particular purpose, non - infringement, or the absence of latent or other defects, accuracy, or - the present or absence of errors, whether or not discoverable, all to - the greatest extent permissible under applicable law. - c. 
Affirmer disclaims responsibility for clearing rights of other persons - that may apply to the Work or any use thereof, including without - limitation any person's Copyright and Related Rights in the Work. - Further, Affirmer disclaims responsibility for obtaining any necessary - consents, permissions or other rights required for any use of the - Work. - d. Affirmer understands and acknowledges that Creative Commons is not a - party to this document and has no duty or obligation with respect to - this CC0 or use of the Work. diff --git a/akka-samples/akka-sample-main-java/LICENSE b/akka-samples/akka-sample-main-java/LICENSE deleted file mode 100644 index 2a18b45589..0000000000 --- a/akka-samples/akka-sample-main-java/LICENSE +++ /dev/null @@ -1,10 +0,0 @@ -Activator Template by Lightbend - -Licensed under Public Domain (CC0) - -To the extent possible under law, the person who associated CC0 with -this Activator Tempate has waived all copyright and related or neighboring -rights to this Activator Template. - -You should have received a copy of the CC0 legalcode along with this -work. If not, see . diff --git a/akka-samples/akka-sample-main-java/activator.properties b/akka-samples/akka-sample-main-java/activator.properties deleted file mode 100644 index 09f985bc52..0000000000 --- a/akka-samples/akka-sample-main-java/activator.properties +++ /dev/null @@ -1,7 +0,0 @@ -name=akka-sample-main-java -title=Akka Main in Java -description=Actor based version of obligatory Hello World program using the generic launcher class akka.Main. 
-tags=Basics,akka,java,starter -authorName=Akka Team -authorLink=http://akka.io/ -sourceLink=https://github.com/akka/akka diff --git a/akka-samples/akka-sample-main-java/build.sbt b/akka-samples/akka-sample-main-java/build.sbt deleted file mode 100644 index d451232985..0000000000 --- a/akka-samples/akka-sample-main-java/build.sbt +++ /dev/null @@ -1,11 +0,0 @@ -name := "akka-sample-main-java" - -version := "2.5-SNAPSHOT" - -scalaVersion := "2.11.8" - -libraryDependencies ++= Seq( - "com.typesafe.akka" %% "akka-actor" % "2.5-SNAPSHOT" -) - -licenses := Seq(("CC0", url("http://creativecommons.org/publicdomain/zero/1.0"))) diff --git a/akka-samples/akka-sample-main-java/pom.xml b/akka-samples/akka-sample-main-java/pom.xml deleted file mode 100644 index 0f83da61bf..0000000000 --- a/akka-samples/akka-sample-main-java/pom.xml +++ /dev/null @@ -1,24 +0,0 @@ - - - - 4.0.0 - akka-sample-main-java - com.typesafe.akka.samples - Akka Main in Java - 1.0 - - - UTF-8 - - - - - com.typesafe.akka - akka-actor_2.11 - 2.5-SNAPSHOT - - - - diff --git a/akka-samples/akka-sample-main-java/project/build.properties b/akka-samples/akka-sample-main-java/project/build.properties deleted file mode 100644 index 748703f770..0000000000 --- a/akka-samples/akka-sample-main-java/project/build.properties +++ /dev/null @@ -1 +0,0 @@ -sbt.version=0.13.7 diff --git a/akka-samples/akka-sample-main-java/src/main/java/sample/hello/Greeter.java b/akka-samples/akka-sample-main-java/src/main/java/sample/hello/Greeter.java deleted file mode 100644 index 7bcf2c2941..0000000000 --- a/akka-samples/akka-sample-main-java/src/main/java/sample/hello/Greeter.java +++ /dev/null @@ -1,20 +0,0 @@ -package sample.hello; - -import akka.actor.UntypedActor; - -public class Greeter extends UntypedActor { - - public static enum Msg { - GREET, DONE; - } - - @Override - public void onReceive(Object msg) { - if (msg == Msg.GREET) { - System.out.println("Hello World!"); - getSender().tell(Msg.DONE, getSelf()); - } else - 
unhandled(msg); - } - -} diff --git a/akka-samples/akka-sample-main-java/src/main/java/sample/hello/HelloWorld.java b/akka-samples/akka-sample-main-java/src/main/java/sample/hello/HelloWorld.java deleted file mode 100644 index 49e14ecf7a..0000000000 --- a/akka-samples/akka-sample-main-java/src/main/java/sample/hello/HelloWorld.java +++ /dev/null @@ -1,25 +0,0 @@ -package sample.hello; - -import akka.actor.Props; -import akka.actor.UntypedActor; -import akka.actor.ActorRef; - -public class HelloWorld extends UntypedActor { - - @Override - public void preStart() { - // create the greeter actor - final ActorRef greeter = getContext().actorOf(Props.create(Greeter.class), "greeter"); - // tell it to perform the greeting - greeter.tell(Greeter.Msg.GREET, getSelf()); - } - - @Override - public void onReceive(Object msg) { - if (msg == Greeter.Msg.DONE) { - // when the greeter is done, stop this actor and with it the application - getContext().stop(getSelf()); - } else - unhandled(msg); - } -} diff --git a/akka-samples/akka-sample-main-java/src/main/java/sample/hello/Main.java b/akka-samples/akka-sample-main-java/src/main/java/sample/hello/Main.java deleted file mode 100644 index f96fbcd486..0000000000 --- a/akka-samples/akka-sample-main-java/src/main/java/sample/hello/Main.java +++ /dev/null @@ -1,8 +0,0 @@ -package sample.hello; - -public class Main { - - public static void main(String[] args) { - akka.Main.main(new String[] { HelloWorld.class.getName() }); - } -} diff --git a/akka-samples/akka-sample-main-java/src/main/java/sample/hello/Main2.java b/akka-samples/akka-sample-main-java/src/main/java/sample/hello/Main2.java deleted file mode 100644 index 23754bc840..0000000000 --- a/akka-samples/akka-sample-main-java/src/main/java/sample/hello/Main2.java +++ /dev/null @@ -1,40 +0,0 @@ -package sample.hello; - -import akka.actor.ActorRef; -import akka.actor.ActorSystem; -import akka.actor.Props; -import akka.actor.Terminated; -import akka.actor.UntypedActor; -import 
akka.event.Logging; -import akka.event.LoggingAdapter; - -public class Main2 { - - public static void main(String[] args) { - ActorSystem system = ActorSystem.create("Hello"); - ActorRef a = system.actorOf(Props.create(HelloWorld.class), "helloWorld"); - system.actorOf(Props.create(Terminator.class, a), "terminator"); - } - - public static class Terminator extends UntypedActor { - - private final LoggingAdapter log = Logging.getLogger(getContext().system(), this); - private final ActorRef ref; - - public Terminator(ActorRef ref) { - this.ref = ref; - getContext().watch(ref); - } - - @Override - public void onReceive(Object msg) { - if (msg instanceof Terminated) { - log.info("{} has terminated, shutting down system", ref.path()); - getContext().system().terminate(); - } else { - unhandled(msg); - } - } - - } -} diff --git a/akka-samples/akka-sample-main-java/src/main/resources/application.conf b/akka-samples/akka-sample-main-java/src/main/resources/application.conf deleted file mode 100644 index 26cc28fc6b..0000000000 --- a/akka-samples/akka-sample-main-java/src/main/resources/application.conf +++ /dev/null @@ -1,3 +0,0 @@ -akka { - loglevel = INFO -} diff --git a/akka-samples/akka-sample-main-java/tutorial/index.html b/akka-samples/akka-sample-main-java/tutorial/index.html deleted file mode 100644 index 66b99d1a97..0000000000 --- a/akka-samples/akka-sample-main-java/tutorial/index.html +++ /dev/null @@ -1,119 +0,0 @@ - - - The Obligatory Hello World - - - - -
-

The Obligatory Hello World

- -

-Since every programming paradigm needs to solve the tough problem of printing a -well-known greeting to the console we’ll introduce you to the actor-based -version. -

- -

-Open HelloWorld.java -

- -

-The HelloWorld actor is the application’s “main” class; when it terminates -the application will shut down—more on that later. The main business logic -happens in the preStart method, where a Greeter actor is created -and instructed to issue that greeting we crave for. When the greeter is done it -will tell us so by sending back a message, and when that message has been -received it will be passed into the behavior described by the onReceive -method where we can conclude the demonstration by stopping the HelloWorld -actor. -

- -
-
- -

The Greeter

- -

-You will be very curious to see how the Greeter actor performs the -actual task. Open Greeter.java. -

- -

- -This is extremely simple now: after its creation this actor will not do -anything until someone sends it a message, and if that happens to be an -invitation to greet the world then the Greeter complies and informs the -requester that the deed has been done. -

- -
-
- -

Main class

- -

-Go to the Run tab, and start the application main class -sample.hello.Main. In the log output you can see the "Hello World!" greeting. -

- -

-Main.java -is actually just a small wrapper around the generic launcher class akka.Main, -which expects only one argument: the class name of the application’s main actor. This main -method will then create the infrastructure needed for running the actors, start the -given main actor and arrange for the whole application to shut down once the -main actor terminates. Thus you will be able to run the application with a -command similar to the following: -

- -

-java -classpath  akka.Main sample.hello.HelloWorld
-
- -

-This conveniently assumes placement of the above class definitions in package -sample.hello and it further assumes that you have the required JAR files for -scala-library, typesafe-config and akka-actor available. -The easiest would be to manage these dependencies with a -build tool. -

- -

-If you need more control of the startup code than what is provided by akka.Main -you can easily write your own main class such as -Main2.java -

- -

-Try to run the sample.hello.Main2 class -by selecting it in the 'Main class' menu in the Run tab. -

- -
- -
- -

Run with Maven

- -

-This sample also includes a Maven pom.xml. -

- -

-You can run the main classes with mvn from a terminal window using the -Exec Maven Plugin. -

- -

-mvn compile exec:java -Dexec.mainClass="akka.Main" -Dexec.args="sample.hello.HelloWorld"
-
- -

-mvn compile exec:java -Dexec.mainClass="sample.hello.Main2"
-
- -
- - - diff --git a/akka-samples/akka-sample-main-scala/COPYING b/akka-samples/akka-sample-main-scala/COPYING deleted file mode 100644 index 0e259d42c9..0000000000 --- a/akka-samples/akka-sample-main-scala/COPYING +++ /dev/null @@ -1,121 +0,0 @@ -Creative Commons Legal Code - -CC0 1.0 Universal - - CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE - LEGAL SERVICES. DISTRIBUTION OF THIS DOCUMENT DOES NOT CREATE AN - ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS - INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES - REGARDING THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS - PROVIDED HEREUNDER, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM - THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS PROVIDED - HEREUNDER. - -Statement of Purpose - -The laws of most jurisdictions throughout the world automatically confer -exclusive Copyright and Related Rights (defined below) upon the creator -and subsequent owner(s) (each and all, an "owner") of an original work of -authorship and/or a database (each, a "Work"). - -Certain owners wish to permanently relinquish those rights to a Work for -the purpose of contributing to a commons of creative, cultural and -scientific works ("Commons") that the public can reliably and without fear -of later claims of infringement build upon, modify, incorporate in other -works, reuse and redistribute as freely as possible in any form whatsoever -and for any purposes, including without limitation commercial purposes. -These owners may contribute to the Commons to promote the ideal of a free -culture and the further production of creative, cultural and scientific -works, or to gain reputation or greater distribution for their Work in -part through the use and efforts of others. 
- -For these and/or other purposes and motivations, and without any -expectation of additional consideration or compensation, the person -associating CC0 with a Work (the "Affirmer"), to the extent that he or she -is an owner of Copyright and Related Rights in the Work, voluntarily -elects to apply CC0 to the Work and publicly distribute the Work under its -terms, with knowledge of his or her Copyright and Related Rights in the -Work and the meaning and intended legal effect of CC0 on those rights. - -1. Copyright and Related Rights. A Work made available under CC0 may be -protected by copyright and related or neighboring rights ("Copyright and -Related Rights"). Copyright and Related Rights include, but are not -limited to, the following: - - i. the right to reproduce, adapt, distribute, perform, display, - communicate, and translate a Work; - ii. moral rights retained by the original author(s) and/or performer(s); -iii. publicity and privacy rights pertaining to a person's image or - likeness depicted in a Work; - iv. rights protecting against unfair competition in regards to a Work, - subject to the limitations in paragraph 4(a), below; - v. rights protecting the extraction, dissemination, use and reuse of data - in a Work; - vi. database rights (such as those arising under Directive 96/9/EC of the - European Parliament and of the Council of 11 March 1996 on the legal - protection of databases, and under any national implementation - thereof, including any amended or successor version of such - directive); and -vii. other similar, equivalent or corresponding rights throughout the - world based on applicable law or treaty, and any national - implementations thereof. - -2. Waiver. 
To the greatest extent permitted by, but not in contravention -of, applicable law, Affirmer hereby overtly, fully, permanently, -irrevocably and unconditionally waives, abandons, and surrenders all of -Affirmer's Copyright and Related Rights and associated claims and causes -of action, whether now known or unknown (including existing as well as -future claims and causes of action), in the Work (i) in all territories -worldwide, (ii) for the maximum duration provided by applicable law or -treaty (including future time extensions), (iii) in any current or future -medium and for any number of copies, and (iv) for any purpose whatsoever, -including without limitation commercial, advertising or promotional -purposes (the "Waiver"). Affirmer makes the Waiver for the benefit of each -member of the public at large and to the detriment of Affirmer's heirs and -successors, fully intending that such Waiver shall not be subject to -revocation, rescission, cancellation, termination, or any other legal or -equitable action to disrupt the quiet enjoyment of the Work by the public -as contemplated by Affirmer's express Statement of Purpose. - -3. Public License Fallback. Should any part of the Waiver for any reason -be judged legally invalid or ineffective under applicable law, then the -Waiver shall be preserved to the maximum extent permitted taking into -account Affirmer's express Statement of Purpose. 
In addition, to the -extent the Waiver is so judged Affirmer hereby grants to each affected -person a royalty-free, non transferable, non sublicensable, non exclusive, -irrevocable and unconditional license to exercise Affirmer's Copyright and -Related Rights in the Work (i) in all territories worldwide, (ii) for the -maximum duration provided by applicable law or treaty (including future -time extensions), (iii) in any current or future medium and for any number -of copies, and (iv) for any purpose whatsoever, including without -limitation commercial, advertising or promotional purposes (the -"License"). The License shall be deemed effective as of the date CC0 was -applied by Affirmer to the Work. Should any part of the License for any -reason be judged legally invalid or ineffective under applicable law, such -partial invalidity or ineffectiveness shall not invalidate the remainder -of the License, and in such case Affirmer hereby affirms that he or she -will not (i) exercise any of his or her remaining Copyright and Related -Rights in the Work or (ii) assert any associated claims and causes of -action with respect to the Work, in either case contrary to Affirmer's -express Statement of Purpose. - -4. Limitations and Disclaimers. - - a. No trademark or patent rights held by Affirmer are waived, abandoned, - surrendered, licensed or otherwise affected by this document. - b. Affirmer offers the Work as-is and makes no representations or - warranties of any kind concerning the Work, express, implied, - statutory or otherwise, including without limitation warranties of - title, merchantability, fitness for a particular purpose, non - infringement, or the absence of latent or other defects, accuracy, or - the present or absence of errors, whether or not discoverable, all to - the greatest extent permissible under applicable law. - c. 
Affirmer disclaims responsibility for clearing rights of other persons - that may apply to the Work or any use thereof, including without - limitation any person's Copyright and Related Rights in the Work. - Further, Affirmer disclaims responsibility for obtaining any necessary - consents, permissions or other rights required for any use of the - Work. - d. Affirmer understands and acknowledges that Creative Commons is not a - party to this document and has no duty or obligation with respect to - this CC0 or use of the Work. diff --git a/akka-samples/akka-sample-main-scala/LICENSE b/akka-samples/akka-sample-main-scala/LICENSE deleted file mode 100644 index 2a18b45589..0000000000 --- a/akka-samples/akka-sample-main-scala/LICENSE +++ /dev/null @@ -1,10 +0,0 @@ -Activator Template by Lightbend - -Licensed under Public Domain (CC0) - -To the extent possible under law, the person who associated CC0 with -this Activator Tempate has waived all copyright and related or neighboring -rights to this Activator Template. - -You should have received a copy of the CC0 legalcode along with this -work. If not, see . diff --git a/akka-samples/akka-sample-main-scala/activator.properties b/akka-samples/akka-sample-main-scala/activator.properties deleted file mode 100644 index 64a40cfe84..0000000000 --- a/akka-samples/akka-sample-main-scala/activator.properties +++ /dev/null @@ -1,7 +0,0 @@ -name=akka-sample-main-scala -title=Akka Main in Scala -description=Actor based version of obligatory Hello World program using the generic launcher class akka.Main. 
-tags=Basics,akka,scala,starter -authorName=Akka Team -authorLink=http://akka.io/ -sourceLink=https://github.com/akka/akka diff --git a/akka-samples/akka-sample-main-scala/build.sbt b/akka-samples/akka-sample-main-scala/build.sbt deleted file mode 100644 index a2991dc290..0000000000 --- a/akka-samples/akka-sample-main-scala/build.sbt +++ /dev/null @@ -1,29 +0,0 @@ -import NativePackagerHelper._ - -name := "akka-sample-main-scala" - -version := "2.5-SNAPSHOT" - -scalaVersion := "2.11.8" - -libraryDependencies ++= Seq( - "com.typesafe.akka" %% "akka-actor" % "2.5-SNAPSHOT" -) - -enablePlugins(JavaServerAppPackaging) - -mainClass in Compile := Some("sample.hello.Main") - -mappings in Universal ++= { - // optional example illustrating how to copy additional directory - directory("scripts") ++ - // copy configuration files to config directory - contentOf("src/main/resources").toMap.mapValues("config/" + _) -} - -// add 'config' directory first in the classpath of the start script, -// an alternative is to set the config file locations via CLI parameters -// when starting the application -scriptClasspath := Seq("../config/") ++ scriptClasspath.value - -licenses := Seq(("CC0", url("http://creativecommons.org/publicdomain/zero/1.0"))) diff --git a/akka-samples/akka-sample-main-scala/project/build.properties b/akka-samples/akka-sample-main-scala/project/build.properties deleted file mode 100644 index 748703f770..0000000000 --- a/akka-samples/akka-sample-main-scala/project/build.properties +++ /dev/null @@ -1 +0,0 @@ -sbt.version=0.13.7 diff --git a/akka-samples/akka-sample-main-scala/project/plugins.sbt b/akka-samples/akka-sample-main-scala/project/plugins.sbt deleted file mode 100644 index e2cad14b4d..0000000000 --- a/akka-samples/akka-sample-main-scala/project/plugins.sbt +++ /dev/null @@ -1 +0,0 @@ -addSbtPlugin("com.typesafe.sbt" % "sbt-native-packager" % "1.0.0-RC1") diff --git a/akka-samples/akka-sample-main-scala/src/main/resources/application.conf 
b/akka-samples/akka-sample-main-scala/src/main/resources/application.conf deleted file mode 100644 index 26cc28fc6b..0000000000 --- a/akka-samples/akka-sample-main-scala/src/main/resources/application.conf +++ /dev/null @@ -1,3 +0,0 @@ -akka { - loglevel = INFO -} diff --git a/akka-samples/akka-sample-main-scala/src/main/scala/sample/hello/Greeter.scala b/akka-samples/akka-sample-main-scala/src/main/scala/sample/hello/Greeter.scala deleted file mode 100644 index 8d0b1ab39a..0000000000 --- a/akka-samples/akka-sample-main-scala/src/main/scala/sample/hello/Greeter.scala +++ /dev/null @@ -1,16 +0,0 @@ -package sample.hello - -import akka.actor.Actor - -object Greeter { - case object Greet - case object Done -} - -class Greeter extends Actor { - def receive = { - case Greeter.Greet => - println("Hello World!") - sender() ! Greeter.Done - } -} \ No newline at end of file diff --git a/akka-samples/akka-sample-main-scala/src/main/scala/sample/hello/HelloWorld.scala b/akka-samples/akka-sample-main-scala/src/main/scala/sample/hello/HelloWorld.scala deleted file mode 100644 index e52619b7d8..0000000000 --- a/akka-samples/akka-sample-main-scala/src/main/scala/sample/hello/HelloWorld.scala +++ /dev/null @@ -1,20 +0,0 @@ -package sample.hello - -import akka.actor.Actor -import akka.actor.Props - -class HelloWorld extends Actor { - - override def preStart(): Unit = { - // create the greeter actor - val greeter = context.actorOf(Props[Greeter], "greeter") - // tell it to perform the greeting - greeter ! 
Greeter.Greet - } - - def receive = { - // when the greeter is done, stop this actor and with it the application - case Greeter.Done => context.stop(self) - } -} - diff --git a/akka-samples/akka-sample-main-scala/src/main/scala/sample/hello/Main.scala b/akka-samples/akka-sample-main-scala/src/main/scala/sample/hello/Main.scala deleted file mode 100644 index 7f11453a31..0000000000 --- a/akka-samples/akka-sample-main-scala/src/main/scala/sample/hello/Main.scala +++ /dev/null @@ -1,9 +0,0 @@ -package sample.hello - -object Main { - - def main(args: Array[String]): Unit = { - akka.Main.main(Array(classOf[HelloWorld].getName)) - } - -} \ No newline at end of file diff --git a/akka-samples/akka-sample-main-scala/src/main/scala/sample/hello/Main2.scala b/akka-samples/akka-sample-main-scala/src/main/scala/sample/hello/Main2.scala deleted file mode 100644 index 1d79c65206..0000000000 --- a/akka-samples/akka-sample-main-scala/src/main/scala/sample/hello/Main2.scala +++ /dev/null @@ -1,27 +0,0 @@ -package sample.hello - -import akka.actor.ActorSystem -import akka.actor.Props -import akka.actor.ActorRef -import akka.actor.Actor -import akka.actor.ActorLogging -import akka.actor.Terminated - -object Main2 { - - def main(args: Array[String]): Unit = { - val system = ActorSystem("Hello") - val a = system.actorOf(Props[HelloWorld], "helloWorld") - system.actorOf(Props(classOf[Terminator], a), "terminator") - } - - class Terminator(ref: ActorRef) extends Actor with ActorLogging { - context watch ref - def receive = { - case Terminated(_) => - log.info("{} has terminated, shutting down system", ref.path) - context.system.terminate() - } - } - -} \ No newline at end of file diff --git a/akka-samples/akka-sample-main-scala/tutorial/index.html b/akka-samples/akka-sample-main-scala/tutorial/index.html deleted file mode 100644 index 404ee3754a..0000000000 --- a/akka-samples/akka-sample-main-scala/tutorial/index.html +++ /dev/null @@ -1,96 +0,0 @@ - - - The Obligatory Hello World - - - - -
-

The Obligatory Hello World

- -

-Since every programming paradigm needs to solve the tough problem of printing a -well-known greeting to the console we’ll introduce you to the actor-based -version. -

- -

-Open HelloWorld.scala -

- -

-The HelloWorld actor is the application’s “main” class; when it terminates -the application will shut down—more on that later. The main business logic -happens in the preStart method, where a Greeter actor is created -and instructed to issue that greeting we crave for. When the greeter is done it -will tell us so by sending back a message, and when that message has been -received it will be passed into the behavior described by the receive -method where we can conclude the demonstration by stopping the HelloWorld -actor. -

- -
-
- -

The Greeter

- -

-You will be very curious to see how the Greeter actor performs the -actual task. Open Greeter.scala. -

- -

- -This is extremely simple now: after its creation this actor will not do -anything until someone sends it a message, and if that happens to be an -invitation to greet the world then the Greeter complies and informs the -requester that the deed has been done. -

- -
-
- -

Main class

- -

-Go to the Run tab, and start the application main class -sample.hello.Main. In the log output you can see the "Hello World!" greeting. -

- -

-Main.scala -is actually just a small wrapper around the generic launcher class akka.Main, -which expects only one argument: the class name of the application’s main actor. This main -method will then create the infrastructure needed for running the actors, start the -given main actor and arrange for the whole application to shut down once the -main actor terminates. Thus you will be able to run the application with a -command similar to the following: -

- -

-java -classpath  akka.Main sample.hello.HelloWorld
-
- -

-This conveniently assumes placement of the above class definitions in package -sample.hello and it further assumes that you have the required JAR files for -scala-library, typesafe-config and akka-actor available. -The easiest would be to manage these dependencies with a -build tool. -

- -

-If you need more control of the startup code than what is provided by akka.Main -you can easily write your own main class such as -Main2.scala -

- -

-Try to run the sample.hello.Main2 class -by selecting it in the 'Main class' menu in the Run tab. -

- -
- - - diff --git a/akka-samples/akka-sample-multi-node-scala/COPYING b/akka-samples/akka-sample-multi-node-scala/COPYING deleted file mode 100644 index 0e259d42c9..0000000000 --- a/akka-samples/akka-sample-multi-node-scala/COPYING +++ /dev/null @@ -1,121 +0,0 @@ -Creative Commons Legal Code - -CC0 1.0 Universal - - CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE - LEGAL SERVICES. DISTRIBUTION OF THIS DOCUMENT DOES NOT CREATE AN - ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS - INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES - REGARDING THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS - PROVIDED HEREUNDER, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM - THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS PROVIDED - HEREUNDER. - -Statement of Purpose - -The laws of most jurisdictions throughout the world automatically confer -exclusive Copyright and Related Rights (defined below) upon the creator -and subsequent owner(s) (each and all, an "owner") of an original work of -authorship and/or a database (each, a "Work"). - -Certain owners wish to permanently relinquish those rights to a Work for -the purpose of contributing to a commons of creative, cultural and -scientific works ("Commons") that the public can reliably and without fear -of later claims of infringement build upon, modify, incorporate in other -works, reuse and redistribute as freely as possible in any form whatsoever -and for any purposes, including without limitation commercial purposes. -These owners may contribute to the Commons to promote the ideal of a free -culture and the further production of creative, cultural and scientific -works, or to gain reputation or greater distribution for their Work in -part through the use and efforts of others. 
- -For these and/or other purposes and motivations, and without any -expectation of additional consideration or compensation, the person -associating CC0 with a Work (the "Affirmer"), to the extent that he or she -is an owner of Copyright and Related Rights in the Work, voluntarily -elects to apply CC0 to the Work and publicly distribute the Work under its -terms, with knowledge of his or her Copyright and Related Rights in the -Work and the meaning and intended legal effect of CC0 on those rights. - -1. Copyright and Related Rights. A Work made available under CC0 may be -protected by copyright and related or neighboring rights ("Copyright and -Related Rights"). Copyright and Related Rights include, but are not -limited to, the following: - - i. the right to reproduce, adapt, distribute, perform, display, - communicate, and translate a Work; - ii. moral rights retained by the original author(s) and/or performer(s); -iii. publicity and privacy rights pertaining to a person's image or - likeness depicted in a Work; - iv. rights protecting against unfair competition in regards to a Work, - subject to the limitations in paragraph 4(a), below; - v. rights protecting the extraction, dissemination, use and reuse of data - in a Work; - vi. database rights (such as those arising under Directive 96/9/EC of the - European Parliament and of the Council of 11 March 1996 on the legal - protection of databases, and under any national implementation - thereof, including any amended or successor version of such - directive); and -vii. other similar, equivalent or corresponding rights throughout the - world based on applicable law or treaty, and any national - implementations thereof. - -2. Waiver. 
To the greatest extent permitted by, but not in contravention -of, applicable law, Affirmer hereby overtly, fully, permanently, -irrevocably and unconditionally waives, abandons, and surrenders all of -Affirmer's Copyright and Related Rights and associated claims and causes -of action, whether now known or unknown (including existing as well as -future claims and causes of action), in the Work (i) in all territories -worldwide, (ii) for the maximum duration provided by applicable law or -treaty (including future time extensions), (iii) in any current or future -medium and for any number of copies, and (iv) for any purpose whatsoever, -including without limitation commercial, advertising or promotional -purposes (the "Waiver"). Affirmer makes the Waiver for the benefit of each -member of the public at large and to the detriment of Affirmer's heirs and -successors, fully intending that such Waiver shall not be subject to -revocation, rescission, cancellation, termination, or any other legal or -equitable action to disrupt the quiet enjoyment of the Work by the public -as contemplated by Affirmer's express Statement of Purpose. - -3. Public License Fallback. Should any part of the Waiver for any reason -be judged legally invalid or ineffective under applicable law, then the -Waiver shall be preserved to the maximum extent permitted taking into -account Affirmer's express Statement of Purpose. 
In addition, to the -extent the Waiver is so judged Affirmer hereby grants to each affected -person a royalty-free, non transferable, non sublicensable, non exclusive, -irrevocable and unconditional license to exercise Affirmer's Copyright and -Related Rights in the Work (i) in all territories worldwide, (ii) for the -maximum duration provided by applicable law or treaty (including future -time extensions), (iii) in any current or future medium and for any number -of copies, and (iv) for any purpose whatsoever, including without -limitation commercial, advertising or promotional purposes (the -"License"). The License shall be deemed effective as of the date CC0 was -applied by Affirmer to the Work. Should any part of the License for any -reason be judged legally invalid or ineffective under applicable law, such -partial invalidity or ineffectiveness shall not invalidate the remainder -of the License, and in such case Affirmer hereby affirms that he or she -will not (i) exercise any of his or her remaining Copyright and Related -Rights in the Work or (ii) assert any associated claims and causes of -action with respect to the Work, in either case contrary to Affirmer's -express Statement of Purpose. - -4. Limitations and Disclaimers. - - a. No trademark or patent rights held by Affirmer are waived, abandoned, - surrendered, licensed or otherwise affected by this document. - b. Affirmer offers the Work as-is and makes no representations or - warranties of any kind concerning the Work, express, implied, - statutory or otherwise, including without limitation warranties of - title, merchantability, fitness for a particular purpose, non - infringement, or the absence of latent or other defects, accuracy, or - the present or absence of errors, whether or not discoverable, all to - the greatest extent permissible under applicable law. - c. 
Affirmer disclaims responsibility for clearing rights of other persons - that may apply to the Work or any use thereof, including without - limitation any person's Copyright and Related Rights in the Work. - Further, Affirmer disclaims responsibility for obtaining any necessary - consents, permissions or other rights required for any use of the - Work. - d. Affirmer understands and acknowledges that Creative Commons is not a - party to this document and has no duty or obligation with respect to - this CC0 or use of the Work. diff --git a/akka-samples/akka-sample-multi-node-scala/LICENSE b/akka-samples/akka-sample-multi-node-scala/LICENSE deleted file mode 100644 index 2a18b45589..0000000000 --- a/akka-samples/akka-sample-multi-node-scala/LICENSE +++ /dev/null @@ -1,10 +0,0 @@ -Activator Template by Lightbend - -Licensed under Public Domain (CC0) - -To the extent possible under law, the person who associated CC0 with -this Activator Tempate has waived all copyright and related or neighboring -rights to this Activator Template. - -You should have received a copy of the CC0 legalcode along with this -work. If not, see . 
diff --git a/akka-samples/akka-sample-multi-node-scala/activator.properties b/akka-samples/akka-sample-multi-node-scala/activator.properties deleted file mode 100644 index 4813361a7a..0000000000 --- a/akka-samples/akka-sample-multi-node-scala/activator.properties +++ /dev/null @@ -1,7 +0,0 @@ -name=akka-sample-multi-node-scala -title=Akka Multi-Node Testing Sample with Scala -description=Sample containing sbt build settings and test classes for illustrating multi-node testing with Akka and Scala -tags=akka,testing,scala,sample -authorName=Akka Team -authorLink=http://akka.io/ -sourceLink=https://github.com/akka/akka diff --git a/akka-samples/akka-sample-multi-node-scala/build.sbt b/akka-samples/akka-sample-multi-node-scala/build.sbt deleted file mode 100644 index 08fef52eaa..0000000000 --- a/akka-samples/akka-sample-multi-node-scala/build.sbt +++ /dev/null @@ -1,39 +0,0 @@ -import com.typesafe.sbt.SbtMultiJvm -import com.typesafe.sbt.SbtMultiJvm.MultiJvmKeys.MultiJvm - -val akkaVersion = "2.5-SNAPSHOT" - -val project = Project( - id = "akka-sample-multi-node-scala", - base = file(".") - ) - .settings(SbtMultiJvm.multiJvmSettings: _*) - .settings( - name := "akka-sample-multi-node-scala", - version := "2.5-SNAPSHOT", - scalaVersion := "2.11.8", - libraryDependencies ++= Seq( - "com.typesafe.akka" %% "akka-actor" % akkaVersion, - "com.typesafe.akka" %% "akka-remote" % akkaVersion, - "com.typesafe.akka" %% "akka-multi-node-testkit" % akkaVersion, - "org.scalatest" %% "scalatest" % "2.2.1" % "test"), - // make sure that MultiJvm test are compiled by the default test compilation - compile in MultiJvm <<= (compile in MultiJvm) triggeredBy (compile in Test), - // disable parallel tests - parallelExecution in Test := false, - // make sure that MultiJvm tests are executed by the default test target, - // and combine the results from ordinary test and multi-jvm tests - executeTests in Test <<= (executeTests in Test, executeTests in MultiJvm) map { - case (testResults, 
multiNodeResults) => - val overall = - if (testResults.overall.id < multiNodeResults.overall.id) - multiNodeResults.overall - else - testResults.overall - Tests.Output(overall, - testResults.events ++ multiNodeResults.events, - testResults.summaries ++ multiNodeResults.summaries) - }, - licenses := Seq(("CC0", url("http://creativecommons.org/publicdomain/zero/1.0"))) - ) - .configs (MultiJvm) diff --git a/akka-samples/akka-sample-multi-node-scala/project/build.properties b/akka-samples/akka-sample-multi-node-scala/project/build.properties deleted file mode 100644 index 748703f770..0000000000 --- a/akka-samples/akka-sample-multi-node-scala/project/build.properties +++ /dev/null @@ -1 +0,0 @@ -sbt.version=0.13.7 diff --git a/akka-samples/akka-sample-multi-node-scala/project/plugins.sbt b/akka-samples/akka-sample-multi-node-scala/project/plugins.sbt deleted file mode 100644 index c3e7d797de..0000000000 --- a/akka-samples/akka-sample-multi-node-scala/project/plugins.sbt +++ /dev/null @@ -1,4 +0,0 @@ - -resolvers += Classpaths.typesafeResolver - -addSbtPlugin("com.typesafe.sbt" % "sbt-multi-jvm" % "0.3.8") diff --git a/akka-samples/akka-sample-multi-node-scala/src/test/resources/reference.conf b/akka-samples/akka-sample-multi-node-scala/src/test/resources/reference.conf deleted file mode 100644 index 90492329b7..0000000000 --- a/akka-samples/akka-sample-multi-node-scala/src/test/resources/reference.conf +++ /dev/null @@ -1,4 +0,0 @@ -# Don't terminate ActorSystem in tests -akka.coordinated-shutdown.run-by-jvm-shutdown-hook = off -akka.coordinated-shutdown.terminate-actor-system = off -akka.cluster.run-coordinated-shutdown-when-down = off diff --git a/akka-samples/akka-sample-multi-node-scala/src/test/scala/sample/multinode/STMultiNodeSpec.scala b/akka-samples/akka-sample-multi-node-scala/src/test/scala/sample/multinode/STMultiNodeSpec.scala deleted file mode 100644 index 8f96439262..0000000000 --- 
a/akka-samples/akka-sample-multi-node-scala/src/test/scala/sample/multinode/STMultiNodeSpec.scala +++ /dev/null @@ -1,22 +0,0 @@ -//#example -package sample.multinode - -//#imports -import org.scalatest.{ BeforeAndAfterAll, WordSpecLike } -import org.scalatest.Matchers -import akka.remote.testkit.MultiNodeSpecCallbacks -//#imports - -//#trait -/** - * Hooks up MultiNodeSpec with ScalaTest - */ -trait STMultiNodeSpec extends MultiNodeSpecCallbacks - with WordSpecLike with Matchers with BeforeAndAfterAll { - - override def beforeAll() = multiNodeSpecBeforeAll() - - override def afterAll() = multiNodeSpecAfterAll() -} -//#trait -//#example diff --git a/akka-samples/akka-sample-multi-node-scala/tutorial/index.html b/akka-samples/akka-sample-multi-node-scala/tutorial/index.html deleted file mode 100644 index 0de31ddf95..0000000000 --- a/akka-samples/akka-sample-multi-node-scala/tutorial/index.html +++ /dev/null @@ -1,89 +0,0 @@ - - -Akka Multi-Node Testing Sample with Scala - - - - -
-

-This sample contains sbt build settings -and test classes for illustrating multi-node testing with Akka. -

- -

-Please refer to the full documentation of -multi-node testing -and the closely related -multi-jvm testing -for details. -There is also an section on -cluster testing. -

- -
- -
-

sbt setup

- -

-Open project/plugins.sbt -

- -

-It adds the sbt-multi-jvm plugin to the build. -

- -

-Open build.sbt -

- -

-It includes the MultiJvm settings that are needed to run multi-jvm tests. -

- -
- -
-

Tests

- -

-Open MultiNodeSample.scala -

- -

-Note that MultiJvm test sources are located in src/multi-jvm/... and the -test classes must end with MultiJvm followed by the node name, typically -Node1, Node2, Node3... -

- -

-To hook up the MultiNodeSpec with with ScalaTest you need something like: -STMultiNodeSpec.scala -

- -

-To see the test in action, open the Test tab -and click Start to run the MultiNodeSample. This corresponds to -sbt test. -

- -

-In case you have many tests in the project it can be convenient to run a single test from -the sbt prompt: -

- -

-> multi-jvm:testOnly sample.multinode.MultiNodeSampleSpec
-
- -

-The same test can be run on multiple machines as described in the -multi-node testing documentation. -

- - -
- - - diff --git a/akka-samples/akka-sample-osgi-dining-hakkers/COPYING b/akka-samples/akka-sample-osgi-dining-hakkers/COPYING deleted file mode 100644 index 0e259d42c9..0000000000 --- a/akka-samples/akka-sample-osgi-dining-hakkers/COPYING +++ /dev/null @@ -1,121 +0,0 @@ -Creative Commons Legal Code - -CC0 1.0 Universal - - CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE - LEGAL SERVICES. DISTRIBUTION OF THIS DOCUMENT DOES NOT CREATE AN - ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS - INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES - REGARDING THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS - PROVIDED HEREUNDER, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM - THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS PROVIDED - HEREUNDER. - -Statement of Purpose - -The laws of most jurisdictions throughout the world automatically confer -exclusive Copyright and Related Rights (defined below) upon the creator -and subsequent owner(s) (each and all, an "owner") of an original work of -authorship and/or a database (each, a "Work"). - -Certain owners wish to permanently relinquish those rights to a Work for -the purpose of contributing to a commons of creative, cultural and -scientific works ("Commons") that the public can reliably and without fear -of later claims of infringement build upon, modify, incorporate in other -works, reuse and redistribute as freely as possible in any form whatsoever -and for any purposes, including without limitation commercial purposes. -These owners may contribute to the Commons to promote the ideal of a free -culture and the further production of creative, cultural and scientific -works, or to gain reputation or greater distribution for their Work in -part through the use and efforts of others. 
- -For these and/or other purposes and motivations, and without any -expectation of additional consideration or compensation, the person -associating CC0 with a Work (the "Affirmer"), to the extent that he or she -is an owner of Copyright and Related Rights in the Work, voluntarily -elects to apply CC0 to the Work and publicly distribute the Work under its -terms, with knowledge of his or her Copyright and Related Rights in the -Work and the meaning and intended legal effect of CC0 on those rights. - -1. Copyright and Related Rights. A Work made available under CC0 may be -protected by copyright and related or neighboring rights ("Copyright and -Related Rights"). Copyright and Related Rights include, but are not -limited to, the following: - - i. the right to reproduce, adapt, distribute, perform, display, - communicate, and translate a Work; - ii. moral rights retained by the original author(s) and/or performer(s); -iii. publicity and privacy rights pertaining to a person's image or - likeness depicted in a Work; - iv. rights protecting against unfair competition in regards to a Work, - subject to the limitations in paragraph 4(a), below; - v. rights protecting the extraction, dissemination, use and reuse of data - in a Work; - vi. database rights (such as those arising under Directive 96/9/EC of the - European Parliament and of the Council of 11 March 1996 on the legal - protection of databases, and under any national implementation - thereof, including any amended or successor version of such - directive); and -vii. other similar, equivalent or corresponding rights throughout the - world based on applicable law or treaty, and any national - implementations thereof. - -2. Waiver. 
To the greatest extent permitted by, but not in contravention -of, applicable law, Affirmer hereby overtly, fully, permanently, -irrevocably and unconditionally waives, abandons, and surrenders all of -Affirmer's Copyright and Related Rights and associated claims and causes -of action, whether now known or unknown (including existing as well as -future claims and causes of action), in the Work (i) in all territories -worldwide, (ii) for the maximum duration provided by applicable law or -treaty (including future time extensions), (iii) in any current or future -medium and for any number of copies, and (iv) for any purpose whatsoever, -including without limitation commercial, advertising or promotional -purposes (the "Waiver"). Affirmer makes the Waiver for the benefit of each -member of the public at large and to the detriment of Affirmer's heirs and -successors, fully intending that such Waiver shall not be subject to -revocation, rescission, cancellation, termination, or any other legal or -equitable action to disrupt the quiet enjoyment of the Work by the public -as contemplated by Affirmer's express Statement of Purpose. - -3. Public License Fallback. Should any part of the Waiver for any reason -be judged legally invalid or ineffective under applicable law, then the -Waiver shall be preserved to the maximum extent permitted taking into -account Affirmer's express Statement of Purpose. 
In addition, to the -extent the Waiver is so judged Affirmer hereby grants to each affected -person a royalty-free, non transferable, non sublicensable, non exclusive, -irrevocable and unconditional license to exercise Affirmer's Copyright and -Related Rights in the Work (i) in all territories worldwide, (ii) for the -maximum duration provided by applicable law or treaty (including future -time extensions), (iii) in any current or future medium and for any number -of copies, and (iv) for any purpose whatsoever, including without -limitation commercial, advertising or promotional purposes (the -"License"). The License shall be deemed effective as of the date CC0 was -applied by Affirmer to the Work. Should any part of the License for any -reason be judged legally invalid or ineffective under applicable law, such -partial invalidity or ineffectiveness shall not invalidate the remainder -of the License, and in such case Affirmer hereby affirms that he or she -will not (i) exercise any of his or her remaining Copyright and Related -Rights in the Work or (ii) assert any associated claims and causes of -action with respect to the Work, in either case contrary to Affirmer's -express Statement of Purpose. - -4. Limitations and Disclaimers. - - a. No trademark or patent rights held by Affirmer are waived, abandoned, - surrendered, licensed or otherwise affected by this document. - b. Affirmer offers the Work as-is and makes no representations or - warranties of any kind concerning the Work, express, implied, - statutory or otherwise, including without limitation warranties of - title, merchantability, fitness for a particular purpose, non - infringement, or the absence of latent or other defects, accuracy, or - the present or absence of errors, whether or not discoverable, all to - the greatest extent permissible under applicable law. - c. 
Affirmer disclaims responsibility for clearing rights of other persons - that may apply to the Work or any use thereof, including without - limitation any person's Copyright and Related Rights in the Work. - Further, Affirmer disclaims responsibility for obtaining any necessary - consents, permissions or other rights required for any use of the - Work. - d. Affirmer understands and acknowledges that Creative Commons is not a - party to this document and has no duty or obligation with respect to - this CC0 or use of the Work. diff --git a/akka-samples/akka-sample-osgi-dining-hakkers/LICENSE b/akka-samples/akka-sample-osgi-dining-hakkers/LICENSE deleted file mode 100644 index 357d88bd80..0000000000 --- a/akka-samples/akka-sample-osgi-dining-hakkers/LICENSE +++ /dev/null @@ -1,10 +0,0 @@ -Akka Sample by Lightbend - -Licensed under Public Domain (CC0) - -To the extent possible under law, the person who associated CC0 with -this Akka Sample has waived all copyright and related or neighboring -rights to this Akka Sample. - -You should have received a copy of the CC0 legalcode along with this -work. If not, see . diff --git a/akka-samples/akka-sample-osgi-dining-hakkers/README.md b/akka-samples/akka-sample-osgi-dining-hakkers/README.md deleted file mode 100644 index 303b611346..0000000000 --- a/akka-samples/akka-sample-osgi-dining-hakkers/README.md +++ /dev/null @@ -1,49 +0,0 @@ -akka-osgi-sample : Clustered DiningHakkers -================ -This project may be used to test akka bundles in OSGi Frameworks. The build tool (sbt for the moment) provide scripts to run in an OSGi Framework (Karaf only for the moment) a version of the DiningHakkers that runs on several nodes using the akka-cluster module. - -## Bundle overview - -This project provides three Osgi Bundles - - api providing an API for the Service exposed by the core and used by the command - - core implementing the whole logic: clustered connections, Hakkers, ChopSticks. 
Finally it provides an ActorRef of one created Hakker - - command use a service to get a Hakker (ActorRef) with its position around the table - -An integration testing module is provided to verify OSGi functionality: - - integration-test - -Two modules that provision the project into the Karaf OSGi container for experimentation and integration testing: - - assembly-features defines the karaf "feature" that allows Karaf to provision the bundles - - assembly-dist creates a distribution tar.gz and zip file containing the configured Karaf runtime - -## How to use it - -### Setup with sbt -just run: -```bash -sbt clean -sbt package -sbt osgi-bundle -``` -sbt will creates the bundles in each subproject akka-sample/akka-sample-osgi-dining-hakkers/(api, command, core)/target directories. To have integration tests and OSGi environment loaded, please use the Maven build (at least for the moment) -### Setup with Maven -```bash -mvn clean install -``` - -The assembly-dist/target/ directory will now contain a tar.gz file that contains a pre-configured Karaf runtime. -This can be extracted to any location, and bin/karaf executed. The provided karaf.sh script automates this. - -### Run -Extract the OSGi Framework from the tar.gz described above into any location, or run: -``./karaf.sh`` - -Execute the framework by running ``bin/karaf`` from inside the extracted directory. - -Then try to restart some bundles, to test the stability of the bundles: - -``list`` to get the list of the bundles -``restart #bundle_number`` to restart the bundle using its ID -``exit`` or CTRL-D to exit the Karaf console - -Depending on the akka version you're using, you may need to modify the core bundle when deploying on a second machine, to set its akka.remote.netty.hostname in the application.conf. 
diff --git a/akka-samples/akka-sample-osgi-dining-hakkers/api/pom.xml b/akka-samples/akka-sample-osgi-dining-hakkers/api/pom.xml deleted file mode 100644 index 73c89d95c5..0000000000 --- a/akka-samples/akka-sample-osgi-dining-hakkers/api/pom.xml +++ /dev/null @@ -1,24 +0,0 @@ - - - - com.typesafe.akka.akka-sample-osgi-dining-hakkers - project - 2.5-SNAPSHOT - - 4.0.0 - - api - Dining Hakker :: Service API - bundle - - - - - com.typesafe.akka - akka-osgi_${scala.dep.version} - - - - diff --git a/akka-samples/akka-sample-osgi-dining-hakkers/api/src/main/scala/akka/sample/osgi/api/DiningHakkersMessages.scala b/akka-samples/akka-sample-osgi-dining-hakkers/api/src/main/scala/akka/sample/osgi/api/DiningHakkersMessages.scala deleted file mode 100644 index a7d008f3a8..0000000000 --- a/akka-samples/akka-sample-osgi-dining-hakkers/api/src/main/scala/akka/sample/osgi/api/DiningHakkersMessages.scala +++ /dev/null @@ -1,34 +0,0 @@ -package akka.sample.osgi.api - -import akka.actor.ActorRef - -/* - * Define our messages, they basically speak for themselves - */ -sealed trait DiningHakkerMessage extends Serializable - -final case class Busy(chopstick: ActorRef) extends DiningHakkerMessage - -final case class Put(hakker: ActorRef) extends DiningHakkerMessage - -final case class Take(hakker: ActorRef) extends DiningHakkerMessage - -final case class Taken(chopstick: ActorRef) extends DiningHakkerMessage - -case object Eat extends DiningHakkerMessage - -case object Think extends DiningHakkerMessage - -case object Identify extends DiningHakkerMessage - -final case class Identification(name: String, busyWith: String) extends DiningHakkerMessage - -case object SubscribeToHakkerStateChanges extends DiningHakkerMessage - -final case class HakkerStateChange(hakkerName: String, from: String, to: String) - -final case class TrackHakker(hakker: ActorRef) extends DiningHakkerMessage - -final case class GetEatingCount(hakkerName: String) extends DiningHakkerMessage - -final case class 
EatingCount(hakkerName: String, count: Int) extends DiningHakkerMessage diff --git a/akka-samples/akka-sample-osgi-dining-hakkers/api/src/main/scala/akka/sample/osgi/api/DiningHakkersService.scala b/akka-samples/akka-sample-osgi-dining-hakkers/api/src/main/scala/akka/sample/osgi/api/DiningHakkersService.scala deleted file mode 100644 index 9e6e5c070b..0000000000 --- a/akka-samples/akka-sample-osgi-dining-hakkers/api/src/main/scala/akka/sample/osgi/api/DiningHakkersService.scala +++ /dev/null @@ -1,23 +0,0 @@ -/* -Copyright 2013 Crossing-Tech - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and - limitations under the License. 
- */ -package akka.sample.osgi.api - -import akka.actor.ActorRef - -trait DiningHakkersService { - def getHakker(name: String, chairNumber: Int): ActorRef - def getTracker(): ActorRef -} diff --git a/akka-samples/akka-sample-osgi-dining-hakkers/assembly-dist/pom.xml b/akka-samples/akka-sample-osgi-dining-hakkers/assembly-dist/pom.xml deleted file mode 100644 index 8e6cf6cbec..0000000000 --- a/akka-samples/akka-sample-osgi-dining-hakkers/assembly-dist/pom.xml +++ /dev/null @@ -1,130 +0,0 @@ - - - 4.0.0 - - - com.typesafe.akka.akka-sample-osgi-dining-hakkers - project - 2.5-SNAPSHOT - - - akka-sample-osgi-dining-hakkers-dist - Dining Hakkers :: Distribution - - - - ${project.groupId} - akka-sample-osgi-dining-hakkers - pom - - - ${project.groupId} - api - - - ${project.groupId} - core - - - ${project.groupId} - command - - - - - - - ${basedir}/src/main/resources - true - - **/* - - - - - - org.apache.karaf.tooling - features-maven-plugin - ${karaf.tooling.maven.version} - - - add-features-to-repo - prepare-package - - add-features-to-repo - - - - mvn:org.apache.karaf.assemblies.features/standard/${karaf.tooling.maven.version}/xml/features - mvn:org.apache.karaf.assemblies.features/enterprise/${karaf.tooling.maven.version}/xml/features - mvn:com.typesafe.akka.akka-sample-osgi-dining-hakkers/akka-sample-osgi-dining-hakkers/${project.version}/xml/features - - target/generated-features-repo - - - - - - - org.apache.maven.plugins - maven-dependency-plugin - 2.6 - - - copy-cli-dependencies - prepare-package - - copy-dependencies - - - true - target/generated-features-repo - - - - - unpack - prepare-package - - unpack - - - - - org.apache.karaf - apache-karaf - tar.gz - target/dependencies - - - - - - - - org.apache.maven.plugins - maven-assembly-plugin - 2.4 - - - bin - package - - single - - - - src/main/descriptors/bin.xml - - false - gnu - - - - - - - - diff --git a/akka-samples/akka-sample-osgi-dining-hakkers/assembly-dist/src/main/descriptors/bin.xml 
b/akka-samples/akka-sample-osgi-dining-hakkers/assembly-dist/src/main/descriptors/bin.xml deleted file mode 100644 index d3f72a43de..0000000000 --- a/akka-samples/akka-sample-osgi-dining-hakkers/assembly-dist/src/main/descriptors/bin.xml +++ /dev/null @@ -1,85 +0,0 @@ - - - bin - - - tar.gz - - - true - akka-sample-osgi-dining-hakkers-${project.version} - - - - - ${project.build.directory}/dependencies - - *.jar - - /lib/ - - - - ${project.basedir}/src/main/distribution - / - 0755 - 0644 - true - - lib/readme.txt - - - - - ${project.basedir}/src/main/bin - /bin - 0755 - 0755 - - - - ${project.build.directory}/classes/etc - /etc/ - unix - 0755 - 0644 - - - - ${project.build.directory}/generated-features-repo - /system - 0755 - 0644 - - - - - ${project.build.directory}/dependencies/apache-karaf-${karaf.version} - / - - **/demos/** - bin/** - etc/custom.properties - etc/org.apache.karaf.features.cfg - README - RELEASE-NOTES - karaf-manual*.html - karaf-manual*.pdf - - - - - - ${project.build.directory}/dependencies/apache-karaf-${karaf.version} - / - - bin/** - - unix - 0755 - - - - diff --git a/akka-samples/akka-sample-osgi-dining-hakkers/assembly-dist/src/main/distribution/README b/akka-samples/akka-sample-osgi-dining-hakkers/assembly-dist/src/main/distribution/README deleted file mode 100644 index 750b5a9c9e..0000000000 --- a/akka-samples/akka-sample-osgi-dining-hakkers/assembly-dist/src/main/distribution/README +++ /dev/null @@ -1,47 +0,0 @@ -============================================================================== - - ZZ: - ZZZZ - ZZZZZZ - ZZZ' ZZZ - ~7 7ZZ' ZZZ - :ZZZ: IZZ' ZZZ - ,OZZZZ.~ZZ? ZZZ - ZZZZ' 'ZZZ$ ZZZ - . $ZZZ ~ZZ$ ZZZ - .=Z?. .ZZZO ~ZZ7 OZZ - .ZZZZ7..:ZZZ~ 7ZZZ ZZZ~ - .$ZZZ$Z+.ZZZZ ZZZ: ZZZ$ - .,ZZZZ?' =ZZO= .OZZ 'ZZZ - .$ZZZZ+ .ZZZZ IZZZ ZZZ$ - .ZZZZZ' .ZZZZ' .ZZZ$ ?ZZZ - .ZZZZZZ' .OZZZ? ?ZZZ 'ZZZ$ - .?ZZZZZZ' .ZZZZ? .ZZZ? 'ZZZO - .+ZZZZZZ?' .7ZZZZ' .ZZZZ :ZZZZ - .ZZZZZZ$' .?ZZZZZ' .~ZZZZ 'ZZZZ. 
- - - NNNNN $NNNN+ - NNNNN $NNNN+ - NNNNN $NNNN+ - NNNNN $NNNN+ - NNNNN $NNNN+ - =NNNNNNNNND$ NNNNN DDDDDD: $NNNN+ DDDDDN NDDNNNNNNNN, - NNNNNNNNNNNNND NNNNN DNNNNN $NNNN+ 8NNNNN= :NNNNNNNNNNNNNN - NNNNN$ DNNNNN NNNNN $NNNNN~ $NNNN+ NNNNNN NNNNN, :NNNNN+ - ?DN~ NNNNN NNNNN MNNNNN $NNNN+:NNNNN7 $ND =NNNNN - DNNNNN NNNNNDNNNN$ $NNNNDNNNNN :DNNNNN - ZNDNNNNNNNNND NNNNNNNNNND, $NNNNNNNNNNN DNDNNNNNNNNNN - NNNNNNNDDINNNNN NNNNNNNNNNND $NNNNNNNNNNND ONNNNNNND8+NNNNN - :NNNND NNNNN NNNNNN DNNNN, $NNNNNO 7NNNND NNNNNO :NNNNN - DNNNN NNNNN NNNNN DNNNN $NNNN+ 8NNNNN NNNNN $NNNNN - DNNNNO NNNNNN NNNNN NNNNN $NNNN+ NNNNN$ NNNND, ,NNNNND - NNNNNNDDNNNNNNNN NNNNN =NNNNN $NNNN+ DNNNN? DNNNNNNDNNNNNNNND - NNNNNNNNN NNNN$ NNNNN 8NNNND $NNNN+ NNNNN= ,DNNNNNNND NNNNN$ - -============================================================================== - -Welcome to the Akka ${project.version} OSGi sample. - -The sample runs inside a Karaf environment, but the concepts are easily -applicable to any OSGi environment. 
diff --git a/akka-samples/akka-sample-osgi-dining-hakkers/assembly-dist/src/main/distribution/VERSION b/akka-samples/akka-sample-osgi-dining-hakkers/assembly-dist/src/main/distribution/VERSION deleted file mode 100644 index df6498b0f5..0000000000 --- a/akka-samples/akka-sample-osgi-dining-hakkers/assembly-dist/src/main/distribution/VERSION +++ /dev/null @@ -1 +0,0 @@ -Akka ${project.version} diff --git a/akka-samples/akka-sample-osgi-dining-hakkers/assembly-dist/src/main/distribution/etc/custom.properties b/akka-samples/akka-sample-osgi-dining-hakkers/assembly-dist/src/main/distribution/etc/custom.properties deleted file mode 100644 index 83e10d70cf..0000000000 --- a/akka-samples/akka-sample-osgi-dining-hakkers/assembly-dist/src/main/distribution/etc/custom.properties +++ /dev/null @@ -1,31 +0,0 @@ -################################################################################ -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -################################################################################ - -# -# All the values specified here will override the default values given -# in config.properties. 
-# - -karaf.systemBundlesStartLevel=50 - -# Use Equinox -karaf.framework=equinox - -# Poll etc every 5s (default = 1s) -felix.fileinstall.poll=5000 diff --git a/akka-samples/akka-sample-osgi-dining-hakkers/assembly-dist/src/main/distribution/etc/org.apache.karaf.features.cfg b/akka-samples/akka-sample-osgi-dining-hakkers/assembly-dist/src/main/distribution/etc/org.apache.karaf.features.cfg deleted file mode 100644 index 63da8af40c..0000000000 --- a/akka-samples/akka-sample-osgi-dining-hakkers/assembly-dist/src/main/distribution/etc/org.apache.karaf.features.cfg +++ /dev/null @@ -1,8 +0,0 @@ -# -# Comma separated list of features repositories to register by default -# Default list + Dining Hakkers feature -# -featuresRepositories=mvn:org.apache.karaf.assemblies.features/standard/${karaf.version}/xml/features,mvn:org.apache.karaf.assemblies.features/enterprise/${karaf.version}/xml/features,mvn:com.typesafe.akka.akka-sample-osgi-dining-hakkers/akka-sample-osgi-dining-hakkers/${project.version}/xml/features - -# Comma separated list of features to install at startup. Features definitions are looked up from repositories above. -featuresBoot=config,ssh,management,dining-hakker diff --git a/akka-samples/akka-sample-osgi-dining-hakkers/assembly-dist/src/main/distribution/etc/org.ops4j.pax.url.mvn.cfg.onlylocal b/akka-samples/akka-sample-osgi-dining-hakkers/assembly-dist/src/main/distribution/etc/org.ops4j.pax.url.mvn.cfg.onlylocal deleted file mode 100644 index 4ccb568d5f..0000000000 --- a/akka-samples/akka-sample-osgi-dining-hakkers/assembly-dist/src/main/distribution/etc/org.ops4j.pax.url.mvn.cfg.onlylocal +++ /dev/null @@ -1,92 +0,0 @@ -################################################################################ -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. 
-# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -################################################################################ - -# -# If set to true, the following property will not allow any certificate to be used -# when accessing Maven repositories through SSL -# -#org.ops4j.pax.url.mvn.certificateCheck= - -# -# Path to the local Maven settings file. -# The repositories defined in this file will be automatically added to the list -# of default repositories if the 'org.ops4j.pax.url.mvn.repositories' property -# below is not set. -# The following locations are checked for the existence of the settings.xml file -# * 1. looks for the specified url -# * 2. if not found looks for ${user.home}/.m2/settings.xml -# * 3. if not found looks for ${maven.home}/conf/settings.xml -# * 4. if not found looks for ${M2_HOME}/conf/settings.xml -# -#org.ops4j.pax.url.mvn.settings= - -# -# Path to the local Maven repository which is used to avoid downloading -# artifacts when they already exist locally. -# The value of this property will be extracted from the settings.xml file -# above, or defaulted to: -# System.getProperty( "user.home" ) + "/.m2/repository" -# -org.ops4j.pax.url.mvn.localRepository=file:${karaf.home}/${karaf.default.repository} - -# -# Default this to false. 
It's just weird to use undocumented repos -# -org.ops4j.pax.url.mvn.useFallbackRepositories=false - -# -# Uncomment if you don't wanna use the proxy settings -# from the Maven conf/settings.xml file -# -# org.ops4j.pax.url.mvn.proxySupport=false - -# -# Disable aether support by default. This ensure that the defaultRepositories -# below will be used -# -org.ops4j.pax.url.mvn.disableAether=true - -# -# Comma separated list of repositories scanned when resolving an artifact. -# Those repositories will be checked before iterating through the -# below list of repositories and even before the local repository -# A repository url can be appended with zero or more of the following flags: -# @snapshots : the repository contains snaphots -# @noreleases : the repository does not contain any released artifacts -# -# The following property value will add the system folder as a repo. -# -org.ops4j.pax.url.mvn.defaultRepositories=file:${karaf.home}/${karaf.default.repository}@snapshots,\ - file:${karaf.home}/local-repo@snapshots - -# -# Comma separated list of repositories scanned when resolving an artifact. -# The default list includes the following repositories: -# https://repo1.maven.org/maven2 -# https://repository.apache.org/content/groups/snapshots-group -# https://svn.apache.org/repos/asf/servicemix/m2-repo -# http://repository.springsource.com/maven/bundles/release -# http://repository.springsource.com/maven/bundles/external -# To add repositories to the default ones, prepend '+' to the list of repositories -# to add. 
-# A repository url can be appended with zero or more of the following flags: -# @snapshots : the repository contains snaphots -# @noreleases : the repository does not contain any released artifacts -# -org.ops4j.pax.url.mvn.repositories=file:${karaf.home}/${karaf.default.repository} diff --git a/akka-samples/akka-sample-osgi-dining-hakkers/assembly-dist/src/main/distribution/etc/readme.txt b/akka-samples/akka-sample-osgi-dining-hakkers/assembly-dist/src/main/distribution/etc/readme.txt deleted file mode 100644 index ba59520b8e..0000000000 --- a/akka-samples/akka-sample-osgi-dining-hakkers/assembly-dist/src/main/distribution/etc/readme.txt +++ /dev/null @@ -1,7 +0,0 @@ -Place files like custom.properties and jre.properties here. Files -placed here will be copied to the Karaf etc/ directory. - -Exclude the Karaf default version of the file in bin.xml. - -Additional configuration files for etc should be placed into -src/main/resources/etc. diff --git a/akka-samples/akka-sample-osgi-dining-hakkers/assembly-dist/src/main/distribution/lib/readme.txt b/akka-samples/akka-sample-osgi-dining-hakkers/assembly-dist/src/main/distribution/lib/readme.txt deleted file mode 100644 index e900b3cdc0..0000000000 --- a/akka-samples/akka-sample-osgi-dining-hakkers/assembly-dist/src/main/distribution/lib/readme.txt +++ /dev/null @@ -1,2 +0,0 @@ -Jars targeted to the ${app.home}/lib directory should be placed here. -See lib/README in the distribution package for more details. diff --git a/akka-samples/akka-sample-osgi-dining-hakkers/assembly-dist/src/main/resources/etc/readme.txt b/akka-samples/akka-sample-osgi-dining-hakkers/assembly-dist/src/main/resources/etc/readme.txt deleted file mode 100644 index f0e22d6db4..0000000000 --- a/akka-samples/akka-sample-osgi-dining-hakkers/assembly-dist/src/main/resources/etc/readme.txt +++ /dev/null @@ -1,3 +0,0 @@ -Configuration files for the Karaf etc/ directory. - -Place Karaf ConfigAdmin files here. 
diff --git a/akka-samples/akka-sample-osgi-dining-hakkers/assembly-features/pom.xml b/akka-samples/akka-sample-osgi-dining-hakkers/assembly-features/pom.xml deleted file mode 100644 index 5dc99beac5..0000000000 --- a/akka-samples/akka-sample-osgi-dining-hakkers/assembly-features/pom.xml +++ /dev/null @@ -1,74 +0,0 @@ - - - 4.0.0 - - - com.typesafe.akka.akka-sample-osgi-dining-hakkers - project - 2.5-SNAPSHOT - - - akka-sample-osgi-dining-hakkers - Dining Hakkers :: Features - pom - - - - - src/main/resources - true - - **/* - - - - - - org.apache.maven.plugins - maven-resources-plugin - 2.6 - - false - - - ${*} - - - - - filter - generate-resources - - resources - - - - - - org.codehaus.mojo - build-helper-maven-plugin - 1.7 - - - attach-artifact - package - - attach-artifact - - - - - target/classes/features.xml - xml - features - - - - - - - - - diff --git a/akka-samples/akka-sample-osgi-dining-hakkers/assembly-features/src/main/resources/features.xml b/akka-samples/akka-sample-osgi-dining-hakkers/assembly-features/src/main/resources/features.xml deleted file mode 100644 index 42444717f8..0000000000 --- a/akka-samples/akka-sample-osgi-dining-hakkers/assembly-features/src/main/resources/features.xml +++ /dev/null @@ -1,53 +0,0 @@ - - - - - mvn:org.osgi/org.osgi.compendium/${osgi.version} - - - - mvn:org.scala-lang/scala-library/${scala.version} - mvn:org.scala-lang/scala-reflect/${scala.version} - mvn:org.scala-lang.modules/scala-java8-compat_2.11/0.7.0 - - - - - mvn:com.typesafe.akka.akka-sample-osgi-dining-hakkers/uncommons/1.2.2 - - - - mvn:io.netty/netty/${netty.version} - - - - wrap:mvn:org.iq80.leveldb/leveldb/${leveldb.version} - wrap:mvn:org.fusesource.leveldbjni/leveldbjni-all/${leveldbjni.version} - - - - mvn:com.typesafe/config/${typesafe.config.version} - - - - scala - netty - uncommons-maths - typesafe-config - leveldb - mvn:com.typesafe.akka/akka-protobuf_${scala.dep.version}/${akka.version} - 
mvn:com.typesafe.akka/akka-actor_${scala.dep.version}/${akka.version} - mvn:com.typesafe.akka/akka-osgi_${scala.dep.version}/${akka.version} - mvn:com.typesafe.akka/akka-remote_${scala.dep.version}/${akka.version} - mvn:com.typesafe.akka/akka-cluster_${scala.dep.version}/${akka.version} - mvn:com.typesafe.akka/akka-persistence_${scala.dep.version}/${akka.version} - mvn:com.typesafe.akka/akka-slf4j_${scala.dep.version}/${akka.version} - - - - akka - mvn:com.typesafe.akka.akka-sample-osgi-dining-hakkers/api/${project.version} - mvn:com.typesafe.akka.akka-sample-osgi-dining-hakkers/core/${project.version} - mvn:com.typesafe.akka.akka-sample-osgi-dining-hakkers/command/${project.version} - - diff --git a/akka-samples/akka-sample-osgi-dining-hakkers/command/pom.xml b/akka-samples/akka-sample-osgi-dining-hakkers/command/pom.xml deleted file mode 100644 index 5ab4186172..0000000000 --- a/akka-samples/akka-sample-osgi-dining-hakkers/command/pom.xml +++ /dev/null @@ -1,48 +0,0 @@ - - - - com.typesafe.akka.akka-sample-osgi-dining-hakkers - project - 2.5-SNAPSHOT - - 4.0.0 - - command - Dining Hakker :: Service consumer - bundle - - - - org.osgi - org.osgi.core - - - org.osgi - org.osgi.compendium - - - ${project.groupId} - api - - - com.typesafe.akka - akka-osgi_${scala.dep.version} - - - - - - org.apache.felix - maven-bundle-plugin - - - akka.sample.osgi.command.Activator - * - - - - - - diff --git a/akka-samples/akka-sample-osgi-dining-hakkers/command/src/main/scala/akka/sample/osgi/command/Activator.scala b/akka-samples/akka-sample-osgi-dining-hakkers/command/src/main/scala/akka/sample/osgi/command/Activator.scala deleted file mode 100644 index 253ed0b5b3..0000000000 --- a/akka-samples/akka-sample-osgi-dining-hakkers/command/src/main/scala/akka/sample/osgi/command/Activator.scala +++ /dev/null @@ -1,41 +0,0 @@ -/* -Copyright 2013 Crossing-Tech - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the 
License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and - limitations under the License. - */ -package akka.sample.osgi.command - -import org.osgi.framework.{ ServiceEvent, BundleContext, BundleActivator } -import akka.sample.osgi.api.DiningHakkersService -import akka.actor.{ ActorRef, PoisonPill } -import org.osgi.util.tracker.ServiceTracker - -class Activator extends BundleActivator { - println("Command Activator created") - var hakker: Option[ActorRef] = None - - def start(context: BundleContext) { - val logServiceTracker = new ServiceTracker(context, classOf[DiningHakkersService].getName, null) - logServiceTracker.open() - val service = Option(logServiceTracker.getService.asInstanceOf[DiningHakkersService]) - service.foreach(startHakker(_, context.getBundle.getSymbolicName + ":" + context.getBundle.getBundleId)) - } - - def startHakker(service: DiningHakkersService, name: String) { - hakker = Some(service.getHakker(name, 4)) - } - - def stop(context: BundleContext) { - hakker.foreach(_ ! 
PoisonPill) - } -} diff --git a/akka-samples/akka-sample-osgi-dining-hakkers/core/pom.xml b/akka-samples/akka-sample-osgi-dining-hakkers/core/pom.xml deleted file mode 100644 index 7b0d3232a4..0000000000 --- a/akka-samples/akka-sample-osgi-dining-hakkers/core/pom.xml +++ /dev/null @@ -1,86 +0,0 @@ - - - - com.typesafe.akka.akka-sample-osgi-dining-hakkers - project - 2.5-SNAPSHOT - - 4.0.0 - - core - Dining Hakker :: Core - bundle - - - - org.osgi - org.osgi.core - - - org.osgi - org.osgi.compendium - - - com.typesafe.akka - akka-osgi_${scala.dep.version} - - - com.typesafe.akka - akka-remote_${scala.dep.version} - - - com.typesafe.akka - akka-slf4j_${scala.dep.version} - - - com.typesafe.akka - akka-persistence_${scala.dep.version} - - - com.typesafe - config - - - ${project.groupId} - api - - - - - io.netty - netty - - - com.google.protobuf - protobuf-java - - - com.typesafe.akka - akka-cluster_${scala.dep.version} - - - org.iq80.leveldb - leveldb - - - org.fusesource.leveldbjni - leveldbjni-all - - - - - - org.apache.felix - maven-bundle-plugin - - - akka.sample.osgi.internal, akka.sample.osgi.service - akka.sample.osgi.activation.Activator - - - - - - diff --git a/akka-samples/akka-sample-osgi-dining-hakkers/core/src/main/resources/application.conf b/akka-samples/akka-sample-osgi-dining-hakkers/core/src/main/resources/application.conf deleted file mode 100644 index 368dcb8e60..0000000000 --- a/akka-samples/akka-sample-osgi-dining-hakkers/core/src/main/resources/application.conf +++ /dev/null @@ -1,32 +0,0 @@ -akka { - - loggers = ["akka.event.slf4j.Slf4jLogger", "akka.event.Logging$DefaultLogger"] - logging-filter = "akka.event.slf4j.Slf4jLoggingFilter" - - actor { - provider = "cluster" - - serialize-messages = on - - serializers { - dining = "akka.sample.osgi.serialization.DiningHakkerSerializer" - } - - serialization-bindings { - "akka.sample.osgi.api.DiningHakkerMessage" = dining - } - } - - remote { - netty.tcp { - hostname = "localhost" - port = 4242 - } - 
} - - cluster { - seed-nodes = ["akka.tcp://akka-osgi-sample@localhost:4242"] - auto-down-unreachable-after = 20 s - } - -} diff --git a/akka-samples/akka-sample-osgi-dining-hakkers/core/src/main/scala/akka/sample/osgi/activation/Activator.scala b/akka-samples/akka-sample-osgi-dining-hakkers/core/src/main/scala/akka/sample/osgi/activation/Activator.scala deleted file mode 100644 index f0b1b7a347..0000000000 --- a/akka-samples/akka-sample-osgi-dining-hakkers/core/src/main/scala/akka/sample/osgi/activation/Activator.scala +++ /dev/null @@ -1,73 +0,0 @@ -/* -Copyright 2013 Crossing-Tech - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and - limitations under the License. 
- */ -package akka.sample.osgi.activation - -import akka.osgi.ActorSystemActivator -import akka.actor.{ Props, ActorSystem } -import akka.sample.osgi.internal.Table -import akka.sample.osgi.service.DiningHakkersServiceImpl -import akka.sample.osgi.api.DiningHakkersService -import akka.event.{ LogSource, Logging } -import org.osgi.framework.{ ServiceRegistration, BundleContext } - -class Activator extends ActorSystemActivator { - - import Activator._ - - var diningHakkerService: Option[ServiceRegistration[_]] = None - - def configure(context: BundleContext, system: ActorSystem) { - val log = Logging(system, this) - log.info("Core bundle configured") - system.actorOf(Props[Table], "table") - registerService(context, system) - registerHakkersService(context, system) - log.info("Hakker service registered") - } - - /** - * registers the DinningHakkerService as a Service to be tracked and find by other OSGi bundles. - * in other words, this instance may be used in other bundles which listen or track the OSGi Service - * @param context OSGi BundleContext - * @param system ActorSystem - */ - def registerHakkersService(context: BundleContext, system: ActorSystem) { - - val hakkersService = new DiningHakkersServiceImpl(system) - - diningHakkerService = Some(context.registerService(classOf[DiningHakkersService].getName(), hakkersService, null)) - - } - - override def stop(context: BundleContext) { - unregisterServices(context) - println("Hakker service unregistred") - super.stop(context) - } - - def unregisterServices(context: BundleContext) { - diningHakkerService foreach (_.unregister()) - } - - override def getActorSystemName(context: BundleContext): String = "akka-osgi-sample" -} - -object Activator { - implicit val logSource: LogSource[AnyRef] = new LogSource[AnyRef] { - def genString(o: AnyRef): String = o.getClass.getName - override def getClazz(o: AnyRef): Class[_] = o.getClass - } -} diff --git 
a/akka-samples/akka-sample-osgi-dining-hakkers/core/src/main/scala/akka/sample/osgi/internal/Hakker.scala b/akka-samples/akka-sample-osgi-dining-hakkers/core/src/main/scala/akka/sample/osgi/internal/Hakker.scala deleted file mode 100644 index 96a754b8bb..0000000000 --- a/akka-samples/akka-sample-osgi-dining-hakkers/core/src/main/scala/akka/sample/osgi/internal/Hakker.scala +++ /dev/null @@ -1,196 +0,0 @@ -package akka.sample.osgi.internal - -import language.postfixOps -import scala.concurrent.duration._ -import akka.actor.Terminated -import akka.cluster.Cluster -import akka.cluster.ClusterEvent.{ CurrentClusterState, LeaderChanged } -import akka.event.Logging -import akka.sample.osgi.api._ -import akka.actor.{ RootActorPath, Address, ActorRef, Actor } -import akka.sample.osgi.api.SubscribeToHakkerStateChanges -import akka.sample.osgi.api.HakkerStateChange - -//Akka adaptation of -//http://www.dalnefre.com/wp/2010/08/dining-philosophers-in-humus/ - -/* -* A Chopstick is an actor, it can be taken, and put back -*/ -class Chopstick extends Actor { - - val log = Logging(context.system, this) - - import context._ - - //When a Chopstick is taken by a hakker - //It will refuse to be taken by other hakkers - //But the owning hakker can put it back - def takenBy(hakker: ActorRef): Receive = { - case Take(otherHakker) => - otherHakker ! Busy(self) - case Put(`hakker`) => - become(available) - } - - //When a Chopstick is available, it can be taken by a hakker - def available: Receive = { - case Take(hakker) => - log.info(self.path + " is taken by " + hakker) - become(takenBy(hakker)) - hakker ! 
Taken(self) - } - - //A Chopstick begins its existence as available - def receive = available -} - -/* -* A hakker is an awesome dude or dudette who either thinks about hacking or has to eat ;-) -*/ -class Hakker(name: String, chair: Int) extends Actor { - - val log = Logging(context.system, this) - - log.info("Created Hakker at" + self.path) - - import context._ - - val cluster = Cluster(context.system) - - override def preStart() { - log.info(s"Hakker ($name) takes position($chair)") - cluster.subscribe(self, classOf[LeaderChanged]) - } - - override def postStop() { - log.info(s"Hakker ($name) leaves position($chair)") - cluster.unsubscribe(self) - } - - var subscribers = Set.empty[ActorRef] - - //When a hakker is thinking it can become hungry - //and try to pick up its chopsticks and eat - def thinking(left: ActorRef, right: ActorRef): Receive = { - case Eat => - pubStateChange("thinking", "hungry") - become(hungry(left, right) orElse (managementEvents)) - left ! Take(self) - right ! 
Take(self) - case Identify => identify("Thinking") - } - - //When a hakker is hungry it tries to pick up its chopsticks and eat - //When it picks one up, it goes into wait for the other - //If the hakkers first attempt at grabbing a chopstick fails, - //it starts to wait for the response of the other grab - def hungry(left: ActorRef, right: ActorRef): Receive = { - case Taken(`left`) => - pubStateChange("hungry", "waiting") - become(waiting_for(left, right, false) orElse (managementEvents)) - case Taken(`right`) => - pubStateChange("hungry", "waiting") - become(waiting_for(left, right, true) orElse (managementEvents)) - case Busy(chopstick) => - pubStateChange("hungry", "denied_a_chopstick") - become(denied_a_chopstick(left, right) orElse (managementEvents)) - case Identify => identify("Hungry") - } - - //When a hakker is waiting for the last chopstick it can either obtain it - //and start eating, or the other chopstick was busy, and the hakker goes - //back to think about how he should obtain his chopsticks :-) - def waiting_for(left: ActorRef, right: ActorRef, waitingForLeft: Boolean): Receive = { - case Taken(`left`) if waitingForLeft => - log.info("%s has picked up %s and %s and starts to eat".format(name, left.path.name, right.path.name)) - pubStateChange("waiting", "eating") - become(eating(left, right) orElse (managementEvents)) - system.scheduler.scheduleOnce(5 seconds, self, Think) - case Taken(`right`) if !waitingForLeft => - log.info("%s has picked up %s and %s and starts to eat".format(name, left.path.name, right.path.name)) - pubStateChange("waiting", "eating") - become(eating(left, right) orElse (managementEvents)) - system.scheduler.scheduleOnce(5 seconds, self, Think) - case Busy(chopstick) => - pubStateChange("waiting", "thinking") - become(thinking(left, right) orElse (managementEvents)) - if (waitingForLeft) { - right ! Put(self) - } else { - left ! Put(self) - } - self ! 
Eat - case Identify => identify("Waiting for Chopstick") - } - - //When the results of the other grab comes back, - //he needs to put it back if he got the other one. - //Then go back and think and try to grab the chopsticks again - def denied_a_chopstick(left: ActorRef, right: ActorRef): Receive = { - case Taken(chopstick) => - pubStateChange("denied_a_chopstick", "thinking") - become(thinking(left, right) orElse (managementEvents)) - chopstick ! Put(self) - self ! Eat - case Busy(chopstick) => - pubStateChange("denied_a_chopstick", "thinking") - become(thinking(left, right) orElse (managementEvents)) - self ! Eat - case Identify => identify("Denied a Chopstick") - } - - //When a hakker is eating, he can decide to start to think, - //then he puts down his chopsticks and starts to think - def eating(left: ActorRef, right: ActorRef): Receive = { - case Think => - pubStateChange("eating", "thinking") - become(thinking(left, right) orElse (managementEvents)) - left ! Put(self) - right ! Put(self) - log.info("%s puts down his chopsticks and starts to think".format(name)) - system.scheduler.scheduleOnce(5 seconds, self, Eat) - case Identify => identify("Eating") - } - - def waitForChopsticks: Receive = { - case (left: ActorRef, right: ActorRef) => - pubStateChange("waiting", "thinking") - become(thinking(left, right) orElse managementEvents) - system.scheduler.scheduleOnce(5 seconds, self, Eat) - case Identify => identify("Waiting") - } - - def managementEvents: Receive = { - case state: CurrentClusterState => state.leader foreach updateTable - case LeaderChanged(Some(leaderAddress)) => updateTable(leaderAddress) - case SubscribeToHakkerStateChanges => - subscribers += sender() - context watch sender() - case Terminated(subscriber) => - subscribers -= subscriber - } - - def initializing: Receive = { - case Identify => identify("Initializing") - } - - def identify(busyWith: String): Unit = { - sender() ! 
Identification(name, busyWith) - } - - def updateTable(leaderAdress: Address): Unit = { - pubStateChange("-", "waiting") - become(waitForChopsticks orElse managementEvents) - context.actorSelection(RootActorPath(leaderAdress) / "user" / "table") ! chair - } - - //All hakkers start in a non-eating state - def receive = initializing orElse managementEvents - - def pubStateChange(from: String, to: String): Unit = { - val chg = HakkerStateChange(name, from, to) - subscribers foreach { _ ! chg } - } - -} diff --git a/akka-samples/akka-sample-osgi-dining-hakkers/core/src/main/scala/akka/sample/osgi/internal/HakkerTracker.scala b/akka-samples/akka-sample-osgi-dining-hakkers/core/src/main/scala/akka/sample/osgi/internal/HakkerTracker.scala deleted file mode 100644 index 60926051de..0000000000 --- a/akka-samples/akka-sample-osgi-dining-hakkers/core/src/main/scala/akka/sample/osgi/internal/HakkerTracker.scala +++ /dev/null @@ -1,60 +0,0 @@ -package akka.sample.osgi.internal - -import akka.persistence.PersistentActor -import akka.actor.ActorRef -import akka.sample.osgi.api.HakkerStateChange -import akka.sample.osgi.api.SubscribeToHakkerStateChanges -import akka.sample.osgi.api.EatingCount -import akka.sample.osgi.api.GetEatingCount -import akka.sample.osgi.api.TrackHakker - -object HakkerTracker { - sealed trait DomainEvent - final case class StartedEating(name: String) extends DomainEvent - final case class StoppedEating(name: String) extends DomainEvent - - object State { - val empty: State = new State(Map.empty) - } - final case class State private (eatingCounts: Map[String, Int]) { - def updated(event: DomainEvent): State = event match { - case StartedEating(name) => - val c = eatingCounts.getOrElse(name, 0) + 1 - copy(eatingCounts = eatingCounts + (name -> c)) - case StoppedEating(name) => - this - } - } -} - -class HakkerTracker extends PersistentActor { - import HakkerTracker._ - - var state = State.empty - - override def persistenceId: String = "hakkerTracker" - - 
override def receiveRecover: Receive = { - case evt: DomainEvent => - state = state.updated(evt) - } - - override def receiveCommand: Receive = { - case TrackHakker(hakker) => - hakker ! SubscribeToHakkerStateChanges - - case HakkerStateChange(name, _, "eating") => - persist(StartedEating(name)) { evt => - state = state.updated(evt) - } - - case HakkerStateChange(name, "eating", _) => - persist(StoppedEating(name)) { evt => - state = state.updated(evt) - } - - case GetEatingCount(name) => - sender() ! EatingCount(name, 17) - } - -} \ No newline at end of file diff --git a/akka-samples/akka-sample-osgi-dining-hakkers/core/src/main/scala/akka/sample/osgi/internal/Table.scala b/akka-samples/akka-sample-osgi-dining-hakkers/core/src/main/scala/akka/sample/osgi/internal/Table.scala deleted file mode 100644 index 850e3beda4..0000000000 --- a/akka-samples/akka-sample-osgi-dining-hakkers/core/src/main/scala/akka/sample/osgi/internal/Table.scala +++ /dev/null @@ -1,26 +0,0 @@ -/* -Copyright 2013 Crossing-Tech - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and - limitations under the License. - */ -package akka.sample.osgi.internal - -import akka.actor.{ Props, Actor } - -class Table extends Actor { - val chopsticks = for (i <- 1 to 5) yield context.actorOf(Props[Chopstick], "Chopstick" + i) - - def receive = { - case x: Int => sender() ! 
((chopsticks(x), chopsticks((x + 1) % 5))) - } -} diff --git a/akka-samples/akka-sample-osgi-dining-hakkers/core/src/main/scala/akka/sample/osgi/serialization/DiningHakkerSerializer.scala b/akka-samples/akka-sample-osgi-dining-hakkers/core/src/main/scala/akka/sample/osgi/serialization/DiningHakkerSerializer.scala deleted file mode 100644 index 37b23d85b6..0000000000 --- a/akka-samples/akka-sample-osgi-dining-hakkers/core/src/main/scala/akka/sample/osgi/serialization/DiningHakkerSerializer.scala +++ /dev/null @@ -1,25 +0,0 @@ -package akka.sample.osgi.serialization - -import akka.serialization.Serializer -import akka.actor.ExtendedActorSystem -import akka.serialization.Serialization -import akka.serialization.SerializationExtension - -class DiningHakkerSerializer(val system: ExtendedActorSystem) extends Serializer { - - override def includeManifest: Boolean = true - - override def identifier = 98765 - - lazy val javaSerializer = SerializationExtension(system).findSerializerFor(classOf[java.io.Serializable]) - - def toBinary(obj: AnyRef): Array[Byte] = { - javaSerializer.toBinary(obj) - } - - def fromBinary(bytes: Array[Byte], - clazz: Option[Class[_]]): AnyRef = { - javaSerializer.fromBinary(bytes, clazz) - } - -} diff --git a/akka-samples/akka-sample-osgi-dining-hakkers/core/src/main/scala/akka/sample/osgi/service/DiningHakkersServiceImpl.scala b/akka-samples/akka-sample-osgi-dining-hakkers/core/src/main/scala/akka/sample/osgi/service/DiningHakkersServiceImpl.scala deleted file mode 100644 index 64aab41f25..0000000000 --- a/akka-samples/akka-sample-osgi-dining-hakkers/core/src/main/scala/akka/sample/osgi/service/DiningHakkersServiceImpl.scala +++ /dev/null @@ -1,30 +0,0 @@ -/* -Copyright 2013 Crossing-Tech - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and - limitations under the License. - */ -package akka.sample.osgi.service - -import akka.sample.osgi.api.DiningHakkersService -import akka.actor.{ Props, ActorSystem } -import akka.actor.ActorRef -import akka.sample.osgi.internal.Hakker -import akka.sample.osgi.internal.HakkerTracker - -class DiningHakkersServiceImpl(system: ActorSystem) extends DiningHakkersService { - def getHakker(name: String, chairNumber: Int): ActorRef = - system.actorOf(Props(classOf[Hakker], name, chairNumber)) - - def getTracker(): ActorRef = - system.actorOf(Props[HakkerTracker], "tracker") -} diff --git a/akka-samples/akka-sample-osgi-dining-hakkers/integration-test/pom.xml b/akka-samples/akka-sample-osgi-dining-hakkers/integration-test/pom.xml deleted file mode 100644 index e25b2ea06b..0000000000 --- a/akka-samples/akka-sample-osgi-dining-hakkers/integration-test/pom.xml +++ /dev/null @@ -1,70 +0,0 @@ - - - - com.typesafe.akka.akka-sample-osgi-dining-hakkers - project - 2.5-SNAPSHOT - - 4.0.0 - - integration-test - Dining Hakker :: Integration Test - jar - - - - ${project.groupId} - api - - - org.apache.karaf.tooling.exam - org.apache.karaf.tooling.exam.container - - - org.ops4j.pax.exam - pax-exam-junit4 - - - com.typesafe.akka - akka-testkit_${scala.dep.version} - - - junit - junit - - - - - - - - org.ops4j.pax.exam - maven-paxexam-plugin - 1.2.4 - - - generate-config - - generate-depends-file - - - - - - org.apache.maven.plugins - maven-surefire-plugin - 2.12.4 - - - 30000 - ${karaf.version} - ${project.version} - ${scala.dep.version} - - - - - - diff --git 
a/akka-samples/akka-sample-osgi-dining-hakkers/integration-test/src/test/scala/akka/sample/osgi/test/HakkerStatusTest.scala b/akka-samples/akka-sample-osgi-dining-hakkers/integration-test/src/test/scala/akka/sample/osgi/test/HakkerStatusTest.scala deleted file mode 100644 index 032b4031e8..0000000000 --- a/akka-samples/akka-sample-osgi-dining-hakkers/integration-test/src/test/scala/akka/sample/osgi/test/HakkerStatusTest.scala +++ /dev/null @@ -1,93 +0,0 @@ -package akka.sample.osgi.test - -import akka.actor.{ Identify => _, _ } -import akka.sample.osgi.api._ -import akka.sample.osgi.test.TestOptions._ -import akka.testkit.TestProbe -import javax.inject.Inject -import org.junit.Assert._ -import org.junit.runner.RunWith -import org.junit.{ Before, Test } -import org.ops4j.pax.exam.junit.{ Configuration, JUnit4TestRunner } -import org.ops4j.pax.exam.util.Filter -import org.ops4j.pax.exam.{ Option => PaxOption } -import scala.concurrent.duration._ -import org.apache.karaf.tooling.exam.options.LogLevelOption - -/** - * This is a ScalaTest based integration test. Pax-Exam, which is responsible for loading the test class into - * the OSGi environment and executing it, currently does not support ScalaTest directly. However, ScalaTest - * provides a JUnit-compatible runner, so the test is defined to use that runner. Pax-Exam can then invoke - * it as a normal JUnit test. Because Pax Exam is using the JUnitRunner and not one of the ScalaTest traits such - * as FunSuite, the test should be defined using the JUnit @Test annotation. - * - * This is a simple test demonstrating in-container integration testing. - * - * One thing to note is that we only depend on the API bundle, not the implementation in core. The implementation - * is injected into the test at runtime via an OSGi service lookup performed by Pax Exam. 
- * - * TODO attempt to use the Akka test probe - */ -@RunWith(classOf[JUnit4TestRunner]) -class HakkerStatusTest { - - @Inject @Filter(timeout = 30000) - var actorSystem: ActorSystem = _ - - @Inject @Filter(timeout = 30000) - var service: DiningHakkersService = _ - - var testProbe: TestProbe = _ - - @Configuration - def config: Array[PaxOption] = Array[PaxOption]( - karafOptionsWithTestBundles(), - featureDiningHakkers() //, debugOptions(level = LogLevelOption.LogLevel.DEBUG) - ) - - // Junit @Before and @After can be used as well - - @Before - def setupAkkaTestkit() { - testProbe = new TestProbe(actorSystem) - } - - @Test - def verifyObtainingAHakkerViaTheTheDiningHakkersService() { - - val name = "TestHakker" - val hakker = Option(service.getHakker(name, 2)) - .getOrElse(throw new IllegalStateException("No Hakker was created via DiningHakkerService")) - - // takes some time for the first message to get through - testProbe.within(10.seconds) { - testProbe.send(hakker, Identify) - val Identification(fromHakker, busyWith) = testProbe.expectMsgType[Identification] - - println("---------------> %s is busy with %s.".format(fromHakker, busyWith)) - assertEquals(fromHakker, "TestHakker") - assertNotNull(busyWith) - } - - } - - @Test - def verifyHakkerTracker() { - - val name = "TestHakker" - val hakker = service.getHakker(name, 3) - val tracker = service.getTracker() - tracker ! 
TrackHakker(hakker) - testProbe.within(10.seconds) { - testProbe.awaitAssert { - testProbe.within(1.second) { - tracker.tell(GetEatingCount(name), testProbe.ref) - val reply = testProbe.expectMsgType[EatingCount] - assertEquals(reply.hakkerName, name) - assertTrue(reply.count > 0) - } - } - } - } - -} diff --git a/akka-samples/akka-sample-osgi-dining-hakkers/integration-test/src/test/scala/akka/sample/osgi/test/TestOptions.scala b/akka-samples/akka-sample-osgi-dining-hakkers/integration-test/src/test/scala/akka/sample/osgi/test/TestOptions.scala deleted file mode 100644 index aab1843c6e..0000000000 --- a/akka-samples/akka-sample-osgi-dining-hakkers/integration-test/src/test/scala/akka/sample/osgi/test/TestOptions.scala +++ /dev/null @@ -1,54 +0,0 @@ -package akka.sample.osgi.test - -import org.ops4j.pax.exam.CoreOptions._ -import org.ops4j.pax.exam.options.DefaultCompositeOption -import org.ops4j.pax.exam.{ Option => PaxOption } -import org.apache.karaf.tooling.exam.options.LogLevelOption -import org.apache.karaf.tooling.exam.options.KarafDistributionOption._ -import java.io.File - -/** - * Re-usable PAX Exam option groups. 
- */ -object TestOptions { - - val scalaDepVersion = System.getProperty("scala.dep.version") - - def karafOptions(useDeployFolder: Boolean = false, extractInTargetFolder: Boolean = true): PaxOption = { - val kdc = karafDistributionConfiguration.frameworkUrl( - maven.groupId("org.apache.karaf").artifactId("apache-karaf").`type`("zip").version(System.getProperty("karaf.version"))) - .karafVersion(System.getProperty("karaf.version")).name("Apache Karaf").useDeployFolder(useDeployFolder) - - new DefaultCompositeOption(if (extractInTargetFolder) kdc.unpackDirectory(new File("target/paxexam/unpack/")) else kdc, - editConfigurationFilePut("etc/config.properties", "karaf.framework", "equinox")) - } - - def testBundles(): PaxOption = { - new DefaultCompositeOption( - mavenBundle("com.typesafe.akka", "akka-testkit_%s".format(scalaDepVersion)).versionAsInProject, - junitBundles) - } - - def debugOptions(level: LogLevelOption.LogLevel = LogLevelOption.LogLevel.INFO, debugPort: Option[Int] = None): PaxOption = { - val options: List[PaxOption] = List(logLevel(level), configureConsole().startLocalConsole(), configureConsole().startRemoteShell()) ++ - debugPort.toList.map(p => debugConfiguration(String.valueOf(p), true)) - new DefaultCompositeOption(options: _*) - } - - def karafOptionsWithTestBundles(useDeployFolder: Boolean = false, extractInTargetFolder: Boolean = true): PaxOption = { - new DefaultCompositeOption( - karafOptions(useDeployFolder, extractInTargetFolder), - testBundles()) - } - - def featureDiningHakkers(): PaxOption = { - akkaFeature("dining-hakker") - } - - def akkaFeature(feature: String): PaxOption = { - scanFeatures(maven.groupId("com.typesafe.akka.akka-sample-osgi-dining-hakkers") - .artifactId("akka-sample-osgi-dining-hakkers").`type`("xml").classifier("features") - .version(System.getProperty("project.version")), feature) - } - -} diff --git a/akka-samples/akka-sample-osgi-dining-hakkers/karaf.sh b/akka-samples/akka-sample-osgi-dining-hakkers/karaf.sh 
deleted file mode 100755 index 8396458595..0000000000 --- a/akka-samples/akka-sample-osgi-dining-hakkers/karaf.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash - -projdir=$(cd $(dirname $0); pwd) -version=2.5-SNAPSHOT - -# This directory is specified in the build as the root of the tar -# Use tar --strip-components=1 to ignore the root -outputdir="$projdir/target/akka-sample-osgi-dining-hakkers-$version" - -mkdir $projdir/target - -if [[ -d "$outputdir" ]]; then - echo Deleting existing $outputdir... - rm -fr "$outputdir" -fi -echo Extracting configured container into $outputdir... -tar -C $projdir/target -zxf assembly-dist/target/akka-sample-osgi-dining-hakkers-dist-$version.tar.gz -echo Extract complete, please run $outputdir/bin/karaf diff --git a/akka-samples/akka-sample-osgi-dining-hakkers/pom.xml b/akka-samples/akka-sample-osgi-dining-hakkers/pom.xml deleted file mode 100644 index 33fc68357a..0000000000 --- a/akka-samples/akka-sample-osgi-dining-hakkers/pom.xml +++ /dev/null @@ -1,253 +0,0 @@ - - - 4.0.0 - com.typesafe.akka.akka-sample-osgi-dining-hakkers - project - 2.5-SNAPSHOT - - - UTF-8 - 2.5-SNAPSHOT - - - 3.0.8 - 2.3.12 - 2.4.4 - 3.10.5.Final - 4.3.1 - 2.6.0 - 1.6.0 - 2.5.0 - 2.11.7 - 2.11 - 1.3.0 - 0.7 - 1.8 - 1.2.2 - - - - api - command - uncommons - core - - assembly-features - assembly-dist - integration-test - - pom - akka-sample-osgi-dining-hakkers - - - - - org.osgi - org.osgi.core - ${osgi.version} - - - org.osgi - org.osgi.compendium - ${osgi.version} - - - - ${project.groupId} - api - ${project.version} - - - ${project.groupId} - core - ${project.version} - - - ${project.groupId} - command - ${project.version} - - - ${project.groupId} - akka-sample-osgi-dining-hakkers - ${project.version} - pom - - - - com.typesafe.akka - akka-osgi_${scala.dep.version} - ${akka.version} - - - com.typesafe.akka - akka-cluster_${scala.dep.version} - ${akka.version} - - - com.typesafe.akka - akka-remote_${scala.dep.version} - ${akka.version} - - - 
com.typesafe.akka - akka-slf4j_${scala.dep.version} - ${akka.version} - - - com.typesafe.akka - akka-persistence_${scala.dep.version} - ${akka.version} - - - com.typesafe - config - ${typesafe.config.version} - - - com.google.protobuf - protobuf-java - ${protobuf.version} - - - io.netty - netty - ${netty.version} - - - org.iq80.leveldb - leveldb - ${leveldb.version} - - - org.fusesource.leveldbjni - leveldbjni-all - ${leveldbjni.version} - - - - org.apache.karaf.tooling.exam - org.apache.karaf.tooling.exam.container - ${karaf.tooling.exam.version} - test - - - org.ops4j.pax.exam - pax-exam-junit4 - ${paxexam.version} - test - - - com.typesafe.akka - akka-testkit_${scala.dep.version} - ${akka.version} - test - - - junit - junit - 4.10 - test - - - - - - - org.scala-lang - scala-compiler - ${scala.version} - - - - - org.apache.karaf - apache-karaf - ${karaf.version} - tar.gz - - - org.apache.karaf - apache-karaf - ${karaf.version} - zip - - - - - oss-sonatype-releases - https://oss.sonatype.org/content/repositories/releases - - - springsource-releases - http://repository.springsource.com/maven/bundles/release - - - springsource-external - http://repository.springsource.com/maven/bundles/external - - - - - - - org.codehaus.mojo - build-helper-maven-plugin - 1.7 - - - generate-sources - - add-source - - - - src/main/scala - - - - - - - net.alchim31.maven - scala-maven-plugin - 3.1.2 - - - scala-compile-first - process-resources - - add-source - compile - - - - scala-test-compile - process-test-resources - - testCompile - - - - - - -deprecation - -feature - -encoding - UTF-8 - - - - - - - org.apache.felix - maven-bundle-plugin - 2.3.7 - true - - - - diff --git a/akka-samples/akka-sample-osgi-dining-hakkers/uncommons/pom.xml b/akka-samples/akka-sample-osgi-dining-hakkers/uncommons/pom.xml deleted file mode 100644 index 126da1c2ba..0000000000 --- a/akka-samples/akka-sample-osgi-dining-hakkers/uncommons/pom.xml +++ /dev/null @@ -1,73 +0,0 @@ - - - - 
com.typesafe.akka.akka-sample-osgi-dining-hakkers - project - 2.5-SNAPSHOT - - 4.0.0 - - - - uncommons - org.uncommons.maths.random - 1.2.2 - bundle - - - - - org.uncommons.maths - uncommons-maths - 1.2.2 - - - - - - jfree - jcommon - - - jfree - jfreechart - - - - - jfree - jfreechart - 1.0.13 - - - jfree - jcommon - 1.0.16 - - - - - - - - org.apache.felix - maven-bundle-plugin - - - ${project.name} - ${project.artifactId} - org.uncommons.maths.random - !sun.misc, * - org.uncommons.maths.binary, org.uncommons.maths, org.uncommons.maths.number - - - - - - diff --git a/akka-samples/akka-sample-persistence-java-lambda/.gitignore b/akka-samples/akka-sample-persistence-java-lambda/.gitignore deleted file mode 100644 index 660c959e44..0000000000 --- a/akka-samples/akka-sample-persistence-java-lambda/.gitignore +++ /dev/null @@ -1,17 +0,0 @@ -*# -*.iml -*.ipr -*.iws -*.pyc -*.tm.epoch -*.vim -*-shim.sbt -.idea/ -/project/plugins/project -project/boot -target/ -/logs -.cache -.classpath -.project -.settings \ No newline at end of file diff --git a/akka-samples/akka-sample-persistence-java-lambda/COPYING b/akka-samples/akka-sample-persistence-java-lambda/COPYING deleted file mode 100644 index 0e259d42c9..0000000000 --- a/akka-samples/akka-sample-persistence-java-lambda/COPYING +++ /dev/null @@ -1,121 +0,0 @@ -Creative Commons Legal Code - -CC0 1.0 Universal - - CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE - LEGAL SERVICES. DISTRIBUTION OF THIS DOCUMENT DOES NOT CREATE AN - ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS - INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES - REGARDING THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS - PROVIDED HEREUNDER, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM - THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS PROVIDED - HEREUNDER. 
- -Statement of Purpose - -The laws of most jurisdictions throughout the world automatically confer -exclusive Copyright and Related Rights (defined below) upon the creator -and subsequent owner(s) (each and all, an "owner") of an original work of -authorship and/or a database (each, a "Work"). - -Certain owners wish to permanently relinquish those rights to a Work for -the purpose of contributing to a commons of creative, cultural and -scientific works ("Commons") that the public can reliably and without fear -of later claims of infringement build upon, modify, incorporate in other -works, reuse and redistribute as freely as possible in any form whatsoever -and for any purposes, including without limitation commercial purposes. -These owners may contribute to the Commons to promote the ideal of a free -culture and the further production of creative, cultural and scientific -works, or to gain reputation or greater distribution for their Work in -part through the use and efforts of others. - -For these and/or other purposes and motivations, and without any -expectation of additional consideration or compensation, the person -associating CC0 with a Work (the "Affirmer"), to the extent that he or she -is an owner of Copyright and Related Rights in the Work, voluntarily -elects to apply CC0 to the Work and publicly distribute the Work under its -terms, with knowledge of his or her Copyright and Related Rights in the -Work and the meaning and intended legal effect of CC0 on those rights. - -1. Copyright and Related Rights. A Work made available under CC0 may be -protected by copyright and related or neighboring rights ("Copyright and -Related Rights"). Copyright and Related Rights include, but are not -limited to, the following: - - i. the right to reproduce, adapt, distribute, perform, display, - communicate, and translate a Work; - ii. moral rights retained by the original author(s) and/or performer(s); -iii. 
publicity and privacy rights pertaining to a person's image or - likeness depicted in a Work; - iv. rights protecting against unfair competition in regards to a Work, - subject to the limitations in paragraph 4(a), below; - v. rights protecting the extraction, dissemination, use and reuse of data - in a Work; - vi. database rights (such as those arising under Directive 96/9/EC of the - European Parliament and of the Council of 11 March 1996 on the legal - protection of databases, and under any national implementation - thereof, including any amended or successor version of such - directive); and -vii. other similar, equivalent or corresponding rights throughout the - world based on applicable law or treaty, and any national - implementations thereof. - -2. Waiver. To the greatest extent permitted by, but not in contravention -of, applicable law, Affirmer hereby overtly, fully, permanently, -irrevocably and unconditionally waives, abandons, and surrenders all of -Affirmer's Copyright and Related Rights and associated claims and causes -of action, whether now known or unknown (including existing as well as -future claims and causes of action), in the Work (i) in all territories -worldwide, (ii) for the maximum duration provided by applicable law or -treaty (including future time extensions), (iii) in any current or future -medium and for any number of copies, and (iv) for any purpose whatsoever, -including without limitation commercial, advertising or promotional -purposes (the "Waiver"). Affirmer makes the Waiver for the benefit of each -member of the public at large and to the detriment of Affirmer's heirs and -successors, fully intending that such Waiver shall not be subject to -revocation, rescission, cancellation, termination, or any other legal or -equitable action to disrupt the quiet enjoyment of the Work by the public -as contemplated by Affirmer's express Statement of Purpose. - -3. Public License Fallback. 
Should any part of the Waiver for any reason -be judged legally invalid or ineffective under applicable law, then the -Waiver shall be preserved to the maximum extent permitted taking into -account Affirmer's express Statement of Purpose. In addition, to the -extent the Waiver is so judged Affirmer hereby grants to each affected -person a royalty-free, non transferable, non sublicensable, non exclusive, -irrevocable and unconditional license to exercise Affirmer's Copyright and -Related Rights in the Work (i) in all territories worldwide, (ii) for the -maximum duration provided by applicable law or treaty (including future -time extensions), (iii) in any current or future medium and for any number -of copies, and (iv) for any purpose whatsoever, including without -limitation commercial, advertising or promotional purposes (the -"License"). The License shall be deemed effective as of the date CC0 was -applied by Affirmer to the Work. Should any part of the License for any -reason be judged legally invalid or ineffective under applicable law, such -partial invalidity or ineffectiveness shall not invalidate the remainder -of the License, and in such case Affirmer hereby affirms that he or she -will not (i) exercise any of his or her remaining Copyright and Related -Rights in the Work or (ii) assert any associated claims and causes of -action with respect to the Work, in either case contrary to Affirmer's -express Statement of Purpose. - -4. Limitations and Disclaimers. - - a. No trademark or patent rights held by Affirmer are waived, abandoned, - surrendered, licensed or otherwise affected by this document. - b. 
Affirmer offers the Work as-is and makes no representations or - warranties of any kind concerning the Work, express, implied, - statutory or otherwise, including without limitation warranties of - title, merchantability, fitness for a particular purpose, non - infringement, or the absence of latent or other defects, accuracy, or - the present or absence of errors, whether or not discoverable, all to - the greatest extent permissible under applicable law. - c. Affirmer disclaims responsibility for clearing rights of other persons - that may apply to the Work or any use thereof, including without - limitation any person's Copyright and Related Rights in the Work. - Further, Affirmer disclaims responsibility for obtaining any necessary - consents, permissions or other rights required for any use of the - Work. - d. Affirmer understands and acknowledges that Creative Commons is not a - party to this document and has no duty or obligation with respect to - this CC0 or use of the Work. diff --git a/akka-samples/akka-sample-persistence-java-lambda/LICENSE b/akka-samples/akka-sample-persistence-java-lambda/LICENSE deleted file mode 100644 index 2a18b45589..0000000000 --- a/akka-samples/akka-sample-persistence-java-lambda/LICENSE +++ /dev/null @@ -1,10 +0,0 @@ -Activator Template by Lightbend - -Licensed under Public Domain (CC0) - -To the extent possible under law, the person who associated CC0 with -this Activator Tempate has waived all copyright and related or neighboring -rights to this Activator Template. - -You should have received a copy of the CC0 legalcode along with this -work. If not, see . 
diff --git a/akka-samples/akka-sample-persistence-java-lambda/activator.properties b/akka-samples/akka-sample-persistence-java-lambda/activator.properties deleted file mode 100644 index 917d565c6e..0000000000 --- a/akka-samples/akka-sample-persistence-java-lambda/activator.properties +++ /dev/null @@ -1,7 +0,0 @@ -name=akka-sample-persistence-java-lambda -title=Akka Persistence Samples in Java with Lambdas -description=Akka Persistence Samples in Java with Lambdas -tags=akka,java,java8,sample,persistence,sample -authorName=Akka Team -authorLink=http://akka.io/ -sourceLink=https://github.com/akka/akka diff --git a/akka-samples/akka-sample-persistence-java-lambda/build.sbt b/akka-samples/akka-sample-persistence-java-lambda/build.sbt deleted file mode 100644 index c1a47907a0..0000000000 --- a/akka-samples/akka-sample-persistence-java-lambda/build.sbt +++ /dev/null @@ -1,17 +0,0 @@ -name := "akka-sample-persistence-java-lambda" - -version := "2.5-SNAPSHOT" - -scalaVersion := "2.11.8" - -javacOptions in compile ++= Seq("-encoding", "UTF-8", "-source", "1.8", "-target", "1.8", "-Xlint") - -javacOptions in doc ++= Seq("-encoding", "UTF-8", "-source", "1.8", "-Xdoclint:none") - -libraryDependencies ++= Seq( - "com.typesafe.akka" %% "akka-persistence" % "2.5-SNAPSHOT", - "org.iq80.leveldb" % "leveldb" % "0.7", - "org.fusesource.leveldbjni" % "leveldbjni-all" % "1.8" -) - -licenses := Seq(("CC0", url("http://creativecommons.org/publicdomain/zero/1.0"))) diff --git a/akka-samples/akka-sample-persistence-java-lambda/pom.xml b/akka-samples/akka-sample-persistence-java-lambda/pom.xml deleted file mode 100644 index 28890ebeb3..0000000000 --- a/akka-samples/akka-sample-persistence-java-lambda/pom.xml +++ /dev/null @@ -1,59 +0,0 @@ - - 4.0.0 - - - UTF-8 - - - sample - akka-sample-persistence-java8 - jar - 2.5-SNAPSHOT - - - - com.typesafe.akka - akka-actor_2.11 - 2.5-SNAPSHOT - - - com.typesafe.akka - akka-persistence_2.11 - 2.5-SNAPSHOT - - - com.typesafe.akka - akka-testkit_2.11 
- 2.5-SNAPSHOT - - - junit - junit - 4.12 - test - - - - - - - org.apache.maven.plugins - maven-compiler-plugin - 3.1 - - 1.8 - 1.8 - true - - -Xlint - - - - - - - - diff --git a/akka-samples/akka-sample-persistence-java-lambda/project/build.properties b/akka-samples/akka-sample-persistence-java-lambda/project/build.properties deleted file mode 100644 index 748703f770..0000000000 --- a/akka-samples/akka-sample-persistence-java-lambda/project/build.properties +++ /dev/null @@ -1 +0,0 @@ -sbt.version=0.13.7 diff --git a/akka-samples/akka-sample-persistence-java-lambda/src/main/java/sample/persistence/PersistentActorFailureExample.java b/akka-samples/akka-sample-persistence-java-lambda/src/main/java/sample/persistence/PersistentActorFailureExample.java deleted file mode 100644 index 0c986ddd60..0000000000 --- a/akka-samples/akka-sample-persistence-java-lambda/src/main/java/sample/persistence/PersistentActorFailureExample.java +++ /dev/null @@ -1,77 +0,0 @@ -/** - * Copyright (C) 2009-2017 Lightbend Inc. - */ - -package sample.persistence; - -import akka.actor.ActorRef; -import akka.actor.ActorSystem; -import akka.actor.Props; -import akka.japi.pf.ReceiveBuilder; -import akka.persistence.AbstractPersistentActor; -import scala.PartialFunction; -import scala.runtime.BoxedUnit; - -import java.util.ArrayList; - -public class PersistentActorFailureExample { - public static class ExamplePersistentActor extends AbstractPersistentActor { - private ArrayList received = new ArrayList(); - - @Override - public String persistenceId() { return "sample-id-2"; } - - @Override - public PartialFunction receiveCommand() { - return ReceiveBuilder. - match(String.class, s -> s.equals("boom"), s -> {throw new RuntimeException("boom");}). - match(String.class, s -> s.equals("print"), s -> System.out.println("received " + received)). - match(String.class, s -> { - persist(s, evt -> { - received.add(evt); - }); - }). 
- build(); - } - - @Override - public PartialFunction receiveRecover() { - return ReceiveBuilder. - match(String.class, s -> received.add(s)). - build(); - } - - - } - - public static void main(String... args) throws Exception { - final ActorSystem system = ActorSystem.create("example"); - final ActorRef persistentActor = system.actorOf(Props.create(ExamplePersistentActor.class), "persistentActor-2"); - - persistentActor.tell("a", null); - persistentActor.tell("print", null); - persistentActor.tell("boom", null); - persistentActor.tell("print", null); - persistentActor.tell("b", null); - persistentActor.tell("print", null); - persistentActor.tell("c", null); - persistentActor.tell("print", null); - - // Will print in a first run (i.e. with empty journal): - - // received [a] - // received [a, b] - // received [a, b, c] - - // Will print in a second run: - - // received [a, b, c, a] - // received [a, b, c, a, b] - // received [a, b, c, a, b, c] - - // etc ... - - Thread.sleep(10000); - system.terminate(); - } -} diff --git a/akka-samples/akka-sample-persistence-java-lambda/src/main/java/sample/persistence/SnapshotExample.java b/akka-samples/akka-sample-persistence-java-lambda/src/main/java/sample/persistence/SnapshotExample.java deleted file mode 100644 index 416d2cbdff..0000000000 --- a/akka-samples/akka-sample-persistence-java-lambda/src/main/java/sample/persistence/SnapshotExample.java +++ /dev/null @@ -1,94 +0,0 @@ -/** - * Copyright (C) 2009-2017 Lightbend Inc. 
- */ - -package sample.persistence; - -import akka.actor.ActorRef; -import akka.actor.ActorSystem; -import akka.actor.Props; -import akka.japi.pf.ReceiveBuilder; -import akka.persistence.AbstractPersistentActor; -import akka.persistence.SnapshotOffer; -import scala.PartialFunction; -import scala.runtime.BoxedUnit; - -import java.io.Serializable; -import java.util.ArrayList; - -public class SnapshotExample { - public static class ExampleState implements Serializable { - private static final long serialVersionUID = 1L; - private final ArrayList received; - - public ExampleState() { - this(new ArrayList()); - } - - public ExampleState(ArrayList received) { - this.received = received; - } - - public ExampleState copy() { - return new ExampleState(new ArrayList(received)); - } - - public void update(String s) { - received.add(s); - } - - @Override - public String toString() { - return received.toString(); - } - } - - public static class ExamplePersistentActor extends AbstractPersistentActor { - private ExampleState state = new ExampleState(); - - @Override - public PartialFunction receiveCommand() { - return ReceiveBuilder. - match(String.class, s -> s.equals("print"), s -> System.out.println("current state = " + state)). - match(String.class, s -> s.equals("snap"), s -> - // IMPORTANT: create a copy of snapshot - // because ExampleState is mutable !!! - saveSnapshot(state.copy())). - match(String.class, s -> { - persist(s, evt -> { - state.update(evt); - }); - }). - build(); - } - - @Override - public String persistenceId() { return "sample-id-3"; } - - @Override - public PartialFunction receiveRecover() { - return ReceiveBuilder. - match(String.class, evt -> state.update(evt)). - match(SnapshotOffer.class, ss -> { - System.out.println("offered state = " + ss); - state = (ExampleState) ss.snapshot(); - }). - build(); - } - } - - public static void main(String... 
args) throws Exception { - final ActorSystem system = ActorSystem.create("example"); - final ActorRef persistentActor = system.actorOf(Props.create(ExamplePersistentActor.class), "persistentActor-3-java"); - - persistentActor.tell("a", null); - persistentActor.tell("b", null); - persistentActor.tell("snap", null); - persistentActor.tell("c", null); - persistentActor.tell("d", null); - persistentActor.tell("print", null); - - Thread.sleep(10000); - system.terminate(); - } -} diff --git a/akka-samples/akka-sample-persistence-java-lambda/src/main/resources/application.conf b/akka-samples/akka-sample-persistence-java-lambda/src/main/resources/application.conf deleted file mode 100644 index 48bbe84040..0000000000 --- a/akka-samples/akka-sample-persistence-java-lambda/src/main/resources/application.conf +++ /dev/null @@ -1,9 +0,0 @@ -akka.persistence.journal.plugin = "akka.persistence.journal.leveldb" -akka.persistence.snapshot-store.plugin = "akka.persistence.snapshot-store.local" - -akka.persistence.journal.leveldb.dir = "target/example/journal" -akka.persistence.snapshot-store.local.dir = "target/example/snapshots" - -# DO NOT USE THIS IN PRODUCTION !!! -# See also https://github.com/typesafehub/activator/issues/287 -akka.persistence.journal.leveldb.native = false diff --git a/akka-samples/akka-sample-persistence-java-lambda/tutorial/index.html b/akka-samples/akka-sample-persistence-java-lambda/tutorial/index.html deleted file mode 100644 index 54d9b9389e..0000000000 --- a/akka-samples/akka-sample-persistence-java-lambda/tutorial/index.html +++ /dev/null @@ -1,89 +0,0 @@ - - -Akka Persistence Samples in Java with Lambdas - - - - -
-

Akka Persistence Samples

-

-This tutorial contains examples that illustrate a subset of -Akka Persistence features. -

-
    -
  • persistent actor
  • -
  • persistent actor snapshots
  • -
  • persistent actor recovery
  • -
  • persistent actor views
  • -
- -

-Custom storage locations for the journal and snapshots can be defined in -application.conf. -

-
- -
-

Persistent actor

-

-PersistentActorExample.java -is described in detail in the Event sourcing -section of the user documentation. With every application run, the ExamplePersistentActor is recovered from -events stored in previous application runs, processes new commands, stores new events and snapshots and prints the -current persistent actor state to stdout. -

- -

-To run this example, go to the Run tab, and run the application main class -sample.persistence.PersistentActorExample several times. -

-
- -
-

Persistent actor snapshots

-

-SnapshotExample.java -demonstrates how persistent actors can take snapshots of application state and recover from previously stored snapshots. -Snapshots are offered to persistent actors at the beginning of recovery, before any messages (younger than the snapshot) -are replayed. -

- -

-To run this example, go to the Run tab, and run the application main class -sample.persistence.SnapshotExample several times. With every run, the state offered by the -most recent snapshot is printed to stdout, followed by the updated state after sending new persistent -messages to the persistent actor. -

-
- -
-

Persistent actor recovery

-

-PersistentActorFailureExample.java -shows how a persistent actor can throw an exception, restart and restore the state by replaying the events. -

- -

-To run this example, go to the Run tab, and run the application main class -sample.persistence.PersistentActorFailureExample several times. -

-
- -
-

Persistent actor views

-

-ViewExample.java demonstrates -how a view (ExampleView) is updated with the persistent message stream of a persistent actor -(ExamplePersistentActor). Messages sent to the persistent actor are scheduled periodically. Views also support -snapshotting to reduce recovery time. -

- -

-To run this example, go to the Run tab, and run the application main class -sample.persistence.PersistentViewExample. -

-
- - - diff --git a/akka-samples/akka-sample-persistence-java/.gitignore b/akka-samples/akka-sample-persistence-java/.gitignore deleted file mode 100644 index 660c959e44..0000000000 --- a/akka-samples/akka-sample-persistence-java/.gitignore +++ /dev/null @@ -1,17 +0,0 @@ -*# -*.iml -*.ipr -*.iws -*.pyc -*.tm.epoch -*.vim -*-shim.sbt -.idea/ -/project/plugins/project -project/boot -target/ -/logs -.cache -.classpath -.project -.settings \ No newline at end of file diff --git a/akka-samples/akka-sample-persistence-java/COPYING b/akka-samples/akka-sample-persistence-java/COPYING deleted file mode 100644 index 0e259d42c9..0000000000 --- a/akka-samples/akka-sample-persistence-java/COPYING +++ /dev/null @@ -1,121 +0,0 @@ -Creative Commons Legal Code - -CC0 1.0 Universal - - CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE - LEGAL SERVICES. DISTRIBUTION OF THIS DOCUMENT DOES NOT CREATE AN - ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS - INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES - REGARDING THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS - PROVIDED HEREUNDER, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM - THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS PROVIDED - HEREUNDER. - -Statement of Purpose - -The laws of most jurisdictions throughout the world automatically confer -exclusive Copyright and Related Rights (defined below) upon the creator -and subsequent owner(s) (each and all, an "owner") of an original work of -authorship and/or a database (each, a "Work"). 
- -Certain owners wish to permanently relinquish those rights to a Work for -the purpose of contributing to a commons of creative, cultural and -scientific works ("Commons") that the public can reliably and without fear -of later claims of infringement build upon, modify, incorporate in other -works, reuse and redistribute as freely as possible in any form whatsoever -and for any purposes, including without limitation commercial purposes. -These owners may contribute to the Commons to promote the ideal of a free -culture and the further production of creative, cultural and scientific -works, or to gain reputation or greater distribution for their Work in -part through the use and efforts of others. - -For these and/or other purposes and motivations, and without any -expectation of additional consideration or compensation, the person -associating CC0 with a Work (the "Affirmer"), to the extent that he or she -is an owner of Copyright and Related Rights in the Work, voluntarily -elects to apply CC0 to the Work and publicly distribute the Work under its -terms, with knowledge of his or her Copyright and Related Rights in the -Work and the meaning and intended legal effect of CC0 on those rights. - -1. Copyright and Related Rights. A Work made available under CC0 may be -protected by copyright and related or neighboring rights ("Copyright and -Related Rights"). Copyright and Related Rights include, but are not -limited to, the following: - - i. the right to reproduce, adapt, distribute, perform, display, - communicate, and translate a Work; - ii. moral rights retained by the original author(s) and/or performer(s); -iii. publicity and privacy rights pertaining to a person's image or - likeness depicted in a Work; - iv. rights protecting against unfair competition in regards to a Work, - subject to the limitations in paragraph 4(a), below; - v. rights protecting the extraction, dissemination, use and reuse of data - in a Work; - vi. 
database rights (such as those arising under Directive 96/9/EC of the - European Parliament and of the Council of 11 March 1996 on the legal - protection of databases, and under any national implementation - thereof, including any amended or successor version of such - directive); and -vii. other similar, equivalent or corresponding rights throughout the - world based on applicable law or treaty, and any national - implementations thereof. - -2. Waiver. To the greatest extent permitted by, but not in contravention -of, applicable law, Affirmer hereby overtly, fully, permanently, -irrevocably and unconditionally waives, abandons, and surrenders all of -Affirmer's Copyright and Related Rights and associated claims and causes -of action, whether now known or unknown (including existing as well as -future claims and causes of action), in the Work (i) in all territories -worldwide, (ii) for the maximum duration provided by applicable law or -treaty (including future time extensions), (iii) in any current or future -medium and for any number of copies, and (iv) for any purpose whatsoever, -including without limitation commercial, advertising or promotional -purposes (the "Waiver"). Affirmer makes the Waiver for the benefit of each -member of the public at large and to the detriment of Affirmer's heirs and -successors, fully intending that such Waiver shall not be subject to -revocation, rescission, cancellation, termination, or any other legal or -equitable action to disrupt the quiet enjoyment of the Work by the public -as contemplated by Affirmer's express Statement of Purpose. - -3. Public License Fallback. Should any part of the Waiver for any reason -be judged legally invalid or ineffective under applicable law, then the -Waiver shall be preserved to the maximum extent permitted taking into -account Affirmer's express Statement of Purpose. 
In addition, to the -extent the Waiver is so judged Affirmer hereby grants to each affected -person a royalty-free, non transferable, non sublicensable, non exclusive, -irrevocable and unconditional license to exercise Affirmer's Copyright and -Related Rights in the Work (i) in all territories worldwide, (ii) for the -maximum duration provided by applicable law or treaty (including future -time extensions), (iii) in any current or future medium and for any number -of copies, and (iv) for any purpose whatsoever, including without -limitation commercial, advertising or promotional purposes (the -"License"). The License shall be deemed effective as of the date CC0 was -applied by Affirmer to the Work. Should any part of the License for any -reason be judged legally invalid or ineffective under applicable law, such -partial invalidity or ineffectiveness shall not invalidate the remainder -of the License, and in such case Affirmer hereby affirms that he or she -will not (i) exercise any of his or her remaining Copyright and Related -Rights in the Work or (ii) assert any associated claims and causes of -action with respect to the Work, in either case contrary to Affirmer's -express Statement of Purpose. - -4. Limitations and Disclaimers. - - a. No trademark or patent rights held by Affirmer are waived, abandoned, - surrendered, licensed or otherwise affected by this document. - b. Affirmer offers the Work as-is and makes no representations or - warranties of any kind concerning the Work, express, implied, - statutory or otherwise, including without limitation warranties of - title, merchantability, fitness for a particular purpose, non - infringement, or the absence of latent or other defects, accuracy, or - the present or absence of errors, whether or not discoverable, all to - the greatest extent permissible under applicable law. - c. 
Affirmer disclaims responsibility for clearing rights of other persons - that may apply to the Work or any use thereof, including without - limitation any person's Copyright and Related Rights in the Work. - Further, Affirmer disclaims responsibility for obtaining any necessary - consents, permissions or other rights required for any use of the - Work. - d. Affirmer understands and acknowledges that Creative Commons is not a - party to this document and has no duty or obligation with respect to - this CC0 or use of the Work. diff --git a/akka-samples/akka-sample-persistence-java/LICENSE b/akka-samples/akka-sample-persistence-java/LICENSE deleted file mode 100644 index 2a18b45589..0000000000 --- a/akka-samples/akka-sample-persistence-java/LICENSE +++ /dev/null @@ -1,10 +0,0 @@ -Activator Template by Lightbend - -Licensed under Public Domain (CC0) - -To the extent possible under law, the person who associated CC0 with -this Activator Tempate has waived all copyright and related or neighboring -rights to this Activator Template. - -You should have received a copy of the CC0 legalcode along with this -work. If not, see . 
diff --git a/akka-samples/akka-sample-persistence-java/activator.properties b/akka-samples/akka-sample-persistence-java/activator.properties deleted file mode 100644 index eca7098e0f..0000000000 --- a/akka-samples/akka-sample-persistence-java/activator.properties +++ /dev/null @@ -1,7 +0,0 @@ -name=akka-sample-persistence-java -title=Akka Persistence Samples with Java -description=Akka Persistence Samples with Java -tags=akka,persistence,java,sample -authorName=Akka Team -authorLink=http://akka.io/ -sourceLink=https://github.com/akka/akka diff --git a/akka-samples/akka-sample-persistence-java/build.sbt b/akka-samples/akka-sample-persistence-java/build.sbt deleted file mode 100644 index 3e05bd753b..0000000000 --- a/akka-samples/akka-sample-persistence-java/build.sbt +++ /dev/null @@ -1,13 +0,0 @@ -name := "akka-sample-persistence-java" - -version := "2.5-SNAPSHOT" - -scalaVersion := "2.11.8" - -libraryDependencies ++= Seq( - "com.typesafe.akka" %% "akka-persistence" % "2.5-SNAPSHOT", - "org.iq80.leveldb" % "leveldb" % "0.7", - "org.fusesource.leveldbjni" % "leveldbjni-all" % "1.8" -) - -licenses := Seq(("CC0", url("http://creativecommons.org/publicdomain/zero/1.0"))) diff --git a/akka-samples/akka-sample-persistence-java/project/build.properties b/akka-samples/akka-sample-persistence-java/project/build.properties deleted file mode 100644 index 748703f770..0000000000 --- a/akka-samples/akka-sample-persistence-java/project/build.properties +++ /dev/null @@ -1 +0,0 @@ -sbt.version=0.13.7 diff --git a/akka-samples/akka-sample-persistence-java/src/main/java/sample/persistence/PersistentActorExample.java b/akka-samples/akka-sample-persistence-java/src/main/java/sample/persistence/PersistentActorExample.java deleted file mode 100644 index 62a924ffbf..0000000000 --- a/akka-samples/akka-sample-persistence-java/src/main/java/sample/persistence/PersistentActorExample.java +++ /dev/null @@ -1,135 +0,0 @@ -package sample.persistence; - -//#persistent-actor-example -import 
akka.actor.ActorRef; -import akka.actor.ActorSystem; -import akka.actor.Props; -import akka.japi.Procedure; -import akka.persistence.SnapshotOffer; -import akka.persistence.UntypedPersistentActor; - -import java.io.Serializable; -import java.util.ArrayList; -import static java.util.Arrays.asList; - -class Cmd implements Serializable { - private static final long serialVersionUID = 1L; - private final String data; - - public Cmd(String data) { - this.data = data; - } - - public String getData() { - return data; - } -} - -class Evt implements Serializable { - private static final long serialVersionUID = 1L; - private final String data; - - public Evt(String data) { - this.data = data; - } - - public String getData() { - return data; - } -} - -class ExampleState implements Serializable { - private static final long serialVersionUID = 1L; - private final ArrayList events; - - public ExampleState() { - this(new ArrayList()); - } - - public ExampleState(ArrayList events) { - this.events = events; - } - - public ExampleState copy() { - return new ExampleState(new ArrayList(events)); - } - - public void update(Evt evt) { - events.add(evt.getData()); - } - - public int size() { - return events.size(); - } - - @Override - public String toString() { - return events.toString(); - } -} - -class ExamplePersistentActor extends UntypedPersistentActor { - @Override - public String persistenceId() { return "sample-id-1"; } - - private ExampleState state = new ExampleState(); - - public int getNumEvents() { - return state.size(); - } - - @Override - public void onReceiveRecover(Object msg) { - if (msg instanceof Evt) { - state.update((Evt) msg); - } else if (msg instanceof SnapshotOffer) { - state = (ExampleState)((SnapshotOffer)msg).snapshot(); - } else { - unhandled(msg); - } - } - - @Override - public void onReceiveCommand(Object msg) { - if (msg instanceof Cmd) { - final String data = ((Cmd)msg).getData(); - final Evt evt1 = new Evt(data + "-" + getNumEvents()); - final Evt evt2 
= new Evt(data + "-" + (getNumEvents() + 1)); - persistAll(asList(evt1, evt2), new Procedure() { - public void apply(Evt evt) throws Exception { - state.update(evt); - if (evt.equals(evt2)) { - getContext().system().eventStream().publish(evt); - } - } - }); - } else if (msg.equals("snap")) { - // IMPORTANT: create a copy of snapshot - // because ExampleState is mutable !!! - saveSnapshot(state.copy()); - } else if (msg.equals("print")) { - System.out.println(state); - } else { - unhandled(msg); - } - } -} -//#persistent-actor-example - -public class PersistentActorExample { - public static void main(String... args) throws Exception { - final ActorSystem system = ActorSystem.create("example"); - final ActorRef persistentActor = - system.actorOf(Props.create(ExamplePersistentActor.class), "persistentActor-4-java"); - - persistentActor.tell(new Cmd("foo"), null); - persistentActor.tell(new Cmd("baz"), null); - persistentActor.tell(new Cmd("bar"), null); - persistentActor.tell("snap", null); - persistentActor.tell(new Cmd("buzz"), null); - persistentActor.tell("print", null); - - Thread.sleep(10000); - system.terminate(); - } -} diff --git a/akka-samples/akka-sample-persistence-java/src/main/java/sample/persistence/PersistentActorFailureExample.java b/akka-samples/akka-sample-persistence-java/src/main/java/sample/persistence/PersistentActorFailureExample.java deleted file mode 100644 index 291ee55c93..0000000000 --- a/akka-samples/akka-sample-persistence-java/src/main/java/sample/persistence/PersistentActorFailureExample.java +++ /dev/null @@ -1,76 +0,0 @@ -package sample.persistence; - -import akka.actor.ActorRef; -import akka.actor.ActorSystem; -import akka.actor.Props; -import akka.japi.Procedure; -import akka.persistence.UntypedPersistentActor; - -import java.util.ArrayList; - -public class PersistentActorFailureExample { - public static class ExamplePersistentActor extends UntypedPersistentActor { - @Override - public String persistenceId() { return "sample-id-2"; 
} - - private ArrayList received = new ArrayList(); - - @Override - public void onReceiveCommand(Object message) throws Exception { - if (message.equals("boom")) { - throw new Exception("boom"); - } else if (message.equals("print")) { - System.out.println("received " + received); - } else if (message instanceof String) { - String s = (String) message; - persist(s, new Procedure() { - public void apply(String evt) throws Exception { - received.add(evt); - } - }); - } else { - unhandled(message); - } - } - - @Override - public void onReceiveRecover(Object message) { - if (message instanceof String) { - received.add((String) message); - } else { - unhandled(message); - } - } - } - - public static void main(String... args) throws Exception { - final ActorSystem system = ActorSystem.create("example"); - final ActorRef persistentActor = system.actorOf(Props.create(ExamplePersistentActor.class), "persistentActor-2"); - - persistentActor.tell("a", null); - persistentActor.tell("print", null); - persistentActor.tell("boom", null); - persistentActor.tell("print", null); - persistentActor.tell("b", null); - persistentActor.tell("print", null); - persistentActor.tell("c", null); - persistentActor.tell("print", null); - - // Will print in a first run (i.e. with empty journal): - - // received [a] - // received [a, b] - // received [a, b, c] - - // Will print in a second run: - - // received [a, b, c, a] - // received [a, b, c, a, b] - // received [a, b, c, a, b, c] - - // etc ... 
- - Thread.sleep(10000); - system.terminate(); - } -} diff --git a/akka-samples/akka-sample-persistence-java/src/main/java/sample/persistence/SnapshotExample.java b/akka-samples/akka-sample-persistence-java/src/main/java/sample/persistence/SnapshotExample.java deleted file mode 100644 index 0c1ed7cac0..0000000000 --- a/akka-samples/akka-sample-persistence-java/src/main/java/sample/persistence/SnapshotExample.java +++ /dev/null @@ -1,101 +0,0 @@ -package sample.persistence; - -import akka.actor.ActorRef; -import akka.actor.ActorSystem; -import akka.actor.Props; -import akka.japi.Procedure; -import akka.persistence.SaveSnapshotFailure; -import akka.persistence.SaveSnapshotSuccess; -import akka.persistence.SnapshotOffer; -import akka.persistence.UntypedPersistentActor; - -import java.io.Serializable; -import java.util.ArrayList; - -public class SnapshotExample { - public static class ExampleState implements Serializable { - private static final long serialVersionUID = 1L; - private final ArrayList received; - - public ExampleState() { - this(new ArrayList()); - } - - public ExampleState(ArrayList received) { - this.received = received; - } - - public ExampleState copy() { - return new ExampleState(new ArrayList(received)); - } - - public void update(String s) { - received.add(s); - } - - @Override - public String toString() { - return received.toString(); - } - } - - public static class ExamplePersistentActor extends UntypedPersistentActor { - @Override - public String persistenceId() { return "sample-id-3"; } - - private ExampleState state = new ExampleState(); - - @Override - public void onReceiveCommand(Object message) { - if (message.equals("print")) { - System.out.println("current state = " + state); - } else if (message.equals("snap")) { - // IMPORTANT: create a copy of snapshot - // because ExampleState is mutable !!! - saveSnapshot(state.copy()); - } else if (message instanceof SaveSnapshotSuccess) { - // ... 
- } else if (message instanceof SaveSnapshotFailure) { - // ... - } else if (message instanceof String) { - String s = (String) message; - persist(s, new Procedure() { - public void apply(String evt) throws Exception { - state.update(evt); - } - }); - } else { - unhandled(message); - } - } - - @Override - public void onReceiveRecover(Object message) { - if (message instanceof SnapshotOffer) { - ExampleState s = (ExampleState)((SnapshotOffer)message).snapshot(); - System.out.println("offered state = " + s); - state = s; - } else if (message instanceof String) { - state.update((String) message); - } else { - unhandled(message); - } - } - - } - - public static void main(String... args) throws Exception { - final ActorSystem system = ActorSystem.create("example"); - final ActorRef persistentActor = system.actorOf(Props.create(ExamplePersistentActor.class), "persistentActor-3-java"); - - persistentActor.tell("a", null); - persistentActor.tell("b", null); - persistentActor.tell("snap", null); - persistentActor.tell("c", null); - persistentActor.tell("d", null); - persistentActor.tell("print", null); - - Thread.sleep(10000); - system.terminate(); - } -} diff --git a/akka-samples/akka-sample-persistence-java/src/main/resources/application.conf b/akka-samples/akka-sample-persistence-java/src/main/resources/application.conf deleted file mode 100644 index 48bbe84040..0000000000 --- a/akka-samples/akka-sample-persistence-java/src/main/resources/application.conf +++ /dev/null @@ -1,9 +0,0 @@ -akka.persistence.journal.plugin = "akka.persistence.journal.leveldb" -akka.persistence.snapshot-store.plugin = "akka.persistence.snapshot-store.local" - -akka.persistence.journal.leveldb.dir = "target/example/journal" -akka.persistence.snapshot-store.local.dir = "target/example/snapshots" - -# DO NOT USE THIS IN PRODUCTION !!! 
-# See also https://github.com/typesafehub/activator/issues/287 -akka.persistence.journal.leveldb.native = false diff --git a/akka-samples/akka-sample-persistence-java/tutorial/index.html b/akka-samples/akka-sample-persistence-java/tutorial/index.html deleted file mode 100644 index 2fa99ad9d7..0000000000 --- a/akka-samples/akka-sample-persistence-java/tutorial/index.html +++ /dev/null @@ -1,106 +0,0 @@ - - -Akka Persistence Samples in Java - - - - -
-

Akka Persistence Samples

-

-This tutorial contains examples that illustrate a subset of -Akka Persistence features. -

-
    -
  • persistent actor
  • -
  • persistent actor snapshots
  • -
  • persistent actor recovery
  • -
  • persistent actor views
  • -
- -

-Custom storage locations for the journal and snapshots can be defined in -application.conf. -

-
- -
-

Persistent actor

-

-PersistentActorExample.java -is described in detail in the Event sourcing -section of the user documentation. With every application run, the ExamplePersistentActor is recovered from -events stored in previous application runs, processes new commands, stores new events and snapshots and prints the -current persistent actor state to stdout. -

- -

-To run this example, go to the Run tab, and run the application main class -sample.persistence.PersistentActorExample several times. -

-
- -
-

Persistent actor snapshots

-

-SnapshotExample.java -demonstrates how persistent actors can take snapshots of application state and recover from previously stored snapshots. -Snapshots are offered to persistent actors at the beginning of recovery, before any messages (younger than the snapshot) -are replayed. -

- -

-To run this example, go to the Run tab, and run the application main class -sample.persistence.SnapshotExample several times. With every run, the state offered by the -most recent snapshot is printed to stdout, followed by the updated state after sending new persistent -messages to the persistent actor. -

-
- -
-

Persistent actor recovery

-

-PersistentActorFailureExample.java -shows how a persistent actor can throw an exception, restart and restore the state by replaying the events. -

- -

-To run this example, go to the Run tab, and run the application main class -sample.persistence.PersistentActorFailureExample several times. -

-
- -
-

Persistent actor views

-

-In a command sourced system you may persist the commands to the journal immediately. However this might cause problems -in the case where you persist a command that corrupts your state. The corrupt command will be replayed again whenever you recover your state and you would have -to delete the corrupt commands from the journal. -

-

-In an event sourced -system, one does not persist commands directly but rather events that have been derived from received commands -(not shown here). These events are known to be successfully applicable to current processor state i.e. there's -no need for deleting them from the journal. Event sourced processors usually have a lower throughput than command -sourced processors, as the maximum size of a write batch is limited by the number of persisted events per received -command. -

-
- -
-

Processor views

-

-ViewExample.java demonstrates -how a view (ExampleView) is updated with the persistent message stream of a persistent actor -(ExamplePersistentActor). Messages sent to the persistent actor are scheduled periodically. Views also support -snapshotting to reduce recovery time. -

- -

-To run this example, go to the Run tab, and run the application main class -sample.persistence.PersistentViewExample. -

-
- - - diff --git a/akka-samples/akka-sample-persistence-scala/.gitignore b/akka-samples/akka-sample-persistence-scala/.gitignore deleted file mode 100644 index 660c959e44..0000000000 --- a/akka-samples/akka-sample-persistence-scala/.gitignore +++ /dev/null @@ -1,17 +0,0 @@ -*# -*.iml -*.ipr -*.iws -*.pyc -*.tm.epoch -*.vim -*-shim.sbt -.idea/ -/project/plugins/project -project/boot -target/ -/logs -.cache -.classpath -.project -.settings \ No newline at end of file diff --git a/akka-samples/akka-sample-persistence-scala/COPYING b/akka-samples/akka-sample-persistence-scala/COPYING deleted file mode 100644 index 0e259d42c9..0000000000 --- a/akka-samples/akka-sample-persistence-scala/COPYING +++ /dev/null @@ -1,121 +0,0 @@ -Creative Commons Legal Code - -CC0 1.0 Universal - - CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE - LEGAL SERVICES. DISTRIBUTION OF THIS DOCUMENT DOES NOT CREATE AN - ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS - INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES - REGARDING THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS - PROVIDED HEREUNDER, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM - THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS PROVIDED - HEREUNDER. - -Statement of Purpose - -The laws of most jurisdictions throughout the world automatically confer -exclusive Copyright and Related Rights (defined below) upon the creator -and subsequent owner(s) (each and all, an "owner") of an original work of -authorship and/or a database (each, a "Work"). 
- -Certain owners wish to permanently relinquish those rights to a Work for -the purpose of contributing to a commons of creative, cultural and -scientific works ("Commons") that the public can reliably and without fear -of later claims of infringement build upon, modify, incorporate in other -works, reuse and redistribute as freely as possible in any form whatsoever -and for any purposes, including without limitation commercial purposes. -These owners may contribute to the Commons to promote the ideal of a free -culture and the further production of creative, cultural and scientific -works, or to gain reputation or greater distribution for their Work in -part through the use and efforts of others. - -For these and/or other purposes and motivations, and without any -expectation of additional consideration or compensation, the person -associating CC0 with a Work (the "Affirmer"), to the extent that he or she -is an owner of Copyright and Related Rights in the Work, voluntarily -elects to apply CC0 to the Work and publicly distribute the Work under its -terms, with knowledge of his or her Copyright and Related Rights in the -Work and the meaning and intended legal effect of CC0 on those rights. - -1. Copyright and Related Rights. A Work made available under CC0 may be -protected by copyright and related or neighboring rights ("Copyright and -Related Rights"). Copyright and Related Rights include, but are not -limited to, the following: - - i. the right to reproduce, adapt, distribute, perform, display, - communicate, and translate a Work; - ii. moral rights retained by the original author(s) and/or performer(s); -iii. publicity and privacy rights pertaining to a person's image or - likeness depicted in a Work; - iv. rights protecting against unfair competition in regards to a Work, - subject to the limitations in paragraph 4(a), below; - v. rights protecting the extraction, dissemination, use and reuse of data - in a Work; - vi. 
database rights (such as those arising under Directive 96/9/EC of the - European Parliament and of the Council of 11 March 1996 on the legal - protection of databases, and under any national implementation - thereof, including any amended or successor version of such - directive); and -vii. other similar, equivalent or corresponding rights throughout the - world based on applicable law or treaty, and any national - implementations thereof. - -2. Waiver. To the greatest extent permitted by, but not in contravention -of, applicable law, Affirmer hereby overtly, fully, permanently, -irrevocably and unconditionally waives, abandons, and surrenders all of -Affirmer's Copyright and Related Rights and associated claims and causes -of action, whether now known or unknown (including existing as well as -future claims and causes of action), in the Work (i) in all territories -worldwide, (ii) for the maximum duration provided by applicable law or -treaty (including future time extensions), (iii) in any current or future -medium and for any number of copies, and (iv) for any purpose whatsoever, -including without limitation commercial, advertising or promotional -purposes (the "Waiver"). Affirmer makes the Waiver for the benefit of each -member of the public at large and to the detriment of Affirmer's heirs and -successors, fully intending that such Waiver shall not be subject to -revocation, rescission, cancellation, termination, or any other legal or -equitable action to disrupt the quiet enjoyment of the Work by the public -as contemplated by Affirmer's express Statement of Purpose. - -3. Public License Fallback. Should any part of the Waiver for any reason -be judged legally invalid or ineffective under applicable law, then the -Waiver shall be preserved to the maximum extent permitted taking into -account Affirmer's express Statement of Purpose. 
In addition, to the -extent the Waiver is so judged Affirmer hereby grants to each affected -person a royalty-free, non transferable, non sublicensable, non exclusive, -irrevocable and unconditional license to exercise Affirmer's Copyright and -Related Rights in the Work (i) in all territories worldwide, (ii) for the -maximum duration provided by applicable law or treaty (including future -time extensions), (iii) in any current or future medium and for any number -of copies, and (iv) for any purpose whatsoever, including without -limitation commercial, advertising or promotional purposes (the -"License"). The License shall be deemed effective as of the date CC0 was -applied by Affirmer to the Work. Should any part of the License for any -reason be judged legally invalid or ineffective under applicable law, such -partial invalidity or ineffectiveness shall not invalidate the remainder -of the License, and in such case Affirmer hereby affirms that he or she -will not (i) exercise any of his or her remaining Copyright and Related -Rights in the Work or (ii) assert any associated claims and causes of -action with respect to the Work, in either case contrary to Affirmer's -express Statement of Purpose. - -4. Limitations and Disclaimers. - - a. No trademark or patent rights held by Affirmer are waived, abandoned, - surrendered, licensed or otherwise affected by this document. - b. Affirmer offers the Work as-is and makes no representations or - warranties of any kind concerning the Work, express, implied, - statutory or otherwise, including without limitation warranties of - title, merchantability, fitness for a particular purpose, non - infringement, or the absence of latent or other defects, accuracy, or - the present or absence of errors, whether or not discoverable, all to - the greatest extent permissible under applicable law. - c. 
Affirmer disclaims responsibility for clearing rights of other persons - that may apply to the Work or any use thereof, including without - limitation any person's Copyright and Related Rights in the Work. - Further, Affirmer disclaims responsibility for obtaining any necessary - consents, permissions or other rights required for any use of the - Work. - d. Affirmer understands and acknowledges that Creative Commons is not a - party to this document and has no duty or obligation with respect to - this CC0 or use of the Work. diff --git a/akka-samples/akka-sample-persistence-scala/LICENSE b/akka-samples/akka-sample-persistence-scala/LICENSE deleted file mode 100644 index 2a18b45589..0000000000 --- a/akka-samples/akka-sample-persistence-scala/LICENSE +++ /dev/null @@ -1,10 +0,0 @@ -Activator Template by Lightbend - -Licensed under Public Domain (CC0) - -To the extent possible under law, the person who associated CC0 with -this Activator Tempate has waived all copyright and related or neighboring -rights to this Activator Template. - -You should have received a copy of the CC0 legalcode along with this -work. If not, see . 
diff --git a/akka-samples/akka-sample-persistence-scala/activator.properties b/akka-samples/akka-sample-persistence-scala/activator.properties deleted file mode 100644 index 21d1a65a6d..0000000000 --- a/akka-samples/akka-sample-persistence-scala/activator.properties +++ /dev/null @@ -1,7 +0,0 @@ -name=akka-sample-persistence-scala -title=Akka Persistence Samples with Scala -description=Akka Persistence Samples with Scala -tags=akka,persistence,scala,sample -authorName=Akka Team -authorLink=http://akka.io/ -sourceLink=https://github.com/akka/akka diff --git a/akka-samples/akka-sample-persistence-scala/build.sbt b/akka-samples/akka-sample-persistence-scala/build.sbt deleted file mode 100644 index 8330b0cd40..0000000000 --- a/akka-samples/akka-sample-persistence-scala/build.sbt +++ /dev/null @@ -1,14 +0,0 @@ -name := "akka-sample-persistence-scala" - -version := "2.5-SNAPSHOT" - -scalaVersion := "2.11.8" - -libraryDependencies ++= Seq( - "com.typesafe.akka" %% "akka-actor" % "2.5-SNAPSHOT", - "com.typesafe.akka" %% "akka-persistence" % "2.5-SNAPSHOT", - "org.iq80.leveldb" % "leveldb" % "0.7", - "org.fusesource.leveldbjni" % "leveldbjni-all" % "1.8" -) - -licenses := Seq(("CC0", url("http://creativecommons.org/publicdomain/zero/1.0"))) diff --git a/akka-samples/akka-sample-persistence-scala/project/build.properties b/akka-samples/akka-sample-persistence-scala/project/build.properties deleted file mode 100644 index 748703f770..0000000000 --- a/akka-samples/akka-sample-persistence-scala/project/build.properties +++ /dev/null @@ -1 +0,0 @@ -sbt.version=0.13.7 diff --git a/akka-samples/akka-sample-persistence-scala/src/main/resources/application.conf b/akka-samples/akka-sample-persistence-scala/src/main/resources/application.conf deleted file mode 100644 index 48bbe84040..0000000000 --- a/akka-samples/akka-sample-persistence-scala/src/main/resources/application.conf +++ /dev/null @@ -1,9 +0,0 @@ -akka.persistence.journal.plugin = "akka.persistence.journal.leveldb" 
-akka.persistence.snapshot-store.plugin = "akka.persistence.snapshot-store.local" - -akka.persistence.journal.leveldb.dir = "target/example/journal" -akka.persistence.snapshot-store.local.dir = "target/example/snapshots" - -# DO NOT USE THIS IN PRODUCTION !!! -# See also https://github.com/typesafehub/activator/issues/287 -akka.persistence.journal.leveldb.native = false diff --git a/akka-samples/akka-sample-persistence-scala/src/main/scala/sample/persistence/PersistentActorFailureExample.scala b/akka-samples/akka-sample-persistence-scala/src/main/scala/sample/persistence/PersistentActorFailureExample.scala deleted file mode 100644 index 387429e9f5..0000000000 --- a/akka-samples/akka-sample-persistence-scala/src/main/scala/sample/persistence/PersistentActorFailureExample.scala +++ /dev/null @@ -1,53 +0,0 @@ -package sample.persistence - -import akka.actor._ -import akka.persistence._ - -object PersistentActorFailureExample extends App { - class ExamplePersistentActor extends PersistentActor { - override def persistenceId = "sample-id-2" - - var received: List[String] = Nil // state - - def receiveCommand: Receive = { - case "print" => println(s"received ${received.reverse}") - case "boom" => throw new Exception("boom") - case payload: String => - persist(payload) { p => received = p :: received } - - } - - def receiveRecover: Receive = { - case s: String => received = s :: received - } - } - - val system = ActorSystem("example") - val persistentActor = system.actorOf(Props(classOf[ExamplePersistentActor]), "persistentActor-2") - - persistentActor ! "a" - persistentActor ! "print" - persistentActor ! "boom" // restart and recovery - persistentActor ! "print" - persistentActor ! "b" - persistentActor ! "print" - persistentActor ! "c" - persistentActor ! "print" - - // Will print in a first run (i.e. 
with empty journal): - - // received List(a) - // received List(a, b) - // received List(a, b, c) - - // Will print in a second run: - - // received List(a, b, c, a) - // received List(a, b, c, a, b) - // received List(a, b, c, a, b, c) - - // etc ... - - Thread.sleep(10000) - system.terminate() -} diff --git a/akka-samples/akka-sample-persistence-scala/src/main/scala/sample/persistence/SnapshotExample.scala b/akka-samples/akka-sample-persistence-scala/src/main/scala/sample/persistence/SnapshotExample.scala deleted file mode 100644 index 3a9b1a03ce..0000000000 --- a/akka-samples/akka-sample-persistence-scala/src/main/scala/sample/persistence/SnapshotExample.scala +++ /dev/null @@ -1,48 +0,0 @@ -package sample.persistence - -import akka.actor._ -import akka.persistence._ - -object SnapshotExample extends App { - final case class ExampleState(received: List[String] = Nil) { - def updated(s: String): ExampleState = copy(s :: received) - override def toString = received.reverse.toString - } - - class ExamplePersistentActor extends PersistentActor { - def persistenceId: String = "sample-id-3" - - var state = ExampleState() - - def receiveCommand: Receive = { - case "print" => println("current state = " + state) - case "snap" => saveSnapshot(state) - case SaveSnapshotSuccess(metadata) => // ... - case SaveSnapshotFailure(metadata, reason) => // ... - case s: String => - persist(s) { evt => state = state.updated(evt) } - } - - def receiveRecover: Receive = { - case SnapshotOffer(_, s: ExampleState) => - println("offered state = " + s) - state = s - case evt: String => - state = state.updated(evt) - } - - } - - val system = ActorSystem("example") - val persistentActor = system.actorOf(Props(classOf[ExamplePersistentActor]), "persistentActor-3-scala") - - persistentActor ! "a" - persistentActor ! "b" - persistentActor ! "snap" - persistentActor ! "c" - persistentActor ! "d" - persistentActor ! 
"print" - - Thread.sleep(10000) - system.terminate() -} diff --git a/akka-samples/akka-sample-persistence-scala/tutorial/index.html b/akka-samples/akka-sample-persistence-scala/tutorial/index.html deleted file mode 100644 index 5e0ac1829a..0000000000 --- a/akka-samples/akka-sample-persistence-scala/tutorial/index.html +++ /dev/null @@ -1,115 +0,0 @@ - - -Akka Persistence Samples with Scala - - - - -
-

-This tutorial contains examples that illustrate a subset of -Akka Persistence features. -

-
    -
  • persistent actor
  • -
  • persistent actor snapshots
  • -
  • persistent actor recovery
  • -
  • persistent actor views
  • -
- -

-Custom storage locations for the journal and snapshots can be defined in -application.conf. -

-
- -
-

Persistent actor

-

-PersistentActorExample.scala -is described in detail in the Event sourcing -section of the user documentation. With every application run, the ExamplePersistentActor is recovered from -events stored in previous application runs, processes new commands, stores new events and snapshots and prints the -current persistent actor state to stdout. -

- -

-To run this example, go to the Run tab, and run the application main class -sample.persistence.PersistentActorExample several times. -

-
- -
-

Persistent actor snapshots

-

-SnapshotExample.scala -demonstrates how persistent actors can take snapshots of application state and recover from previously stored snapshots. -Snapshots are offered to persistent actors at the beginning of recovery, before any messages (younger than the snapshot) -are replayed. -

- -

-To run this example, go to the Run tab, and run the application main class -sample.persistence.SnapshotExample several times. With every run, the state offered by the -most recent snapshot is printed to stdout, followed by the updated state after sending new persistent -messages to the persistent actor. -

-
- -
-

Persistent actor recovery

-

-PersistentActorFailureExample.scala -shows how a persistent actor can throw an exception, restart and restore the state by replaying the events. -

- -

-To run this example, go to the Run tab, and run the application main class -sample.persistence.PersistentActorFailureExample several times. -

-
- -
-

Processor failure handling

-

-ProcessorFailureExample.scala -shows how a processor can delete persistent messages from the journal if they threw an exception. Throwing an exception -restarts the processor and replays messages. In order to prevent that the message that caused the exception is replayed, -it is marked as deleted in the journal (during invocation of preRestart). This is a common pattern in -command-sourcing to compensate write-ahead logging of messages. -

- -

-To run this example, go to the Run tab, and run the application main class -sample.persistence.ProcessorFailureExample several times. -

- -

-Event sourcing -on the other hand, does not persist commands directly but rather events that have been derived from received commands -(not shown here). These events are known to be successfully applicable to current processor state i.e. there's -no need for deleting them from the journal. Event sourced processors usually have a lower throughput than command -sourced processors, as the maximum size of a write batch is limited by the number of persisted events per received -command. -

-
- -
-

Processor views

-

Persistent actor views

-

-ViewExample.scala demonstrates -how a view (ExampleView) is updated with the persistent message stream of a persistent actor -(ExamplePersistentActor). Messages sent to the persistent actor are scheduled periodically. Views also support -snapshotting to reduce recovery time. -

- -

-To run this example, go to the Run tab, and run the application main class -sample.persistence.ViewExample. -

-
- - - - diff --git a/akka-samples/akka-sample-remote-java/.gitignore b/akka-samples/akka-sample-remote-java/.gitignore deleted file mode 100644 index 660c959e44..0000000000 --- a/akka-samples/akka-sample-remote-java/.gitignore +++ /dev/null @@ -1,17 +0,0 @@ -*# -*.iml -*.ipr -*.iws -*.pyc -*.tm.epoch -*.vim -*-shim.sbt -.idea/ -/project/plugins/project -project/boot -target/ -/logs -.cache -.classpath -.project -.settings \ No newline at end of file diff --git a/akka-samples/akka-sample-remote-java/COPYING b/akka-samples/akka-sample-remote-java/COPYING deleted file mode 100644 index 0e259d42c9..0000000000 --- a/akka-samples/akka-sample-remote-java/COPYING +++ /dev/null @@ -1,121 +0,0 @@ -Creative Commons Legal Code - -CC0 1.0 Universal - - CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE - LEGAL SERVICES. DISTRIBUTION OF THIS DOCUMENT DOES NOT CREATE AN - ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS - INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES - REGARDING THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS - PROVIDED HEREUNDER, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM - THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS PROVIDED - HEREUNDER. - -Statement of Purpose - -The laws of most jurisdictions throughout the world automatically confer -exclusive Copyright and Related Rights (defined below) upon the creator -and subsequent owner(s) (each and all, an "owner") of an original work of -authorship and/or a database (each, a "Work"). - -Certain owners wish to permanently relinquish those rights to a Work for -the purpose of contributing to a commons of creative, cultural and -scientific works ("Commons") that the public can reliably and without fear -of later claims of infringement build upon, modify, incorporate in other -works, reuse and redistribute as freely as possible in any form whatsoever -and for any purposes, including without limitation commercial purposes. 
-These owners may contribute to the Commons to promote the ideal of a free -culture and the further production of creative, cultural and scientific -works, or to gain reputation or greater distribution for their Work in -part through the use and efforts of others. - -For these and/or other purposes and motivations, and without any -expectation of additional consideration or compensation, the person -associating CC0 with a Work (the "Affirmer"), to the extent that he or she -is an owner of Copyright and Related Rights in the Work, voluntarily -elects to apply CC0 to the Work and publicly distribute the Work under its -terms, with knowledge of his or her Copyright and Related Rights in the -Work and the meaning and intended legal effect of CC0 on those rights. - -1. Copyright and Related Rights. A Work made available under CC0 may be -protected by copyright and related or neighboring rights ("Copyright and -Related Rights"). Copyright and Related Rights include, but are not -limited to, the following: - - i. the right to reproduce, adapt, distribute, perform, display, - communicate, and translate a Work; - ii. moral rights retained by the original author(s) and/or performer(s); -iii. publicity and privacy rights pertaining to a person's image or - likeness depicted in a Work; - iv. rights protecting against unfair competition in regards to a Work, - subject to the limitations in paragraph 4(a), below; - v. rights protecting the extraction, dissemination, use and reuse of data - in a Work; - vi. database rights (such as those arising under Directive 96/9/EC of the - European Parliament and of the Council of 11 March 1996 on the legal - protection of databases, and under any national implementation - thereof, including any amended or successor version of such - directive); and -vii. other similar, equivalent or corresponding rights throughout the - world based on applicable law or treaty, and any national - implementations thereof. - -2. Waiver. 
To the greatest extent permitted by, but not in contravention -of, applicable law, Affirmer hereby overtly, fully, permanently, -irrevocably and unconditionally waives, abandons, and surrenders all of -Affirmer's Copyright and Related Rights and associated claims and causes -of action, whether now known or unknown (including existing as well as -future claims and causes of action), in the Work (i) in all territories -worldwide, (ii) for the maximum duration provided by applicable law or -treaty (including future time extensions), (iii) in any current or future -medium and for any number of copies, and (iv) for any purpose whatsoever, -including without limitation commercial, advertising or promotional -purposes (the "Waiver"). Affirmer makes the Waiver for the benefit of each -member of the public at large and to the detriment of Affirmer's heirs and -successors, fully intending that such Waiver shall not be subject to -revocation, rescission, cancellation, termination, or any other legal or -equitable action to disrupt the quiet enjoyment of the Work by the public -as contemplated by Affirmer's express Statement of Purpose. - -3. Public License Fallback. Should any part of the Waiver for any reason -be judged legally invalid or ineffective under applicable law, then the -Waiver shall be preserved to the maximum extent permitted taking into -account Affirmer's express Statement of Purpose. 
In addition, to the -extent the Waiver is so judged Affirmer hereby grants to each affected -person a royalty-free, non transferable, non sublicensable, non exclusive, -irrevocable and unconditional license to exercise Affirmer's Copyright and -Related Rights in the Work (i) in all territories worldwide, (ii) for the -maximum duration provided by applicable law or treaty (including future -time extensions), (iii) in any current or future medium and for any number -of copies, and (iv) for any purpose whatsoever, including without -limitation commercial, advertising or promotional purposes (the -"License"). The License shall be deemed effective as of the date CC0 was -applied by Affirmer to the Work. Should any part of the License for any -reason be judged legally invalid or ineffective under applicable law, such -partial invalidity or ineffectiveness shall not invalidate the remainder -of the License, and in such case Affirmer hereby affirms that he or she -will not (i) exercise any of his or her remaining Copyright and Related -Rights in the Work or (ii) assert any associated claims and causes of -action with respect to the Work, in either case contrary to Affirmer's -express Statement of Purpose. - -4. Limitations and Disclaimers. - - a. No trademark or patent rights held by Affirmer are waived, abandoned, - surrendered, licensed or otherwise affected by this document. - b. Affirmer offers the Work as-is and makes no representations or - warranties of any kind concerning the Work, express, implied, - statutory or otherwise, including without limitation warranties of - title, merchantability, fitness for a particular purpose, non - infringement, or the absence of latent or other defects, accuracy, or - the present or absence of errors, whether or not discoverable, all to - the greatest extent permissible under applicable law. - c. 
Affirmer disclaims responsibility for clearing rights of other persons - that may apply to the Work or any use thereof, including without - limitation any person's Copyright and Related Rights in the Work. - Further, Affirmer disclaims responsibility for obtaining any necessary - consents, permissions or other rights required for any use of the - Work. - d. Affirmer understands and acknowledges that Creative Commons is not a - party to this document and has no duty or obligation with respect to - this CC0 or use of the Work. diff --git a/akka-samples/akka-sample-remote-java/LICENSE b/akka-samples/akka-sample-remote-java/LICENSE deleted file mode 100644 index 2a18b45589..0000000000 --- a/akka-samples/akka-sample-remote-java/LICENSE +++ /dev/null @@ -1,10 +0,0 @@ -Activator Template by Lightbend - -Licensed under Public Domain (CC0) - -To the extent possible under law, the person who associated CC0 with -this Activator Tempate has waived all copyright and related or neighboring -rights to this Activator Template. - -You should have received a copy of the CC0 legalcode along with this -work. If not, see . 
diff --git a/akka-samples/akka-sample-remote-java/activator.properties b/akka-samples/akka-sample-remote-java/activator.properties deleted file mode 100644 index f467f0b843..0000000000 --- a/akka-samples/akka-sample-remote-java/activator.properties +++ /dev/null @@ -1,7 +0,0 @@ -name=akka-sample-remote-java -title=Akka Remote Samples with Java -description=Akka Remote Samples with Java -tags=akka,remote,java,sample -authorName=Akka Team -authorLink=http://akka.io/ -sourceLink=https://github.com/akka/akka diff --git a/akka-samples/akka-sample-remote-java/build.sbt b/akka-samples/akka-sample-remote-java/build.sbt deleted file mode 100644 index 76fb58ef75..0000000000 --- a/akka-samples/akka-sample-remote-java/build.sbt +++ /dev/null @@ -1,12 +0,0 @@ -name := "akka-sample-remote-java" - -version := "2.5-SNAPSHOT" - -scalaVersion := "2.11.8" - -libraryDependencies ++= Seq( - "com.typesafe.akka" %% "akka-actor" % "2.5-SNAPSHOT", - "com.typesafe.akka" %% "akka-remote" % "2.5-SNAPSHOT" -) - -licenses := Seq(("CC0", url("http://creativecommons.org/publicdomain/zero/1.0"))) diff --git a/akka-samples/akka-sample-remote-java/project/build.properties b/akka-samples/akka-sample-remote-java/project/build.properties deleted file mode 100644 index 748703f770..0000000000 --- a/akka-samples/akka-sample-remote-java/project/build.properties +++ /dev/null @@ -1 +0,0 @@ -sbt.version=0.13.7 diff --git a/akka-samples/akka-sample-remote-java/src/main/java/sample/remote/calculator/CalculatorActor.java b/akka-samples/akka-sample-remote-java/src/main/java/sample/remote/calculator/CalculatorActor.java deleted file mode 100644 index b36cccbc90..0000000000 --- a/akka-samples/akka-sample-remote-java/src/main/java/sample/remote/calculator/CalculatorActor.java +++ /dev/null @@ -1,45 +0,0 @@ -package sample.remote.calculator; - -import akka.actor.UntypedActor; - -public class CalculatorActor extends UntypedActor { - @Override - public void onReceive(Object message) { - - if (message instanceof 
Op.Add) { - Op.Add add = (Op.Add) message; - System.out.println("Calculating " + add.getN1() + " + " + add.getN2()); - Op.AddResult result = new Op.AddResult(add.getN1(), add.getN2(), - add.getN1() + add.getN2()); - getSender().tell(result, getSelf()); - - } else if (message instanceof Op.Subtract) { - Op.Subtract subtract = (Op.Subtract) message; - System.out.println("Calculating " + subtract.getN1() + " - " - + subtract.getN2()); - Op.SubtractResult result = new Op.SubtractResult(subtract.getN1(), - subtract.getN2(), subtract.getN1() - subtract.getN2()); - getSender().tell(result, getSelf()); - - } else if (message instanceof Op.Multiply) { - Op.Multiply multiply = (Op.Multiply) message; - System.out.println("Calculating " + multiply.getN1() + " * " - + multiply.getN2()); - Op.MultiplicationResult result = new Op.MultiplicationResult( - multiply.getN1(), multiply.getN2(), multiply.getN1() - * multiply.getN2()); - getSender().tell(result, getSelf()); - - } else if (message instanceof Op.Divide) { - Op.Divide divide = (Op.Divide) message; - System.out.println("Calculating " + divide.getN1() + " / " - + divide.getN2()); - Op.DivisionResult result = new Op.DivisionResult(divide.getN1(), - divide.getN2(), divide.getN1() / divide.getN2()); - getSender().tell(result, getSelf()); - - } else { - unhandled(message); - } - } -} diff --git a/akka-samples/akka-sample-remote-java/src/main/java/sample/remote/calculator/CreationActor.java b/akka-samples/akka-sample-remote-java/src/main/java/sample/remote/calculator/CreationActor.java deleted file mode 100644 index 0d0784f8ee..0000000000 --- a/akka-samples/akka-sample-remote-java/src/main/java/sample/remote/calculator/CreationActor.java +++ /dev/null @@ -1,33 +0,0 @@ -package sample.remote.calculator; - -import akka.actor.ActorRef; -import akka.actor.Props; -import akka.actor.UntypedActor; - -public class CreationActor extends UntypedActor { - - @Override - public void onReceive(Object message) throws Exception { - - if (message 
instanceof Op.MathOp) { - ActorRef calculator = getContext().actorOf( - Props.create(CalculatorActor.class)); - calculator.tell(message, getSelf()); - - } else if (message instanceof Op.MultiplicationResult) { - Op.MultiplicationResult result = (Op.MultiplicationResult) message; - System.out.printf("Mul result: %d * %d = %d\n", result.getN1(), - result.getN2(), result.getResult()); - getContext().stop(getSender()); - - } else if (message instanceof Op.DivisionResult) { - Op.DivisionResult result = (Op.DivisionResult) message; - System.out.printf("Div result: %.0f / %d = %.2f\n", result.getN1(), - result.getN2(), result.getResult()); - getContext().stop(getSender()); - - } else { - unhandled(message); - } - } -} diff --git a/akka-samples/akka-sample-remote-java/src/main/java/sample/remote/calculator/CreationApplication.java b/akka-samples/akka-sample-remote-java/src/main/java/sample/remote/calculator/CreationApplication.java deleted file mode 100644 index 5f8ef3a269..0000000000 --- a/akka-samples/akka-sample-remote-java/src/main/java/sample/remote/calculator/CreationApplication.java +++ /dev/null @@ -1,48 +0,0 @@ -package sample.remote.calculator; - -import static java.util.concurrent.TimeUnit.SECONDS; -import java.util.Random; -import scala.concurrent.duration.Duration; -import akka.actor.ActorRef; -import akka.actor.ActorSystem; -import akka.actor.Props; - -import com.typesafe.config.ConfigFactory; - -public class CreationApplication { - - public static void main(String[] args) { - if (args.length == 0 || args[0].equals("CalculatorWorker")) - startRemoteWorkerSystem(); - if (args.length == 0 || args[0].equals("Creation")) - startRemoteCreationSystem(); - } - - public static void startRemoteWorkerSystem() { - ActorSystem.create("CalculatorWorkerSystem", - ConfigFactory.load(("calculator"))); - System.out.println("Started CalculatorWorkerSystem"); - } - - public static void startRemoteCreationSystem() { - final ActorSystem system = 
ActorSystem.create("CreationSystem", - ConfigFactory.load("remotecreation")); - final ActorRef actor = system.actorOf(Props.create(CreationActor.class), - "creationActor"); - - System.out.println("Started CreationSystem"); - final Random r = new Random(); - system.scheduler().schedule(Duration.create(1, SECONDS), - Duration.create(1, SECONDS), new Runnable() { - @Override - public void run() { - if (r.nextInt(100) % 2 == 0) { - actor.tell(new Op.Multiply(r.nextInt(100), r.nextInt(100)), null); - } else { - actor.tell(new Op.Divide(r.nextInt(10000), r.nextInt(99) + 1), - null); - } - } - }, system.dispatcher()); - } -} diff --git a/akka-samples/akka-sample-remote-java/src/main/java/sample/remote/calculator/LookupActor.java b/akka-samples/akka-sample-remote-java/src/main/java/sample/remote/calculator/LookupActor.java deleted file mode 100644 index 3610df9556..0000000000 --- a/akka-samples/akka-sample-remote-java/src/main/java/sample/remote/calculator/LookupActor.java +++ /dev/null @@ -1,83 +0,0 @@ -package sample.remote.calculator; - -import static java.util.concurrent.TimeUnit.SECONDS; -import scala.concurrent.duration.Duration; -import akka.actor.ActorRef; -import akka.actor.ActorIdentity; -import akka.actor.Identify; -import akka.actor.Terminated; -import akka.actor.UntypedActor; -import akka.actor.ReceiveTimeout; -import akka.japi.Procedure; - -public class LookupActor extends UntypedActor { - - private final String path; - private ActorRef calculator = null; - - public LookupActor(String path) { - this.path = path; - sendIdentifyRequest(); - } - - private void sendIdentifyRequest() { - getContext().actorSelection(path).tell(new Identify(path), getSelf()); - getContext() - .system() - .scheduler() - .scheduleOnce(Duration.create(3, SECONDS), getSelf(), - ReceiveTimeout.getInstance(), getContext().dispatcher(), getSelf()); - } - - @Override - public void onReceive(Object message) throws Exception { - if (message instanceof ActorIdentity) { - calculator = 
((ActorIdentity) message).getRef(); - if (calculator == null) { - System.out.println("Remote actor not available: " + path); - } else { - getContext().watch(calculator); - getContext().become(active, true); - } - - } else if (message instanceof ReceiveTimeout) { - sendIdentifyRequest(); - - } else { - System.out.println("Not ready yet"); - - } - } - - Procedure active = new Procedure() { - @Override - public void apply(Object message) { - if (message instanceof Op.MathOp) { - // send message to server actor - calculator.tell(message, getSelf()); - - } else if (message instanceof Op.AddResult) { - Op.AddResult result = (Op.AddResult) message; - System.out.printf("Add result: %d + %d = %d\n", result.getN1(), - result.getN2(), result.getResult()); - - } else if (message instanceof Op.SubtractResult) { - Op.SubtractResult result = (Op.SubtractResult) message; - System.out.printf("Sub result: %d - %d = %d\n", result.getN1(), - result.getN2(), result.getResult()); - - } else if (message instanceof Terminated) { - System.out.println("Calculator terminated"); - sendIdentifyRequest(); - getContext().unbecome(); - - } else if (message instanceof ReceiveTimeout) { - // ignore - - } else { - unhandled(message); - } - - } - }; -} diff --git a/akka-samples/akka-sample-remote-java/src/main/java/sample/remote/calculator/LookupApplication.java b/akka-samples/akka-sample-remote-java/src/main/java/sample/remote/calculator/LookupApplication.java deleted file mode 100644 index baf1f1811c..0000000000 --- a/akka-samples/akka-sample-remote-java/src/main/java/sample/remote/calculator/LookupApplication.java +++ /dev/null @@ -1,50 +0,0 @@ -package sample.remote.calculator; - -import static java.util.concurrent.TimeUnit.SECONDS; -import java.util.Random; -import scala.concurrent.duration.Duration; -import akka.actor.ActorRef; -import akka.actor.ActorSystem; -import akka.actor.Props; -import com.typesafe.config.ConfigFactory; - -public class LookupApplication { - public static void 
main(String[] args) { - if (args.length == 0 || args[0].equals("Calculator")) - startRemoteCalculatorSystem(); - if (args.length == 0 || args[0].equals("Lookup")) - startRemoteLookupSystem(); - } - - public static void startRemoteCalculatorSystem() { - final ActorSystem system = ActorSystem.create("CalculatorSystem", - ConfigFactory.load(("calculator"))); - system.actorOf(Props.create(CalculatorActor.class), "calculator"); - System.out.println("Started CalculatorSystem"); - } - - public static void startRemoteLookupSystem() { - - final ActorSystem system = ActorSystem.create("LookupSystem", - ConfigFactory.load("remotelookup")); - final String path = "akka.tcp://CalculatorSystem@127.0.0.1:2552/user/calculator"; - final ActorRef actor = system.actorOf( - Props.create(LookupActor.class, path), "lookupActor"); - - System.out.println("Started LookupSystem"); - final Random r = new Random(); - system.scheduler().schedule(Duration.create(1, SECONDS), - Duration.create(1, SECONDS), new Runnable() { - @Override - public void run() { - if (r.nextInt(100) % 2 == 0) { - actor.tell(new Op.Add(r.nextInt(100), r.nextInt(100)), null); - } else { - actor.tell(new Op.Subtract(r.nextInt(100), r.nextInt(100)), null); - } - - } - }, system.dispatcher()); - - } -} diff --git a/akka-samples/akka-sample-remote-java/src/main/java/sample/remote/calculator/Op.java b/akka-samples/akka-sample-remote-java/src/main/java/sample/remote/calculator/Op.java deleted file mode 100644 index d0e461ea88..0000000000 --- a/akka-samples/akka-sample-remote-java/src/main/java/sample/remote/calculator/Op.java +++ /dev/null @@ -1,188 +0,0 @@ -package sample.remote.calculator; - -import java.io.Serializable; - -public class Op { - - public interface MathOp extends Serializable { - } - - public interface MathResult extends Serializable { - } - - static class Add implements MathOp { - private static final long serialVersionUID = 1L; - private final int n1; - private final int n2; - - public Add(int n1, int n2) { - 
this.n1 = n1; - this.n2 = n2; - } - - public int getN1() { - return n1; - } - - public int getN2() { - return n2; - } - } - - static class AddResult implements MathResult { - private static final long serialVersionUID = 1L; - private final int n1; - private final int n2; - private final int result; - - public AddResult(int n1, int n2, int result) { - this.n1 = n1; - this.n2 = n2; - this.result = result; - } - - public int getN1() { - return n1; - } - - public int getN2() { - return n2; - } - - public int getResult() { - return result; - } - } - - static class Subtract implements MathOp { - private static final long serialVersionUID = 1L; - private final int n1; - private final int n2; - - public Subtract(int n1, int n2) { - this.n1 = n1; - this.n2 = n2; - } - - public int getN1() { - return n1; - } - - public int getN2() { - return n2; - } - } - - static class SubtractResult implements MathResult { - private static final long serialVersionUID = 1L; - private final int n1; - private final int n2; - private final int result; - - public SubtractResult(int n1, int n2, int result) { - this.n1 = n1; - this.n2 = n2; - this.result = result; - } - - public int getN1() { - return n1; - } - - public int getN2() { - return n2; - } - - public int getResult() { - return result; - } - } - - static class Multiply implements MathOp { - private static final long serialVersionUID = 1L; - private final int n1; - private final int n2; - - public Multiply(int n1, int n2) { - this.n1 = n1; - this.n2 = n2; - } - - public int getN1() { - return n1; - } - - public int getN2() { - return n2; - } - } - - static class MultiplicationResult implements MathResult { - private static final long serialVersionUID = 1L; - private final int n1; - private final int n2; - private final int result; - - public MultiplicationResult(int n1, int n2, int result) { - this.n1 = n1; - this.n2 = n2; - this.result = result; - } - - public int getN1() { - return n1; - } - - public int getN2() { - return n2; - } - - 
public int getResult() { - return result; - } - } - - static class Divide implements MathOp { - private static final long serialVersionUID = 1L; - private final double n1; - private final int n2; - - public Divide(double n1, int n2) { - this.n1 = n1; - this.n2 = n2; - } - - public double getN1() { - return n1; - } - - public int getN2() { - return n2; - } - } - - static class DivisionResult implements MathResult { - private static final long serialVersionUID = 1L; - private final double n1; - private final int n2; - private final double result; - - public DivisionResult(double n1, int n2, double result) { - this.n1 = n1; - this.n2 = n2; - this.result = result; - } - - public double getN1() { - return n1; - } - - public int getN2() { - return n2; - } - - public double getResult() { - return result; - } - } -} diff --git a/akka-samples/akka-sample-remote-java/src/main/resources/calculator.conf b/akka-samples/akka-sample-remote-java/src/main/resources/calculator.conf deleted file mode 100644 index 948c1f2929..0000000000 --- a/akka-samples/akka-sample-remote-java/src/main/resources/calculator.conf +++ /dev/null @@ -1,6 +0,0 @@ -include "common" - -akka { - # LISTEN on tcp port 2552 - remote.netty.tcp.port = 2552 -} diff --git a/akka-samples/akka-sample-remote-java/src/main/resources/common.conf b/akka-samples/akka-sample-remote-java/src/main/resources/common.conf deleted file mode 100644 index 9e99e7ab6f..0000000000 --- a/akka-samples/akka-sample-remote-java/src/main/resources/common.conf +++ /dev/null @@ -1,13 +0,0 @@ -akka { - - actor { - provider = remote - } - - remote { - netty.tcp { - hostname = "127.0.0.1" - } - } - -} diff --git a/akka-samples/akka-sample-remote-java/src/main/resources/remotecreation.conf b/akka-samples/akka-sample-remote-java/src/main/resources/remotecreation.conf deleted file mode 100644 index 76292f999f..0000000000 --- a/akka-samples/akka-sample-remote-java/src/main/resources/remotecreation.conf +++ /dev/null @@ -1,13 +0,0 @@ -include 
"common" - -akka { - actor { - deployment { - "/creationActor/*" { - remote = "akka.tcp://CalculatorWorkerSystem@127.0.0.1:2552" - } - } - } - - remote.netty.tcp.port = 2554 -} diff --git a/akka-samples/akka-sample-remote-java/src/main/resources/remotelookup.conf b/akka-samples/akka-sample-remote-java/src/main/resources/remotelookup.conf deleted file mode 100644 index 336f557a08..0000000000 --- a/akka-samples/akka-sample-remote-java/src/main/resources/remotelookup.conf +++ /dev/null @@ -1,5 +0,0 @@ -include "common" - -akka { - remote.netty.tcp.port = 2553 -} diff --git a/akka-samples/akka-sample-remote-java/src/test/resources/reference.conf b/akka-samples/akka-sample-remote-java/src/test/resources/reference.conf deleted file mode 100644 index 90492329b7..0000000000 --- a/akka-samples/akka-sample-remote-java/src/test/resources/reference.conf +++ /dev/null @@ -1,4 +0,0 @@ -# Don't terminate ActorSystem in tests -akka.coordinated-shutdown.run-by-jvm-shutdown-hook = off -akka.coordinated-shutdown.terminate-actor-system = off -akka.cluster.run-coordinated-shutdown-when-down = off diff --git a/akka-samples/akka-sample-remote-java/tutorial/index.html b/akka-samples/akka-sample-remote-java/tutorial/index.html deleted file mode 100644 index 1c7ea8e62f..0000000000 --- a/akka-samples/akka-sample-remote-java/tutorial/index.html +++ /dev/null @@ -1,256 +0,0 @@ - - -Akka Remote Samples with Java - - - - -
-

-In order to showcase the remote capabilities of Akka -we thought a remote calculator could do the trick. -This sample demonstrates both remote deployment and look-up of remote actors. -

-
- -
-

Lookup Remote Actors

-

-This sample involves two actor systems. -

- -
    -
  • CalculatorSystem listens on port 2552 and starts one actor, the -CalculatorActor -that provides a service for arithmetic operations.
  • -
  • LookupSystem listens on port 2553 and starts one actor, the -LookupActor -that sends operations to the remote calculator service.
  • -
- -

-Open LookupApplication.java. -

- -

-There you see how the two actor systems and actors are started. In this first step they are running in the same JVM process, -but you can run them in separate processes as described later. Note that this changes nothing in the configuration or implementation. -

- -

-The two actor systems use different configuration, which is where the listen port is defined. -The CalculatorSystem uses calculator.conf -and the LookupSystem uses remotelookup.conf. -

- -

-Note that the configuration files also import the -common.conf. -This enables the remoting by installing the RemoteActorRefProvider and chooses the default remote transport. -Be sure to replace the default IP 127.0.0.1 with the real address the system is reachable -by if you deploy onto multiple machines! -

- -

-The CalculatorActor -does not illustrate anything exciting. More interesting is the -LookupActor. -It takes a String path as constructor parameter. This is the full path, including the remote -address of the calculator service. Observe how the actor system name of the path matches the remote system’s -name, as do IP and port number. Top-level actors are always created below the "/user" guardian, which supervises them. -

- -

-"akka.tcp://CalculatorSystem@127.0.0.1:2552/user/calculator"
-
- -

-First it sends an Identify message to the actor selection of the path. -The remote calculator actor will reply with ActorIdentity containing its ActorRef. -Identify is a built-in message that all Actors will understand and automatically reply to with a -ActorIdentity. If the identification fails it will be retried after the scheduled timeout -by the LookupActor. -

- -

-Note how none of the code is specific to remoting, this also applies when talking to a local actor which -might terminate and be recreated. That is what we call Location Transparency. -

- -

-Once it has the ActorRef of the remote service it can watch it. The remote system -might be shutdown and later started up again, then Terminated is received on the watching -side and it can retry the identification to establish a connection to the new remote system. -

- -
- -
-

Run the Lookup Sample

- -

-To run this sample, go to the Run -tab, and start the application main class sample.remote.calculator.LookupApplication if it is not already started. -

- -

-In the log pane you should see something like: -

- -

-Started LookupSystem
-Calculating 74 - 42
-Sub result: 74 - 42 = 32
-Calculating 15 + 71
-Add result: 15 + 71 = 86
-
- -

-The two actor systems are running in the same JVM process. It can be more interesting to run them in separate -processes. Stop the application in the Run tab and then open two -terminal windows. -

- -

-Start the CalculatorSystem in the first terminal window with the following command (on one line): -

- -

-<path to activator dir>/activator 
-  "run-main sample.remote.calculator.LookupApplication Calculator"		
-
- -

-Start the LookupSystem in the second terminal window with the following command (on one line): -

- -

-<path to activator dir>/activator 
-  "run-main sample.remote.calculator.LookupApplication Lookup"		
-
- -

-Thereafter you can try to shutdown the CalculatorSystem in the first terminal window with -'ctrl-c' and then start it again. In the second terminal window you should see the -failure detection and then how the successful calculation results are logged again when it has -established a connection to the new system. -

- -
- -
-

Create Remote Actors

-

-This sample involves two actor systems. -

- -
    -
  • CalculatorWorkerSystem listens on port 2552
  • -
  • CreationSystem listens on port 2554 and starts one actor, the -CreationActor -that creates remote calculator worker actors in the CalculatorWorkerSystem and sends operations to them.
  • -
- -

-Open CreationApplication.java. -

- -

-There you see how the two actor systems and actors are started. In this first step they are running in the same JVM process, -but you can run them in separate processes as described later. -

- -

-The two actor systems use different configuration, which is where the listen port is defined. -The CalculatorWorkerSystem uses calculator.conf -and the CreationSystem uses remotecreation.conf. -

- -

-Note that the configuration files also import the -common.conf. -This enables the remoting by installing the RemoteActorRefProvider and chooses the default remote transport. -Be sure to replace the default IP 127.0.0.1 with the real address the system is reachable -by if you deploy onto multiple machines! -

- -

-The CreationActor -creates a child CalculatorActor -for each incoming MathOp message. The -configuration contains a deployment section that -matches these child actors and defines that the actors are to be deployed at the remote system. The wildcard (*) is needed -because the child actors are created with unique anonymous names. -

- -

-akka.actor.deployment {
-  /creationActor/* {
-    remote = "akka.tcp://CalculatorWorkerSystem@127.0.0.1:2552"
-  }
-}
-
- -

-Error handling, i.e. supervision, works exactly in the same way as if the child actor was a local child actor. -In addtion, in case of network failures or JVM crash the child will be terminated and automatically removed -from the parent even though they are located on different machines. -

- -
- -
-

Run the Creation Sample

- -

-To run this sample, go to the Run -tab, and start the application main class sample.remote.calculator.CreationApplication if it is not already started. -

- -

-In the log pane you should see something like: -

- -

-Started CreationSystem
-Calculating 7135 / 62
-Div result: 7135 / 62 = 115.08
-Calculating 0 * 9
-Mul result: 0 * 9 = 0
-
- -

-The two actor systems are running in the same JVM process. It can be more interesting to run them in separate -processes. Stop the application in the Run tab and then open two -terminal windows. -

- -

-Start the CalculatorWorkerSystem in the first terminal window with the following command (on one line): -

- -

-<path to activator dir>/activator 
-  "run-main sample.remote.calculator.CreationApplication CalculatorWorker"		
-
- -

-Start the CreationSystem in the second terminal window with the following command (on one line): -

- -

-<path to activator dir>/activator 
-  "run-main sample.remote.calculator.CreationApplication Creation"		
-
- -

-Thereafter you can try to shutdown the CalculatorWorkerSystem in the first terminal window with -'ctrl-c' and then start it again. In the second terminal window you should see the -failure detection and then how the successful calculation results are logged again when it has -established a connection to the new system. -

- -
- - - diff --git a/akka-samples/akka-sample-remote-scala/.gitignore b/akka-samples/akka-sample-remote-scala/.gitignore deleted file mode 100644 index 660c959e44..0000000000 --- a/akka-samples/akka-sample-remote-scala/.gitignore +++ /dev/null @@ -1,17 +0,0 @@ -*# -*.iml -*.ipr -*.iws -*.pyc -*.tm.epoch -*.vim -*-shim.sbt -.idea/ -/project/plugins/project -project/boot -target/ -/logs -.cache -.classpath -.project -.settings \ No newline at end of file diff --git a/akka-samples/akka-sample-remote-scala/COPYING b/akka-samples/akka-sample-remote-scala/COPYING deleted file mode 100644 index 0e259d42c9..0000000000 --- a/akka-samples/akka-sample-remote-scala/COPYING +++ /dev/null @@ -1,121 +0,0 @@ -Creative Commons Legal Code - -CC0 1.0 Universal - - CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE - LEGAL SERVICES. DISTRIBUTION OF THIS DOCUMENT DOES NOT CREATE AN - ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS - INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES - REGARDING THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS - PROVIDED HEREUNDER, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM - THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS PROVIDED - HEREUNDER. - -Statement of Purpose - -The laws of most jurisdictions throughout the world automatically confer -exclusive Copyright and Related Rights (defined below) upon the creator -and subsequent owner(s) (each and all, an "owner") of an original work of -authorship and/or a database (each, a "Work"). - -Certain owners wish to permanently relinquish those rights to a Work for -the purpose of contributing to a commons of creative, cultural and -scientific works ("Commons") that the public can reliably and without fear -of later claims of infringement build upon, modify, incorporate in other -works, reuse and redistribute as freely as possible in any form whatsoever -and for any purposes, including without limitation commercial purposes. 
-These owners may contribute to the Commons to promote the ideal of a free -culture and the further production of creative, cultural and scientific -works, or to gain reputation or greater distribution for their Work in -part through the use and efforts of others. - -For these and/or other purposes and motivations, and without any -expectation of additional consideration or compensation, the person -associating CC0 with a Work (the "Affirmer"), to the extent that he or she -is an owner of Copyright and Related Rights in the Work, voluntarily -elects to apply CC0 to the Work and publicly distribute the Work under its -terms, with knowledge of his or her Copyright and Related Rights in the -Work and the meaning and intended legal effect of CC0 on those rights. - -1. Copyright and Related Rights. A Work made available under CC0 may be -protected by copyright and related or neighboring rights ("Copyright and -Related Rights"). Copyright and Related Rights include, but are not -limited to, the following: - - i. the right to reproduce, adapt, distribute, perform, display, - communicate, and translate a Work; - ii. moral rights retained by the original author(s) and/or performer(s); -iii. publicity and privacy rights pertaining to a person's image or - likeness depicted in a Work; - iv. rights protecting against unfair competition in regards to a Work, - subject to the limitations in paragraph 4(a), below; - v. rights protecting the extraction, dissemination, use and reuse of data - in a Work; - vi. database rights (such as those arising under Directive 96/9/EC of the - European Parliament and of the Council of 11 March 1996 on the legal - protection of databases, and under any national implementation - thereof, including any amended or successor version of such - directive); and -vii. other similar, equivalent or corresponding rights throughout the - world based on applicable law or treaty, and any national - implementations thereof. - -2. Waiver. 
To the greatest extent permitted by, but not in contravention -of, applicable law, Affirmer hereby overtly, fully, permanently, -irrevocably and unconditionally waives, abandons, and surrenders all of -Affirmer's Copyright and Related Rights and associated claims and causes -of action, whether now known or unknown (including existing as well as -future claims and causes of action), in the Work (i) in all territories -worldwide, (ii) for the maximum duration provided by applicable law or -treaty (including future time extensions), (iii) in any current or future -medium and for any number of copies, and (iv) for any purpose whatsoever, -including without limitation commercial, advertising or promotional -purposes (the "Waiver"). Affirmer makes the Waiver for the benefit of each -member of the public at large and to the detriment of Affirmer's heirs and -successors, fully intending that such Waiver shall not be subject to -revocation, rescission, cancellation, termination, or any other legal or -equitable action to disrupt the quiet enjoyment of the Work by the public -as contemplated by Affirmer's express Statement of Purpose. - -3. Public License Fallback. Should any part of the Waiver for any reason -be judged legally invalid or ineffective under applicable law, then the -Waiver shall be preserved to the maximum extent permitted taking into -account Affirmer's express Statement of Purpose. 
In addition, to the -extent the Waiver is so judged Affirmer hereby grants to each affected -person a royalty-free, non transferable, non sublicensable, non exclusive, -irrevocable and unconditional license to exercise Affirmer's Copyright and -Related Rights in the Work (i) in all territories worldwide, (ii) for the -maximum duration provided by applicable law or treaty (including future -time extensions), (iii) in any current or future medium and for any number -of copies, and (iv) for any purpose whatsoever, including without -limitation commercial, advertising or promotional purposes (the -"License"). The License shall be deemed effective as of the date CC0 was -applied by Affirmer to the Work. Should any part of the License for any -reason be judged legally invalid or ineffective under applicable law, such -partial invalidity or ineffectiveness shall not invalidate the remainder -of the License, and in such case Affirmer hereby affirms that he or she -will not (i) exercise any of his or her remaining Copyright and Related -Rights in the Work or (ii) assert any associated claims and causes of -action with respect to the Work, in either case contrary to Affirmer's -express Statement of Purpose. - -4. Limitations and Disclaimers. - - a. No trademark or patent rights held by Affirmer are waived, abandoned, - surrendered, licensed or otherwise affected by this document. - b. Affirmer offers the Work as-is and makes no representations or - warranties of any kind concerning the Work, express, implied, - statutory or otherwise, including without limitation warranties of - title, merchantability, fitness for a particular purpose, non - infringement, or the absence of latent or other defects, accuracy, or - the present or absence of errors, whether or not discoverable, all to - the greatest extent permissible under applicable law. - c. 
Affirmer disclaims responsibility for clearing rights of other persons - that may apply to the Work or any use thereof, including without - limitation any person's Copyright and Related Rights in the Work. - Further, Affirmer disclaims responsibility for obtaining any necessary - consents, permissions or other rights required for any use of the - Work. - d. Affirmer understands and acknowledges that Creative Commons is not a - party to this document and has no duty or obligation with respect to - this CC0 or use of the Work. diff --git a/akka-samples/akka-sample-remote-scala/LICENSE b/akka-samples/akka-sample-remote-scala/LICENSE deleted file mode 100644 index 2a18b45589..0000000000 --- a/akka-samples/akka-sample-remote-scala/LICENSE +++ /dev/null @@ -1,10 +0,0 @@ -Activator Template by Lightbend - -Licensed under Public Domain (CC0) - -To the extent possible under law, the person who associated CC0 with -this Activator Tempate has waived all copyright and related or neighboring -rights to this Activator Template. - -You should have received a copy of the CC0 legalcode along with this -work. If not, see . 
diff --git a/akka-samples/akka-sample-remote-scala/activator.properties b/akka-samples/akka-sample-remote-scala/activator.properties deleted file mode 100644 index e7a6f58e5d..0000000000 --- a/akka-samples/akka-sample-remote-scala/activator.properties +++ /dev/null @@ -1,7 +0,0 @@ -name=akka-sample-remote-scala -title=Akka Remote Samples with Scala -description=Akka Remote Samples with Scala -tags=akka,remote,scala,sample -authorName=Akka Team -authorLink=http://akka.io/ -sourceLink=https://github.com/akka/akka diff --git a/akka-samples/akka-sample-remote-scala/build.sbt b/akka-samples/akka-sample-remote-scala/build.sbt deleted file mode 100644 index 31f8bb865e..0000000000 --- a/akka-samples/akka-sample-remote-scala/build.sbt +++ /dev/null @@ -1,12 +0,0 @@ -name := "akka-sample-remote-scala" - -version := "2.5-SNAPSHOT" - -scalaVersion := "2.11.8" - -libraryDependencies ++= Seq( - "com.typesafe.akka" %% "akka-actor" % "2.5-SNAPSHOT", - "com.typesafe.akka" %% "akka-remote" % "2.5-SNAPSHOT" -) - -licenses := Seq(("CC0", url("http://creativecommons.org/publicdomain/zero/1.0"))) diff --git a/akka-samples/akka-sample-remote-scala/project/build.properties b/akka-samples/akka-sample-remote-scala/project/build.properties deleted file mode 100644 index 748703f770..0000000000 --- a/akka-samples/akka-sample-remote-scala/project/build.properties +++ /dev/null @@ -1 +0,0 @@ -sbt.version=0.13.7 diff --git a/akka-samples/akka-sample-remote-scala/src/main/resources/calculator.conf b/akka-samples/akka-sample-remote-scala/src/main/resources/calculator.conf deleted file mode 100644 index 948c1f2929..0000000000 --- a/akka-samples/akka-sample-remote-scala/src/main/resources/calculator.conf +++ /dev/null @@ -1,6 +0,0 @@ -include "common" - -akka { - # LISTEN on tcp port 2552 - remote.netty.tcp.port = 2552 -} diff --git a/akka-samples/akka-sample-remote-scala/src/main/resources/common.conf b/akka-samples/akka-sample-remote-scala/src/main/resources/common.conf deleted file mode 100644 
index 9e99e7ab6f..0000000000 --- a/akka-samples/akka-sample-remote-scala/src/main/resources/common.conf +++ /dev/null @@ -1,13 +0,0 @@ -akka { - - actor { - provider = remote - } - - remote { - netty.tcp { - hostname = "127.0.0.1" - } - } - -} diff --git a/akka-samples/akka-sample-remote-scala/src/main/resources/remotecreation.conf b/akka-samples/akka-sample-remote-scala/src/main/resources/remotecreation.conf deleted file mode 100644 index 76292f999f..0000000000 --- a/akka-samples/akka-sample-remote-scala/src/main/resources/remotecreation.conf +++ /dev/null @@ -1,13 +0,0 @@ -include "common" - -akka { - actor { - deployment { - "/creationActor/*" { - remote = "akka.tcp://CalculatorWorkerSystem@127.0.0.1:2552" - } - } - } - - remote.netty.tcp.port = 2554 -} diff --git a/akka-samples/akka-sample-remote-scala/src/main/resources/remotelookup.conf b/akka-samples/akka-sample-remote-scala/src/main/resources/remotelookup.conf deleted file mode 100644 index 336f557a08..0000000000 --- a/akka-samples/akka-sample-remote-scala/src/main/resources/remotelookup.conf +++ /dev/null @@ -1,5 +0,0 @@ -include "common" - -akka { - remote.netty.tcp.port = 2553 -} diff --git a/akka-samples/akka-sample-remote-scala/src/main/scala/sample/remote/benchmark/Receiver.scala b/akka-samples/akka-sample-remote-scala/src/main/scala/sample/remote/benchmark/Receiver.scala deleted file mode 100644 index f0f377f6d0..0000000000 --- a/akka-samples/akka-sample-remote-scala/src/main/scala/sample/remote/benchmark/Receiver.scala +++ /dev/null @@ -1,24 +0,0 @@ -package sample.remote.benchmark - -import akka.actor.Actor -import akka.actor.ActorSystem -import com.typesafe.config.ConfigFactory -import akka.actor.Props - -object Receiver { - def main(args: Array[String]): Unit = { - val system = ActorSystem("Sys", ConfigFactory.load("remotelookup")) - system.actorOf(Props[Receiver], "rcv") - } -} - -class Receiver extends Actor { - import Sender._ - - def receive = { - case m: Echo => sender() ! 
m - case Shutdown => context.system.terminate() - case _ => - } -} - diff --git a/akka-samples/akka-sample-remote-scala/src/main/scala/sample/remote/benchmark/Sender.scala b/akka-samples/akka-sample-remote-scala/src/main/scala/sample/remote/benchmark/Sender.scala deleted file mode 100644 index 2c856ce97d..0000000000 --- a/akka-samples/akka-sample-remote-scala/src/main/scala/sample/remote/benchmark/Sender.scala +++ /dev/null @@ -1,120 +0,0 @@ -package sample.remote.benchmark - -import scala.concurrent.duration._ -import akka.actor.Actor -import akka.actor.ActorIdentity -import akka.actor.ActorRef -import akka.actor.Identify -import akka.actor.ReceiveTimeout -import akka.actor.Terminated -import akka.actor.Props -import akka.actor.ActorSystem -import com.typesafe.config.ConfigFactory - -object Sender { - def main(args: Array[String]): Unit = { - val system = ActorSystem("Sys", ConfigFactory.load("calculator")) - - val remoteHostPort = if (args.length >= 1) args(0) else "127.0.0.1:2553" - val remotePath = s"akka.tcp://Sys@$remoteHostPort/user/rcv" - val totalMessages = if (args.length >= 2) args(1).toInt else 500000 - val burstSize = if (args.length >= 3) args(2).toInt else 5000 - val payloadSize = if (args.length >= 4) args(3).toInt else 100 - - system.actorOf(Sender.props(remotePath, totalMessages, burstSize, payloadSize), "snd") - } - - def props(path: String, totalMessages: Int, burstSize: Int, payloadSize: Int): Props = - Props(new Sender(path, totalMessages, burstSize, payloadSize)) - - private case object Warmup - case object Shutdown - sealed trait Echo - case object Start extends Echo - case object Done extends Echo - case class Continue(remaining: Int, startTime: Long, burstStartTime: Long, n: Int) - extends Echo -} - -class Sender(path: String, totalMessages: Int, burstSize: Int, payloadSize: Int) extends Actor { - import Sender._ - - val payload: Array[Byte] = Vector.fill(payloadSize)("a").mkString.getBytes - var startTime = 0L - var maxRoundTripMillis = 
0L - - context.setReceiveTimeout(3.seconds) - sendIdentifyRequest() - - def sendIdentifyRequest(): Unit = - context.actorSelection(path) ! Identify(path) - - def receive = identifying - - def identifying: Receive = { - case ActorIdentity(`path`, Some(actor)) => - context.watch(actor) - context.become(active(actor)) - context.setReceiveTimeout(Duration.Undefined) - self ! Warmup - case ActorIdentity(`path`, None) => println(s"Remote actor not available: $path") - case ReceiveTimeout => sendIdentifyRequest() - } - - def active(actor: ActorRef): Receive = { - case Warmup => - sendBatch(actor, burstSize) - actor ! Start - - case Start => - println(s"Starting benchmark of $totalMessages messages with burst size $burstSize and payload size $payloadSize") - startTime = System.nanoTime - val remaining = sendBatch(actor, totalMessages) - if (remaining == 0) - actor ! Done - else - actor ! Continue(remaining, startTime, startTime, burstSize) - - case c @ Continue(remaining, t0, t1, n) => - val now = System.nanoTime() - val duration = (now - t0).nanos.toMillis - val roundTripMillis = (now - t1).nanos.toMillis - maxRoundTripMillis = math.max(maxRoundTripMillis, roundTripMillis) - if (duration >= 500) { - val throughtput = (n * 1000.0 / duration).toInt - println(s"It took $duration ms to deliver $n messages, throughtput $throughtput msg/s, " + - s"latest round-trip $roundTripMillis ms, remaining $remaining of $totalMessages") - } - - val nextRemaining = sendBatch(actor, remaining) - if (nextRemaining == 0) - actor ! Done - else if (duration >= 500) - actor ! Continue(nextRemaining, now, now, burstSize) - else - actor ! 
c.copy(remaining = nextRemaining, burstStartTime = now, n = n + burstSize) - - case Done => - val took = (System.nanoTime - startTime).nanos.toMillis - val throughtput = (totalMessages * 1000.0 / took).toInt - println(s"== It took $took ms to deliver $totalMessages messages, throughtput $throughtput msg/s, " + - s"max round-trip $maxRoundTripMillis ms, burst size $burstSize, " + - s"payload size $payloadSize") - actor ! Shutdown - - case Terminated(`actor`) => - println("Receiver terminated") - context.system.terminate() - - } - - /** - * @return remaining messages after sending the batch - */ - def sendBatch(actor: ActorRef, remaining: Int): Int = { - val batchSize = math.min(remaining, burstSize) - (1 to batchSize) foreach { x => actor ! payload } - remaining - batchSize - } -} - diff --git a/akka-samples/akka-sample-remote-scala/src/main/scala/sample/remote/calculator/CalculatorActor.scala b/akka-samples/akka-sample-remote-scala/src/main/scala/sample/remote/calculator/CalculatorActor.scala deleted file mode 100644 index 90d209c3a1..0000000000 --- a/akka-samples/akka-sample-remote-scala/src/main/scala/sample/remote/calculator/CalculatorActor.scala +++ /dev/null @@ -1,22 +0,0 @@ -package sample.remote.calculator - -import akka.actor.Props -import akka.actor.Actor - -class CalculatorActor extends Actor { - def receive = { - case Add(n1, n2) => - println("Calculating %d + %d".format(n1, n2)) - sender() ! AddResult(n1, n2, n1 + n2) - case Subtract(n1, n2) => - println("Calculating %d - %d".format(n1, n2)) - sender() ! SubtractResult(n1, n2, n1 - n2) - case Multiply(n1, n2) => - println("Calculating %d * %d".format(n1, n2)) - sender() ! MultiplicationResult(n1, n2, n1 * n2) - case Divide(n1, n2) => - println("Calculating %.0f / %d".format(n1, n2)) - sender() ! 
DivisionResult(n1, n2, n1 / n2) - } -} - diff --git a/akka-samples/akka-sample-remote-scala/src/main/scala/sample/remote/calculator/CreationActor.scala b/akka-samples/akka-sample-remote-scala/src/main/scala/sample/remote/calculator/CreationActor.scala deleted file mode 100644 index 7d6962d5a8..0000000000 --- a/akka-samples/akka-sample-remote-scala/src/main/scala/sample/remote/calculator/CreationActor.scala +++ /dev/null @@ -1,22 +0,0 @@ -package sample.remote.calculator - -import akka.actor.Actor -import akka.actor.ActorRef -import akka.actor.Props - -class CreationActor extends Actor { - - def receive = { - case op: MathOp => - val calculator = context.actorOf(Props[CalculatorActor]) - calculator ! op - case result: MathResult => result match { - case MultiplicationResult(n1, n2, r) => - printf("Mul result: %d * %d = %d\n", n1, n2, r) - context.stop(sender()) - case DivisionResult(n1, n2, r) => - printf("Div result: %.0f / %d = %.2f\n", n1, n2, r) - context.stop(sender()) - } - } -} diff --git a/akka-samples/akka-sample-remote-scala/src/main/scala/sample/remote/calculator/CreationApplication.scala b/akka-samples/akka-sample-remote-scala/src/main/scala/sample/remote/calculator/CreationApplication.scala deleted file mode 100644 index abf09be5da..0000000000 --- a/akka-samples/akka-sample-remote-scala/src/main/scala/sample/remote/calculator/CreationApplication.scala +++ /dev/null @@ -1,38 +0,0 @@ -package sample.remote.calculator - -import scala.concurrent.duration._ -import com.typesafe.config.ConfigFactory -import scala.util.Random -import akka.actor.ActorSystem -import akka.actor.Props - -object CreationApplication { - def main(args: Array[String]): Unit = { - if (args.isEmpty || args.head == "CalculatorWorker") - startRemoteWorkerSystem() - if (args.isEmpty || args.head == "Creation") - startRemoteCreationSystem() - } - - def startRemoteWorkerSystem(): Unit = { - ActorSystem("CalculatorWorkerSystem", ConfigFactory.load("calculator")) - println("Started 
CalculatorWorkerSystem") - } - - def startRemoteCreationSystem(): Unit = { - val system = - ActorSystem("CreationSystem", ConfigFactory.load("remotecreation")) - val actor = system.actorOf(Props[CreationActor], - name = "creationActor") - - println("Started CreationSystem") - import system.dispatcher - system.scheduler.schedule(1.second, 1.second) { - if (Random.nextInt(100) % 2 == 0) - actor ! Multiply(Random.nextInt(20), Random.nextInt(20)) - else - actor ! Divide(Random.nextInt(10000), (Random.nextInt(99) + 1)) - } - - } -} diff --git a/akka-samples/akka-sample-remote-scala/src/main/scala/sample/remote/calculator/LookupActor.scala b/akka-samples/akka-sample-remote-scala/src/main/scala/sample/remote/calculator/LookupActor.scala deleted file mode 100644 index b06d2383de..0000000000 --- a/akka-samples/akka-sample-remote-scala/src/main/scala/sample/remote/calculator/LookupActor.scala +++ /dev/null @@ -1,48 +0,0 @@ -package sample.remote.calculator - -import scala.concurrent.duration._ -import akka.actor.Actor -import akka.actor.ActorIdentity -import akka.actor.ActorRef -import akka.actor.Identify -import akka.actor.ReceiveTimeout -import akka.actor.Terminated - -class LookupActor(path: String) extends Actor { - - sendIdentifyRequest() - - def sendIdentifyRequest(): Unit = { - context.actorSelection(path) ! Identify(path) - import context.dispatcher - context.system.scheduler.scheduleOnce(3.seconds, self, ReceiveTimeout) - } - - def receive = identifying - - def identifying: Actor.Receive = { - case ActorIdentity(`path`, Some(actor)) => - context.watch(actor) - context.become(active(actor)) - case ActorIdentity(`path`, None) => println(s"Remote actor not available: $path") - case ReceiveTimeout => sendIdentifyRequest() - case _ => println("Not ready yet") - } - - def active(actor: ActorRef): Actor.Receive = { - case op: MathOp => actor ! 
op - case result: MathResult => result match { - case AddResult(n1, n2, r) => - printf("Add result: %d + %d = %d\n", n1, n2, r) - case SubtractResult(n1, n2, r) => - printf("Sub result: %d - %d = %d\n", n1, n2, r) - } - case Terminated(`actor`) => - println("Calculator terminated") - sendIdentifyRequest() - context.become(identifying) - case ReceiveTimeout => - // ignore - - } -} diff --git a/akka-samples/akka-sample-remote-scala/src/main/scala/sample/remote/calculator/LookupApplication.scala b/akka-samples/akka-sample-remote-scala/src/main/scala/sample/remote/calculator/LookupApplication.scala deleted file mode 100644 index 8129358d53..0000000000 --- a/akka-samples/akka-sample-remote-scala/src/main/scala/sample/remote/calculator/LookupApplication.scala +++ /dev/null @@ -1,41 +0,0 @@ -package sample.remote.calculator - -import scala.concurrent.duration._ -import scala.util.Random -import com.typesafe.config.ConfigFactory -import akka.actor.ActorSystem -import akka.actor.Props - -object LookupApplication { - def main(args: Array[String]): Unit = { - if (args.isEmpty || args.head == "Calculator") - startRemoteCalculatorSystem() - if (args.isEmpty || args.head == "Lookup") - startRemoteLookupSystem() - } - - def startRemoteCalculatorSystem(): Unit = { - val system = ActorSystem("CalculatorSystem", - ConfigFactory.load("calculator")) - system.actorOf(Props[CalculatorActor], "calculator") - - println("Started CalculatorSystem - waiting for messages") - } - - def startRemoteLookupSystem(): Unit = { - val system = - ActorSystem("LookupSystem", ConfigFactory.load("remotelookup")) - val remotePath = - "akka.tcp://CalculatorSystem@127.0.0.1:2552/user/calculator" - val actor = system.actorOf(Props(classOf[LookupActor], remotePath), "lookupActor") - - println("Started LookupSystem") - import system.dispatcher - system.scheduler.schedule(1.second, 1.second) { - if (Random.nextInt(100) % 2 == 0) - actor ! Add(Random.nextInt(100), Random.nextInt(100)) - else - actor ! 
Subtract(Random.nextInt(100), Random.nextInt(100)) - } - } -} diff --git a/akka-samples/akka-sample-remote-scala/src/main/scala/sample/remote/calculator/MathOp.scala b/akka-samples/akka-sample-remote-scala/src/main/scala/sample/remote/calculator/MathOp.scala deleted file mode 100644 index aaf02818cc..0000000000 --- a/akka-samples/akka-sample-remote-scala/src/main/scala/sample/remote/calculator/MathOp.scala +++ /dev/null @@ -1,22 +0,0 @@ -package sample.remote.calculator - -trait MathOp - -final case class Add(nbr1: Int, nbr2: Int) extends MathOp - -final case class Subtract(nbr1: Int, nbr2: Int) extends MathOp - -final case class Multiply(nbr1: Int, nbr2: Int) extends MathOp - -final case class Divide(nbr1: Double, nbr2: Int) extends MathOp - -trait MathResult - -final case class AddResult(nbr: Int, nbr2: Int, result: Int) extends MathResult - -final case class SubtractResult(nbr1: Int, nbr2: Int, result: Int) extends MathResult - -final case class MultiplicationResult(nbr1: Int, nbr2: Int, result: Int) extends MathResult - -final case class DivisionResult(nbr1: Double, nbr2: Int, result: Double) extends MathResult - diff --git a/akka-samples/akka-sample-remote-scala/src/test/resources/reference.conf b/akka-samples/akka-sample-remote-scala/src/test/resources/reference.conf deleted file mode 100644 index 90492329b7..0000000000 --- a/akka-samples/akka-sample-remote-scala/src/test/resources/reference.conf +++ /dev/null @@ -1,4 +0,0 @@ -# Don't terminate ActorSystem in tests -akka.coordinated-shutdown.run-by-jvm-shutdown-hook = off -akka.coordinated-shutdown.terminate-actor-system = off -akka.cluster.run-coordinated-shutdown-when-down = off diff --git a/akka-samples/akka-sample-remote-scala/tutorial/index.html b/akka-samples/akka-sample-remote-scala/tutorial/index.html deleted file mode 100644 index a89e903982..0000000000 --- a/akka-samples/akka-sample-remote-scala/tutorial/index.html +++ /dev/null @@ -1,256 +0,0 @@ - - -Akka Remote Samples with Scala - - - - -
-

-In order to showcase the remote capabilities of Akka -we thought a remote calculator could do the trick. -This sample demonstrates both remote deployment and look-up of remote actors. -

-
- -
-

Lookup Remote Actors

-

-This sample involves two actor systems. -

- -
    -
  • CalculatorSystem listens on port 2552 and starts one actor, the -CalculatorActor -that provides a service for arithmetic operations.
  • -
  • LookupSystem listens on port 2553 and starts one actor, the -LookupActor -that sends operations to the remote calculator service.
  • -
- -

-Open LookupApplication.scala. -

- -

-There you see how the two actor systems and actors are started. In this first step they are running in the same JVM process, -but you can run them in separate processes as described later. Note that this changes nothing in the configuration or implementation. -

- -

-The two actor systems use different configuration, which is where the listen port is defined. -The CalculatorSystem uses calculator.conf -and the LookupSystem uses remotelookup.conf. -

- -

-Note that the configuration files also import the -common.conf. -This enables the remoting by installing the RemoteActorRefProvider and chooses the default remote transport. -Be sure to replace the default IP 127.0.0.1 with the real address the system is reachable -by if you deploy onto multiple machines! -

- -

-The CalculatorActor -does not illustrate anything exciting. More interesting is the -LookupActor. -It takes a String path as constructor parameter. This is the full path, including the remote -address of the calculator service. Observe how the actor system name of the path matches the remote system’s -name, as do IP and port number. Top-level actors are always created below the "/user" guardian, which supervises them. -

- -

-"akka.tcp://CalculatorSystem@127.0.0.1:2552/user/calculator"
-
- -

-First it sends an Identify message to the actor selection of the path. -The remote calculator actor will reply with ActorIdentity containing its ActorRef. -Identify is a built-in message that all Actors will understand and automatically reply to with a -ActorIdentity. If the identification fails it will be retried after the scheduled timeout -by the LookupActor. -

- -

-Note how none of the code is specific to remoting, this also applies when talking to a local actor which -might terminate and be recreated. That is what we call Location Transparency. -

- -

-Once it has the ActorRef of the remote service it can watch it. The remote system -might be shutdown and later started up again, then Terminated is received on the watching -side and it can retry the identification to establish a connection to the new remote system. -

- -
- -
-

Run the Lookup Sample

- -

-To run this sample, go to the Run -tab, and start the application main class sample.remote.calculator.LookupApplication if it is not already started. -

- -

-In the log pane you should see something like: -

- -

-Started LookupSystem
-Calculating 74 - 42
-Sub result: 74 - 42 = 32
-Calculating 15 + 71
-Add result: 15 + 71 = 86
-
- -

-The two actor systems are running in the same JVM process. It can be more interesting to run them in separate -processes. Stop the application in the Run tab and then open two -terminal windows. -

- -

-Start the CalculatorSystem in the first terminal window with the following command (on one line): -

- -

-<path to activator dir>/activator 
-  "run-main sample.remote.calculator.LookupApplication Calculator"		
-
- -

-Start the LookupSystem in the second terminal window with the following command (on one line): -

- -

-<path to activator dir>/activator 
-  "run-main sample.remote.calculator.LookupApplication Lookup"		
-
- -

-Thereafter you can try to shutdown the CalculatorSystem in the first terminal window with -'ctrl-c' and then start it again. In the second terminal window you should see the -failure detection and then how the successful calculation results are logged again when it has -established a connection to the new system. -

- -
- -
-

Create Remote Actors

-

-This sample involves two actor systems. -

- -
    -
  • CalculatorWorkerSystem listens on port 2552
  • -
  • CreationSystem listens on port 2554 and starts one actor, the -CreationActor -that creates remote calculator worker actors in the CalculatorWorkerSystem and sends operations to them.
  • -
- -

-Open CreationApplication.scala. -

- -

-There you see how the two actor systems and actors are started. In this first step they are running in the same JVM process, -but you can run them in separate processes as described later. -

- -

-The two actor systems use different configuration, which is where the listen port is defined. -The CalculatorWorkerSystem uses calculator.conf -and the CreationSystem uses remotecreation.conf. -

- -

-Note that the configuration files also import the -common.conf. -This enables the remoting by installing the RemoteActorRefProvider and chooses the default remote transport. -Be sure to replace the default IP 127.0.0.1 with the real address the system is reachable -by if you deploy onto multiple machines! -

- -

-The CreationActor -creates a child CalculatorActor -for each incoming MathOp message. The -configuration contains a deployment section that -matches these child actors and defines that the actors are to be deployed at the remote system. The wildcard (*) is needed -because the child actors are created with unique anonymous names. -

- -

-akka.actor.deployment {
-  /creationActor/* {
-    remote = "akka.tcp://CalculatorWorkerSystem@127.0.0.1:2552"
-  }
-}
-
- -

-Error handling, i.e. supervision, works exactly in the same way as if the child actor was a local child actor. -In addition, in case of network failures or JVM crash the child will be terminated and automatically removed -from the parent even though they are located on different machines. -

- -
- -
-

Run the Creation Sample

- -

-To run this sample, go to the Run -tab, and start the application main class sample.remote.calculator.CreationApplication if it is not already started. -

- -

-In the log pane you should see something like: -

- -

-Started CreationSystem
-Calculating 7135 / 62
-Div result: 7135 / 62 = 115.08
-Calculating 0 * 9
-Mul result: 0 * 9 = 0
-
- -

-The two actor systems are running in the same JVM process. It can be more interesting to run them in separate -processes. Stop the application in the Run tab and then open two -terminal windows. -

- -

-Start the CalculatorWorkerSystem in the first terminal window with the following command (on one line): -

- -

-<path to activator dir>/activator 
-  "run-main sample.remote.calculator.CreationApplication CalculatorWorker"		
-
- -

-Start the CreationSystem in the second terminal window with the following command (on one line): -

- -

-<path to activator dir>/activator 
-  "run-main sample.remote.calculator.CreationApplication Creation"		
-
- -

-Thereafter you can try to shutdown the CalculatorWorkerSystem in the first terminal window with -'ctrl-c' and then start it again. In the second terminal window you should see the -failure detection and then how the successful calculation results are logged again when it has -established a connection to the new system. -

- -
- - - diff --git a/akka-samples/akka-sample-supervision-java-lambda/COPYING b/akka-samples/akka-sample-supervision-java-lambda/COPYING deleted file mode 100644 index 0e259d42c9..0000000000 --- a/akka-samples/akka-sample-supervision-java-lambda/COPYING +++ /dev/null @@ -1,121 +0,0 @@ -Creative Commons Legal Code - -CC0 1.0 Universal - - CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE - LEGAL SERVICES. DISTRIBUTION OF THIS DOCUMENT DOES NOT CREATE AN - ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS - INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES - REGARDING THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS - PROVIDED HEREUNDER, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM - THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS PROVIDED - HEREUNDER. - -Statement of Purpose - -The laws of most jurisdictions throughout the world automatically confer -exclusive Copyright and Related Rights (defined below) upon the creator -and subsequent owner(s) (each and all, an "owner") of an original work of -authorship and/or a database (each, a "Work"). - -Certain owners wish to permanently relinquish those rights to a Work for -the purpose of contributing to a commons of creative, cultural and -scientific works ("Commons") that the public can reliably and without fear -of later claims of infringement build upon, modify, incorporate in other -works, reuse and redistribute as freely as possible in any form whatsoever -and for any purposes, including without limitation commercial purposes. -These owners may contribute to the Commons to promote the ideal of a free -culture and the further production of creative, cultural and scientific -works, or to gain reputation or greater distribution for their Work in -part through the use and efforts of others. 
- -For these and/or other purposes and motivations, and without any -expectation of additional consideration or compensation, the person -associating CC0 with a Work (the "Affirmer"), to the extent that he or she -is an owner of Copyright and Related Rights in the Work, voluntarily -elects to apply CC0 to the Work and publicly distribute the Work under its -terms, with knowledge of his or her Copyright and Related Rights in the -Work and the meaning and intended legal effect of CC0 on those rights. - -1. Copyright and Related Rights. A Work made available under CC0 may be -protected by copyright and related or neighboring rights ("Copyright and -Related Rights"). Copyright and Related Rights include, but are not -limited to, the following: - - i. the right to reproduce, adapt, distribute, perform, display, - communicate, and translate a Work; - ii. moral rights retained by the original author(s) and/or performer(s); -iii. publicity and privacy rights pertaining to a person's image or - likeness depicted in a Work; - iv. rights protecting against unfair competition in regards to a Work, - subject to the limitations in paragraph 4(a), below; - v. rights protecting the extraction, dissemination, use and reuse of data - in a Work; - vi. database rights (such as those arising under Directive 96/9/EC of the - European Parliament and of the Council of 11 March 1996 on the legal - protection of databases, and under any national implementation - thereof, including any amended or successor version of such - directive); and -vii. other similar, equivalent or corresponding rights throughout the - world based on applicable law or treaty, and any national - implementations thereof. - -2. Waiver. 
To the greatest extent permitted by, but not in contravention -of, applicable law, Affirmer hereby overtly, fully, permanently, -irrevocably and unconditionally waives, abandons, and surrenders all of -Affirmer's Copyright and Related Rights and associated claims and causes -of action, whether now known or unknown (including existing as well as -future claims and causes of action), in the Work (i) in all territories -worldwide, (ii) for the maximum duration provided by applicable law or -treaty (including future time extensions), (iii) in any current or future -medium and for any number of copies, and (iv) for any purpose whatsoever, -including without limitation commercial, advertising or promotional -purposes (the "Waiver"). Affirmer makes the Waiver for the benefit of each -member of the public at large and to the detriment of Affirmer's heirs and -successors, fully intending that such Waiver shall not be subject to -revocation, rescission, cancellation, termination, or any other legal or -equitable action to disrupt the quiet enjoyment of the Work by the public -as contemplated by Affirmer's express Statement of Purpose. - -3. Public License Fallback. Should any part of the Waiver for any reason -be judged legally invalid or ineffective under applicable law, then the -Waiver shall be preserved to the maximum extent permitted taking into -account Affirmer's express Statement of Purpose. 
In addition, to the -extent the Waiver is so judged Affirmer hereby grants to each affected -person a royalty-free, non transferable, non sublicensable, non exclusive, -irrevocable and unconditional license to exercise Affirmer's Copyright and -Related Rights in the Work (i) in all territories worldwide, (ii) for the -maximum duration provided by applicable law or treaty (including future -time extensions), (iii) in any current or future medium and for any number -of copies, and (iv) for any purpose whatsoever, including without -limitation commercial, advertising or promotional purposes (the -"License"). The License shall be deemed effective as of the date CC0 was -applied by Affirmer to the Work. Should any part of the License for any -reason be judged legally invalid or ineffective under applicable law, such -partial invalidity or ineffectiveness shall not invalidate the remainder -of the License, and in such case Affirmer hereby affirms that he or she -will not (i) exercise any of his or her remaining Copyright and Related -Rights in the Work or (ii) assert any associated claims and causes of -action with respect to the Work, in either case contrary to Affirmer's -express Statement of Purpose. - -4. Limitations and Disclaimers. - - a. No trademark or patent rights held by Affirmer are waived, abandoned, - surrendered, licensed or otherwise affected by this document. - b. Affirmer offers the Work as-is and makes no representations or - warranties of any kind concerning the Work, express, implied, - statutory or otherwise, including without limitation warranties of - title, merchantability, fitness for a particular purpose, non - infringement, or the absence of latent or other defects, accuracy, or - the present or absence of errors, whether or not discoverable, all to - the greatest extent permissible under applicable law. - c. 
Affirmer disclaims responsibility for clearing rights of other persons - that may apply to the Work or any use thereof, including without - limitation any person's Copyright and Related Rights in the Work. - Further, Affirmer disclaims responsibility for obtaining any necessary - consents, permissions or other rights required for any use of the - Work. - d. Affirmer understands and acknowledges that Creative Commons is not a - party to this document and has no duty or obligation with respect to - this CC0 or use of the Work. diff --git a/akka-samples/akka-sample-supervision-java-lambda/LICENSE b/akka-samples/akka-sample-supervision-java-lambda/LICENSE deleted file mode 100644 index 2a18b45589..0000000000 --- a/akka-samples/akka-sample-supervision-java-lambda/LICENSE +++ /dev/null @@ -1,10 +0,0 @@ -Activator Template by Lightbend - -Licensed under Public Domain (CC0) - -To the extent possible under law, the person who associated CC0 with -this Activator Tempate has waived all copyright and related or neighboring -rights to this Activator Template. - -You should have received a copy of the CC0 legalcode along with this -work. If not, see . 
diff --git a/akka-samples/akka-sample-supervision-java-lambda/activator.properties b/akka-samples/akka-sample-supervision-java-lambda/activator.properties deleted file mode 100644 index d85adf336e..0000000000 --- a/akka-samples/akka-sample-supervision-java-lambda/activator.properties +++ /dev/null @@ -1,7 +0,0 @@ -name=akka-supervision-java-lambda -title=Akka Supervision in Java with Lambdas -description=Illustrates supervision in Akka -tags=akka,java,java8,sample -authorName=Akka Team -authorLink=http://akka.io/ -sourceLink=https://github.com/akka/akka diff --git a/akka-samples/akka-sample-supervision-java-lambda/build.sbt b/akka-samples/akka-sample-supervision-java-lambda/build.sbt deleted file mode 100644 index 95746db778..0000000000 --- a/akka-samples/akka-sample-supervision-java-lambda/build.sbt +++ /dev/null @@ -1,19 +0,0 @@ -name := "akka-sample-supervision-java-lambda" - -version := "2.5-SNAPSHOT" - -scalaVersion := "2.11.8" - -javacOptions in compile ++= Seq("-encoding", "UTF-8", "-source", "1.8", "-target", "1.8", "-Xlint") - -javacOptions in doc ++= Seq("-encoding", "UTF-8", "-source", "1.8", "-Xdoclint:none") - -testOptions += Tests.Argument(TestFrameworks.JUnit, "-v", "-a") - -libraryDependencies ++= Seq( - "com.typesafe.akka" %% "akka-actor" % "2.5-SNAPSHOT", - "com.typesafe.akka" %% "akka-testkit" % "2.5-SNAPSHOT" % "test", - "junit" % "junit" % "4.12" % "test", - "com.novocode" % "junit-interface" % "0.11" % "test") - -licenses := Seq(("CC0", url("http://creativecommons.org/publicdomain/zero/1.0"))) diff --git a/akka-samples/akka-sample-supervision-java-lambda/pom.xml b/akka-samples/akka-sample-supervision-java-lambda/pom.xml deleted file mode 100644 index 948f331434..0000000000 --- a/akka-samples/akka-sample-supervision-java-lambda/pom.xml +++ /dev/null @@ -1,53 +0,0 @@ - - 4.0.0 - - - UTF-8 - - - sample - akka-sample-supervision-java-lambda - jar - 2.5-SNAPSHOT - - - - com.typesafe.akka - akka-actor_2.11 - 2.5-SNAPSHOT - - - com.typesafe.akka - 
akka-testkit_2.11 - 2.5-SNAPSHOT - - - junit - junit - 4.12 - test - - - - - - - org.apache.maven.plugins - maven-compiler-plugin - 3.1 - - 1.8 - 1.8 - true - - -Xlint - - - - - - - diff --git a/akka-samples/akka-sample-supervision-java-lambda/project/build.properties b/akka-samples/akka-sample-supervision-java-lambda/project/build.properties deleted file mode 100644 index 748703f770..0000000000 --- a/akka-samples/akka-sample-supervision-java-lambda/project/build.properties +++ /dev/null @@ -1 +0,0 @@ -sbt.version=0.13.7 diff --git a/akka-samples/akka-sample-supervision-java-lambda/src/main/java/supervision/ArithmeticService.java b/akka-samples/akka-sample-supervision-java-lambda/src/main/java/supervision/ArithmeticService.java deleted file mode 100644 index bbfc7d2d9d..0000000000 --- a/akka-samples/akka-sample-supervision-java-lambda/src/main/java/supervision/ArithmeticService.java +++ /dev/null @@ -1,75 +0,0 @@ -package supervision; - -import akka.actor.*; -import akka.japi.pf.DeciderBuilder; -import akka.japi.pf.ReceiveBuilder; -import scala.PartialFunction; -import scala.runtime.BoxedUnit; - -import java.util.HashMap; -import java.util.Map; - -import static supervision.FlakyExpressionCalculator.FlakinessException; -import static supervision.FlakyExpressionCalculator.Result; -import static supervision.FlakyExpressionCalculator.Position.Left; - -// A very simple service that accepts arithmetic expressions and tries to -// evaluate them. Since the calculation is dangerous (at least for the sake -// of this example) it is delegated to a worker actor of type -// FlakyExpressionCalculator. -class ArithmeticService extends AbstractLoggingActor { - - // Map of workers to the original actors requesting the calculation - Map pendingWorkers = new HashMap<>(); - - private SupervisorStrategy strategy = new OneForOneStrategy(false, DeciderBuilder. 
- match(FlakinessException.class, e -> { - log().warning("Evaluation of a top level expression failed, restarting."); - return SupervisorStrategy.restart(); - }). - match(ArithmeticException.class, e -> { - log().error("Evaluation failed because of: {}", e.getMessage()); - notifyConsumerFailure(sender(), e); - return SupervisorStrategy.stop(); - }). - match(Throwable.class, e -> { - log().error("Unexpected failure: {}", e.getMessage()); - notifyConsumerFailure(sender(), e); - return SupervisorStrategy.stop(); - }).build()); - @Override - public SupervisorStrategy supervisorStrategy() { - return strategy; - } - - private void notifyConsumerFailure(ActorRef worker, Throwable failure) { - // Status.Failure is a message type provided by the Akka library. The - // reason why it is used is because it is recognized by the "ask" pattern - // and the Future returned by ask will fail with the provided exception. - ActorRef pending = pendingWorkers.get(worker); - if (pending != null) { - pending.tell(new Status.Failure(failure), self()); - pendingWorkers.remove(worker); - } - } - - private void notifyConsumerSuccess(ActorRef worker, Integer result) { - ActorRef pending = pendingWorkers.get(worker); - if (pending != null) { - pending.tell(result, self()); - pendingWorkers.remove(worker); - } - } - - ArithmeticService() { - receive(ReceiveBuilder. - match(Expression.class, expr -> { - // We delegate the dangerous task of calculation to a worker, passing the - // expression as a constructor argument to the actor. - ActorRef worker = context().actorOf(FlakyExpressionCalculator.props(expr, Left)); - pendingWorkers.put(worker, sender()); - }). 
- match(Result.class, r -> notifyConsumerSuccess(sender(), r.getValue())).build() - ); - } -} diff --git a/akka-samples/akka-sample-supervision-java-lambda/src/main/java/supervision/Expression.java b/akka-samples/akka-sample-supervision-java-lambda/src/main/java/supervision/Expression.java deleted file mode 100644 index 73760d1c38..0000000000 --- a/akka-samples/akka-sample-supervision-java-lambda/src/main/java/supervision/Expression.java +++ /dev/null @@ -1,122 +0,0 @@ -package supervision; - -// Represents an arithmetic expression involving integer numbers -public interface Expression { - - public Expression getLeft(); - - public Expression getRight(); - - // Basic arithmetic operations that are supported by the ArithmeticService. Every - // operation except the constant value has a left and right side. For example - // the addition in (3 * 2) + (6 * 6) has the left side (3 * 2) and the right - // side (6 * 6). - public static abstract class AbstractExpression implements Expression { - private final Expression left; - private final Expression right; - private final String operator; - - protected AbstractExpression(Expression left, Expression right, String operator) { - this.left = left; - this.right = right; - this.operator = operator; - } - - public Expression getLeft() { - return left; - } - - public Expression getRight() { - return right; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (!(o instanceof AbstractExpression)) return false; - - AbstractExpression that = (AbstractExpression) o; - - if (!left.equals(that.left)) return false; - if (!operator.equals(that.operator)) return false; - if (!right.equals(that.right)) return false; - - return true; - } - - @Override - public int hashCode() { - int result = left.hashCode(); - result = 31 * result + right.hashCode(); - result = 31 * result + operator.hashCode(); - return result; - } - - @Override - public String toString() { - return "(" + getLeft() + " " + operator + " " 
+ getRight() + ")"; - } - } - - public static final class Add extends AbstractExpression { - public Add(Expression left, Expression right) { - super(left, right, "+"); - } - } - - public static final class Multiply extends AbstractExpression { - public Multiply(Expression left, Expression right) { - super(left, right, "*"); - } - } - - public static final class Divide extends AbstractExpression { - public Divide(Expression left, Expression right) { - super(left, right, "/"); - } - } - - public static final class Const implements Expression{ - private final int value; - - public Const(int value) { - this.value = value; - } - - @Override - public Expression getLeft() { - return this; - } - - @Override - public Expression getRight() { - return this; - } - - public int getValue() { - return value; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (!(o instanceof Const)) return false; - - Const aConst = (Const) o; - - if (value != aConst.value) return false; - - return true; - } - - @Override - public int hashCode() { - return value; - } - - @Override - public String toString() { - return String.valueOf(value); - } - } -} diff --git a/akka-samples/akka-sample-supervision-java-lambda/src/main/java/supervision/FlakyExpressionCalculator.java b/akka-samples/akka-sample-supervision-java-lambda/src/main/java/supervision/FlakyExpressionCalculator.java deleted file mode 100644 index 4efb4361c8..0000000000 --- a/akka-samples/akka-sample-supervision-java-lambda/src/main/java/supervision/FlakyExpressionCalculator.java +++ /dev/null @@ -1,145 +0,0 @@ -package supervision; - -import akka.actor.*; -import akka.japi.pf.DeciderBuilder; -import akka.japi.pf.ReceiveBuilder; -import scala.PartialFunction; -import scala.runtime.BoxedUnit; - -import java.util.HashMap; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.ThreadLocalRandom; -import java.util.stream.Collectors; -import java.util.stream.Stream; - -import static 
supervision.Expression.*; -import static supervision.FlakyExpressionCalculator.Position.*; - -public class FlakyExpressionCalculator extends AbstractLoggingActor { - - public static Props props(Expression expr, Position position) { - return Props.create(FlakyExpressionCalculator.class, expr, position); - } - - // Encodes the original position of a sub-expression in its parent expression - // Example: (4 / 2) has position Left in the original expression (4 / 2) * 3 - public static enum Position { - Left, Right - } - - public static class Result { - private final Expression originalExpression; - private final Integer value; - private final Position position; - - public Result(Expression originalExpression, Integer value, Position position) { - this.originalExpression = originalExpression; - this.value = value; - this.position = position; - } - - public Expression getOriginalExpression() { - return originalExpression; - } - - public Integer getValue() { - return value; - } - - public Position getPosition() { - return position; - } - } - - public static class FlakinessException extends RuntimeException { - static final long serialVersionUID = 1; - - public FlakinessException() { - super("Flakiness"); - } - } - - // This actor has the sole purpose of calculating a given expression and - // return the result to its parent. It takes an additional argument, - // myPosition, which is used to signal the parent which side of its - // expression has been calculated. - private final Expression expr; - private final Position myPosition; - - private Expression getExpr() { - return expr; - } - - private SupervisorStrategy strategy = new OneForOneStrategy(false, DeciderBuilder. - match(FlakinessException.class, e -> { - log().warning("Evaluation of {} failed, restarting.", getExpr()); - return SupervisorStrategy.restart(); - }). 
- matchAny(e -> SupervisorStrategy.escalate()).build()); - @Override - public SupervisorStrategy supervisorStrategy() { - return strategy; - } - - // The value of these variables will be reinitialized after every restart. - // The only stable data the actor has during restarts is those embedded in - // the Props when it was created. In this case expr, and myPosition. - Map results = new HashMap<>(); - Set expected = Stream.of(Left, Right).collect(Collectors.toSet()); - - @Override - public void preStart() { - if (expr instanceof Const) { - Integer value = ((Const) expr).getValue(); - context().parent().tell(new Result(expr, value, myPosition), self()); - // Don't forget to stop the actor after it has nothing more to do - context().stop(self()); - } - else { - context().actorOf(FlakyExpressionCalculator.props(expr.getLeft(), Left), "left"); - context().actorOf(FlakyExpressionCalculator.props(expr.getRight(), Right), "right"); - } - } - - public FlakyExpressionCalculator(Expression expr, Position myPosition) { - this.expr = expr; - this.myPosition = myPosition; - - receive(ReceiveBuilder. 
- match(Result.class, r -> expected.contains(r.getPosition()), r -> { - expected.remove(r.getPosition()); - results.put(r.getPosition(), r.getValue()); - if (results.size() == 2) { - // Sometimes we fail to calculate - flakiness(); - Integer result = evaluate(expr, results.get(Left),results.get(Right)); - log().info("Evaluated expression {} to value {}", expr, result); - context().parent().tell(new Result(expr, result, myPosition), self()); - // Don't forget to stop the actor after it has nothing more to do - context().stop(self()); - } - }).match(Result.class, r -> { - throw new IllegalStateException("Expected results for positions " + - expected.stream().map(Object::toString).collect(Collectors.joining(", ")) + - " but got position " + r.getPosition()); - }).build()); - } - - private Integer evaluate(Expression expr, Integer left, Integer right) { - if (expr instanceof Add) { - return left + right; - } else if( expr instanceof Multiply) { - return left * right; - } else if (expr instanceof Divide) { - return left / right; - } else { - throw new IllegalStateException("Unknown expression type " + expr.getClass()); - } - } - - private void flakiness() throws FlakinessException { - if (ThreadLocalRandom.current().nextDouble() < 0.2) - throw new FlakinessException(); - } -} diff --git a/akka-samples/akka-sample-supervision-java-lambda/src/main/java/supervision/Main.java b/akka-samples/akka-sample-supervision-java-lambda/src/main/java/supervision/Main.java deleted file mode 100644 index 199062af25..0000000000 --- a/akka-samples/akka-sample-supervision-java-lambda/src/main/java/supervision/Main.java +++ /dev/null @@ -1,38 +0,0 @@ -package supervision; - -import akka.actor.ActorRef; -import akka.actor.Props; -import akka.actor.ActorSystem; -import akka.util.Timeout; -import scala.concurrent.Await; -import scala.concurrent.duration.Duration; -import scala.concurrent.duration.FiniteDuration; -import java.util.concurrent.TimeUnit; - -import static supervision.Expression.*; 
-import static akka.pattern.Patterns.ask; -import static akka.japi.Util.classTag; - -public class Main { - - public static void main(String[] args) throws Exception { - ActorSystem system = ActorSystem.create("calculator-system"); - ActorRef calculatorService = - system.actorOf(Props.create(ArithmeticService.class), "arithmetic-service"); - - // (3 + 5) / (2 * (1 + 1)) - Expression task = new Divide( - new Add(new Const(3), new Const(5)), - new Multiply( - new Const(2), - new Add(new Const(1), new Const(1)) - ) - ); - - FiniteDuration duration = Duration.create(1, TimeUnit.SECONDS); - Integer result = Await.result(ask(calculatorService, task, new Timeout(duration)).mapTo(classTag(Integer.class)), duration); - System.out.println("Got result: " + result); - - Await.ready(system.terminate(), Duration.Inf()); - } -} diff --git a/akka-samples/akka-sample-supervision-java-lambda/src/test/java/supervision/ArithmeticServiceTest.java b/akka-samples/akka-sample-supervision-java-lambda/src/test/java/supervision/ArithmeticServiceTest.java deleted file mode 100644 index af721e8682..0000000000 --- a/akka-samples/akka-sample-supervision-java-lambda/src/test/java/supervision/ArithmeticServiceTest.java +++ /dev/null @@ -1,97 +0,0 @@ -package supervision; - -import akka.actor.ActorRef; -import akka.actor.ActorSystem; -import akka.actor.Props; -import akka.actor.Status; -import akka.testkit.JavaTestKit; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.Test; - -import java.util.stream.IntStream; -import static supervision.Expression.*; - -public class ArithmeticServiceTest { - static ActorSystem system; - - @BeforeClass - public static void setup() { - system = ActorSystem.create("BuncherTest"); - } - - @AfterClass - public static void tearDown() { - JavaTestKit.shutdownActorSystem(system); - system = null; - } - - @Test - public void TheArithmeticServiceShouldCalculateConstantExpressionsProperly(){ - new JavaTestKit(system) {{ - final ActorRef service = 
- system.actorOf(Props.create(ArithmeticService.class)); - final ActorRef probe = getRef(); - IntStream.range(-2, 3).forEach(x -> { - service.tell(new Const(x), probe); - expectMsgEquals(x); - }); - }}; - } - - @Test - public void TheArithmeticServiceShouldCalculateAdditionProperly(){ - new JavaTestKit(system) {{ - final ActorRef service = - system.actorOf(Props.create(ArithmeticService.class)); - final ActorRef probe = getRef(); - IntStream.range(-2, 3).forEach(x -> - IntStream.range(-2, 3).forEach(y -> { - service.tell(new Add(new Const(x), new Const(y)), probe); - expectMsgEquals(x + y); - }) - ); - }}; - } - - @Test - public void TheArithmeticServiceShouldCalculateMultiplicationAndDivisionProperly(){ - new JavaTestKit(system) {{ - final ActorRef service = - system.actorOf(Props.create(ArithmeticService.class)); - final ActorRef probe = getRef(); - IntStream.range(-2, 3).forEach(x -> - IntStream.range(-2, 3).forEach(y -> { - service.tell(new Multiply(new Const(x), new Const(y)), probe); - expectMsgEquals(x * y); - }) - ); - - // Skip zero in the second parameter - IntStream.range(-2, 3).forEach(x -> - IntStream.of(-2, -1, 1, 2).forEach(y -> { - service.tell(new Divide(new Const(x), new Const(y)), probe); - expectMsgEquals(x / y); - }) - ); - }}; - } - - @Test - public void TheArithmeticServiceShouldSurviveIllegalExpressions(){ - new JavaTestKit(system) {{ - final ActorRef service = - system.actorOf(Props.create(ArithmeticService.class)); - final ActorRef probe = getRef(); - - service.tell(new Divide(new Const(1), new Const(0)), probe); - expectMsgClass(Status.Failure.class); - - service.tell(new Add(null, new Const(0)), probe); - expectMsgClass(Status.Failure.class); - - service.tell(new Add(new Const(1), new Const(0)), probe); - expectMsgEquals(1); - }}; - } -} diff --git a/akka-samples/akka-sample-supervision-java-lambda/tutorial/index.html b/akka-samples/akka-sample-supervision-java-lambda/tutorial/index.html deleted file mode 100644 index 
fe3eccf48a..0000000000 --- a/akka-samples/akka-sample-supervision-java-lambda/tutorial/index.html +++ /dev/null @@ -1,254 +0,0 @@ - - - Actor Supervision Java with Lambda Support - - -
-

Quick Overview

- -

Congratulations! You have just created your first fault-resilient Akka - application, nice job!

- -

Let's start with an overview and discuss the problem we want - to solve. This tutorial application demonstrates the use of Akka - supervision hierarchies to implement reliable systems. This particular - example demonstrates a calculator service that calculates arithmetic - expressions. We will visit each of the components shortly, but you might - want to take a quick look at the components before we move on.

- -
    -
  • Expression.java - contains our "domain model", a very simple representation of - arithmetic expressions -
  • -
  • ArithmeticService.java is the entry point - for our calculation service -
  • -
  • FlakyExpressionCalculator.java is our - heavy-lifter, a worker actor that can evaluate an expression - concurrently -
  • -
  • Main.java - example code that starts up the calculator service and sends a few - jobs to it -
  • -
-
-
-

The Expression Model

- -

Our service deals with arithmetic expressions on integers involving - addition, multiplication and (integer) division. In - Expression.java - you can see a very simple model of these kinds of expressions.

- -

Any arithmetic expression is a descendant of Expression, and - has a left and right side (Const is the only exception) - which is also an Expression.

- -

For example, the expression (3 + 5) / (2 * (1 + 1)) could be constructed - as:

- - -
-new Divide(
-  new Add(
-    new Const(3),
-    new Const(5)
-  ),        // (3 + 5)
-  new Multiply(
-    new Const(2),
-    new Add(
-      new Const(1),
-      new Const(1)
-    )       // (1 + 1)
-  )         // (2 * (1 + 1))
-);          // (3 + 5) / (2 * (1 + 1))
-
- -

Apart from the encoding of an expression and some pretty printing, our - model does not provide other services, so lets move on, and see how we - can calculate the result of such expressions.

-
-
-

Arithmetic Service

- -

Our entry point is the ArithmeticService actor that accepts arithmetic - expressions, calculates them and returns the result to the original - sender of the Expression.This - logic is implemented in the receive block. The actor - handles Expression messages and starts a worker for them, - carefully recording which worker belongs to which requester in the - pendingWorkers map. -

- -

Who calculates - the expression? As you see, on the reception of an - Expression message we create a FlakyExpressionCalculator - actor and pass the expression as a parameter to its Props. - What happens here is that we delegate the calculation work to a worker - actor because the work can be "dangerous". After the worker - finishes its job, it replies to its parent (in this - case ArithmeticService) with a Result - message. At this point the top level service actor looks up which - actor it needs to send the final result to, and forwards it the value - of the computation.

-
- -
-

The Dangers of Arithmetic

- -

At first, it might feel strange that we don't calculate the result - directly but we delegate it to a new actor. The reason for that, is that - we want to treat the calculation as a dangerous task and isolate its - execution in a different actor to keep the top level service safe.

- -

In our example we will see two kinds of failures

-
    -
  • FlakinessException is a dummy exception that we throw - randomly to simulate transient failures. We will assume that - flakiness is temporary, and retrying the calculation is enough to - eventually get rid of the failure. -
  • -
  • Fatal failures, like ArithmeticException that will not - go away no matter how many times we retry the task. Division by zero - is a good example, since it indicates that the expression is - invalid, and no amount of attempts to calculate it again will fix - it. -
  • -
- -

To handle these kind of failure modes differently we customized the - supervisor strategy of ArithmeticService. Our strategy here - is to restart the child when a recoverable error is detected (in our - case the dummy FlakinessException), but when arithmetic - errors happen — like division by zero — we have no hope to recover - and therefore we stop the worker. In addition, - we have to notify the original requester of the calculation job - about the failure.

- -

We used OneForOneStrategy, since we only want to act on the - failing child, not on all of our children at the same time.

- -

We set loggingEnabled to false, since we wanted to use our - custom logging instead of the built-in reporting.

- - - -
- -
- -

The Joy of Calculation

- -

We have now seen our Expression model, our fault modes - and how we deal with them at the top level, delegating the dangerous - work to child workers to isolate the failure, and setting - Stop or Restart directives depending on the - nature of the failure (fatal or transient). Now it's time to - calculate and visit FlakyExpressionCalculator.java! -

- -

Let's review first our evaluation strategy. When we are facing an - expression like ((4 * 4) / (3 + 1)) we might be tempted to calculate (4 - * 4) first, then (3 + 1), and then the final division. We can do better: - Let's calculate the two sides of the division in parallel!

- -

To achieve this, our worker delegates the calculation of the left and - right side of the expression it has been given to two child workers of - the same type (except in the case of constant, where it just sends its - value as Result to its parent. - This logic is in preStart() - since this is the code that will be executed when an actor starts (and - during restarts if the postRestart() is not - overridden).

- -

Since any of the sides of the original expression can finish before the - other, we have to indicate somehow which side has been calculated, that - is why we pass a Position as an argument to workers which - they will put in their Result which they send after the - calculation finished successfully.

- -
-
- - -

Failing Calculations

- -

As you might have observed, we added a method called - flakiness() that sometimes just misbehaves - (throws a FlakinessException). - This simulates a transient failure. Let's see how our - FlakyExpressionCalculator deals with failure situations.

- -

A supervisor strategy is applied to the children of an actor. Since our - children are actually workers for calculating the left and right side of - our subexpression, we have to think what different failures mean for - us.

- -

If we encounter a FlakinessException it indicates that one - of our workers - just made a hiccup and failed to calculate the answer. Since we know - this failure is recoverable, we just restart the responsible worker.

- -

In case of fatal failures we cannot really do anything ourselves. First - of all, it indicates that the expression is invalid so restart does not - help, second, we are not necessarily the top level worker for the - expression. When an unknown failure is encountered it - is escalated to the parent. The parent of this actor is either another - FlakyExpressionCalculator or the - ArithmeticService . Since the calculators all escalate, no - matter how deep the failure happened, the ArithmeticService - will decide on the fate of the job (in our case, stop it).

-
-
-

When to Split Work? A Small Detour.

- -

In our example we split expressions recursively and calculated the left - and right sides of each of the expressions. The question naturally - arises: do we gain anything here regarding performance?

- -

In this example more probably not. There is an additional overhead of - splitting up tasks and collecting results, and this case the actual - subtasks consist of simple arithmetic operations which are very fast. - To really gain in performance in practice, the actual subtasks have to - be more heavyweight than this — but the pattern will be the - same.

- -
-
-

Where to go from here?

- -

After getting comfortable with the code, you can test your - understanding by trying to solve the following small exercises:

-
    -
  • Add flakiness() to various places in the calculator and - see what happens -
  • -
  • Try devising more calculation intensive nested jobs instead of - arithmetic expressions (for example transformations of a text - document) where parallelism improves performance -
  • -
- -

You should also visit

- -
- - - - diff --git a/project/AkkaBuild.scala b/project/AkkaBuild.scala index 75cf696d73..5fa9599b55 100644 --- a/project/AkkaBuild.scala +++ b/project/AkkaBuild.scala @@ -24,8 +24,6 @@ object AkkaBuild extends Build { // Load system properties from a file to make configuration from Jenkins easier loadSystemProperties("project/akka-build.properties") - override def buildLoaders = BuildLoader.transform(Sample.buildTransformer) :: Nil - val enableMiMa = true val parallelExecutionByDefault = false // TODO: enable this once we're sure it does not break things @@ -83,7 +81,6 @@ object AkkaBuild extends Build { id = "akka-scala-nightly", base = file("akka-scala-nightly"), // remove dependencies that we have to build ourselves (Scala STM) - // samples don't work with dbuild right now aggregate = aggregatedProjects diff List[ProjectReference](agent, docs) ).disablePlugins(ValidatePullRequest, MimaPlugin) @@ -259,12 +256,13 @@ object AkkaBuild extends Build { id = "akka-docs", base = file("akka-docs"), dependencies = Seq( - actor, + actor, cluster, clusterMetrics, slf4j, agent, camel, osgi, persistenceTck, persistenceQuery, distributedData, stream, + clusterTools % "compile;test->test", testkit % "compile;test->test", - remote % "compile;test->test", cluster, clusterMetrics, slf4j, agent, camel, osgi, - persistence % "compile;provided->provided;test->test", persistenceTck, persistenceQuery, - typed % "compile;test->test", distributedData, - stream, streamTestkit % "compile;test->test" + remote % "compile;test->test", + persistence % "compile;provided->provided;test->test", + typed % "compile;test->test", + streamTestkit % "compile;test->test" ) ) @@ -274,70 +272,6 @@ object AkkaBuild extends Build { dependencies = Seq(remote, remoteTests % "test->test", cluster, clusterTools, persistence % "compile;test->provided") ).configs(MultiJvm) - lazy val samplesSettings = parentSettings - - lazy val samples = Project( - id = "akka-samples", - base = file("akka-samples"), - // FIXME 
osgiDiningHakkersSampleMavenTest temporarily removed from aggregate due to #16703 - aggregate = if (!Sample.CliOptions.aggregateSamples) Nil else - Seq(sampleCamelJava, sampleCamelScala, sampleClusterJava, sampleClusterScala, sampleFsmScala, sampleFsmJavaLambda, - sampleMainJava, sampleMainScala, sampleMainJavaLambda, sampleMultiNodeScala, - samplePersistenceJava, samplePersistenceScala, samplePersistenceJavaLambda, - sampleRemoteJava, sampleRemoteScala, sampleSupervisionJavaLambda, - sampleDistributedDataScala, sampleDistributedDataJava) - ) - .settings(samplesSettings: _*) - .disablePlugins(MimaPlugin) - - lazy val sampleCamelJava = Sample.project("akka-sample-camel-java") - lazy val sampleCamelScala = Sample.project("akka-sample-camel-scala") - - lazy val sampleClusterJava = Sample.project("akka-sample-cluster-java") - lazy val sampleClusterScala = Sample.project("akka-sample-cluster-scala") - - lazy val sampleFsmScala = Sample.project("akka-sample-fsm-scala") - lazy val sampleFsmJavaLambda = Sample.project("akka-sample-fsm-java-lambda") - - lazy val sampleMainJava = Sample.project("akka-sample-main-java") - lazy val sampleMainScala = Sample.project("akka-sample-main-scala") - lazy val sampleMainJavaLambda = Sample.project("akka-sample-main-java-lambda") - - lazy val sampleMultiNodeScala = Sample.project("akka-sample-multi-node-scala") - - lazy val samplePersistenceJava = Sample.project("akka-sample-persistence-java") - lazy val samplePersistenceScala = Sample.project("akka-sample-persistence-scala") - lazy val samplePersistenceJavaLambda = Sample.project("akka-sample-persistence-java-lambda") - - lazy val sampleRemoteJava = Sample.project("akka-sample-remote-java") - lazy val sampleRemoteScala = Sample.project("akka-sample-remote-scala") - - lazy val sampleSupervisionJavaLambda = Sample.project("akka-sample-supervision-java-lambda") - - lazy val sampleDistributedDataScala = Sample.project("akka-sample-distributed-data-scala") - lazy val 
sampleDistributedDataJava = Sample.project("akka-sample-distributed-data-java") - - lazy val osgiDiningHakkersSampleMavenTest = Project( - id = "akka-sample-osgi-dining-hakkers-maven-test", - base = file("akka-samples/akka-sample-osgi-dining-hakkers-maven-test") - ) - .settings( - publishArtifact := false, - // force publication of artifacts to local maven repo, so latest versions can be used when running maven tests - compile in Compile <<= - (publishM2 in actor, publishM2 in testkit, publishM2 in remote, publishM2 in cluster, publishM2 in osgi, - publishM2 in slf4j, publishM2 in persistence, publishM2 in stream, publishM2 in protobuf, compile in Compile) map - ((_, _, _, _, _, _, _, _, _, c) => c), - test in Test ~= { x => { - def executeMvnCommands(failureMessage: String, commands: String*) = { - if ({List("sh", "-c", commands.mkString("cd akka-samples/akka-sample-osgi-dining-hakkers; mvn ", " ", "")) !} != 0) - throw new Exception(failureMessage) - } - executeMvnCommands("Osgi sample Dining hakkers test failed", "clean", "install") - }} - ) - .settings(dontPublishSettings: _*) - val dontPublishSettings = Seq( publishSigned := (), publish := (), diff --git a/project/Dependencies.scala b/project/Dependencies.scala index cb9e662e4d..4ec33c3da6 100644 --- a/project/Dependencies.scala +++ b/project/Dependencies.scala @@ -49,7 +49,7 @@ object Dependencies { val slf4jApi = "org.slf4j" % "slf4j-api" % slf4jVersion // MIT - // mirrored in OSGi sample + // mirrored in OSGi sample https://github.com/akka/akka-samples/tree/master/akka-sample-osgi-dining-hakkers val osgiCore = "org.osgi" % "org.osgi.core" % "4.3.1" // ApacheV2 val osgiCompendium= "org.osgi" % "org.osgi.compendium" % "4.3.1" // ApacheV2 diff --git a/project/Doc.scala b/project/Doc.scala index 26dd759d34..f283b9858d 100644 --- a/project/Doc.scala +++ b/project/Doc.scala @@ -120,7 +120,7 @@ object UnidocRoot extends AutoPlugin { override lazy val projectSettings = 
CliOptions.genjavadocEnabled.ifTrue(scalaJavaUnidocSettings).getOrElse(scalaUnidocSettings) ++ - settings(Seq(AkkaBuild.samples), Seq(AkkaBuild.remoteTests, AkkaBuild.benchJmh, AkkaBuild.protobuf, AkkaBuild.osgiDiningHakkersSampleMavenTest, AkkaBuild.akkaScalaNightly)) + settings(Seq(), Seq(AkkaBuild.remoteTests, AkkaBuild.benchJmh, AkkaBuild.protobuf, AkkaBuild.akkaScalaNightly)) } /** diff --git a/project/Sample.scala b/project/Sample.scala deleted file mode 100644 index 3df2e85e3f..0000000000 --- a/project/Sample.scala +++ /dev/null @@ -1,92 +0,0 @@ -/** - * Copyright (C) 2016-2017 Lightbend Inc. - */ -package akka - -import sbt._ -import sbt.Keys._ - -object Sample { - - object CliOptions { - /** - * Aggregated sample builds are transformed by swapping library dependencies to project ones. - * This does work play well with dbuild and breaks scala community build. Therefore it was made - * optional. - * - * Default: true - */ - val aggregateSamples = sys.props.getOrElse("akka.build.aggregateSamples", "false").toBoolean - } - - final val akkaOrganization = "com.typesafe.akka" - - def buildTransformer = (ti: BuildLoader.TransformInfo) => ti.base.name match { - case s if s.startsWith("akka-sample") => - ti.unit.copy( - loadedDefinitions = ti.unit.definitions.copy( - projects = libraryToProjectDeps(ti.unit.definitions.projects))) - case _ => ti.unit - } - - def project(name: String) = - ProjectRef(file(s"akka-samples/$name"), name) - - private def libraryToProjectDeps(projects: Seq[Project]) = - projects.map(addProjectDependencies andThen excludeLibraryDependencies andThen enableAutoPlugins) - - private val addProjectDependencies = (project: Project) => - project.settings( - buildDependencies := { - val projectDependencies = libraryDependencies.value.collect { - case module if module.organization == akkaOrganization => ProjectRef(file("").toURI, module.name) - } - val dependencies = buildDependencies.value - val classpathWithProjectDependencies = 
dependencies.classpath.map { - case (proj, deps) if proj.project == project.id => - // add project dependency for every akka library dependency - (proj, deps ++ projectDependencies.map(ResolvedClasspathDependency(_, None))) - case (project, deps) => (project, deps) - } - BuildDependencies(classpathWithProjectDependencies, dependencies.aggregate) - } - ) - - private val excludeLibraryDependencies = (project: Project) => - project.settings( - libraryDependencies := libraryDependencies.value.map { - case module if module.organization == akkaOrganization => - /** - * Exclude self, so it is still possible to know what project dependencies to add. - * This leaves all transitive dependencies (such as typesafe-config library). - * However it means that if a sample uses typesafe-config library it must have a - * library dependency which has a direct transitive dependency to typesafe-config. - */ - module.excludeAll(ExclusionRule(organization=module.organization)) - case module => module - } - ) - - /** - * AutoPlugins are not enabled for externally loaded projects. - * This adds required settings from the AutoPlugins. - * - * Every AutoPlugin that is also meant to be applied to the - * transformed sample projects should have its settings added here. 
- */ - private val enableAutoPlugins = (project: Project) => - project.settings(( - Publish.projectSettings ++ - ValidatePullRequest.projectSettings - ): _*).configs(ValidatePullRequest.ValidatePR) - - private implicit class RichLoadedDefinitions(ld: LoadedDefinitions) { - def copy(projects: Seq[Project]) = - new LoadedDefinitions(ld.base, ld.target, ld.loader, ld.builds, projects, ld.buildNames) - } - - private implicit class RichBuildUnit(bu: BuildUnit) { - def copy(loadedDefinitions: LoadedDefinitions) = - new BuildUnit(bu.uri, bu.localBase, loadedDefinitions, bu.plugins) - } -} diff --git a/project/SphinxDoc.scala b/project/SphinxDoc.scala index 7cbe840bce..0c5fb91e7b 100644 --- a/project/SphinxDoc.scala +++ b/project/SphinxDoc.scala @@ -68,7 +68,8 @@ object SphinxDoc { }), "sigarVersion" -> Dependencies.Compile.sigar.revision, "sigarLoaderVersion" -> Dependencies.Compile.Provided.sigarLoader.revision, - "github" -> GitHub.url(v) + "github" -> GitHub.url(v), + "samples" -> "http://github.com/akka/akka-samples" ) }, preprocess <<= (sourceDirectory, target in preprocess, cacheDirectory, preprocessExts, preprocessVars, streams) map {