diff --git a/.gitignore b/.gitignore index 0863ff8a05..b92e37e421 100755 --- a/.gitignore +++ b/.gitignore @@ -71,3 +71,5 @@ tm*.lck tm*.log tm.out worker*.log +*-shim.sbt + diff --git a/akka-docs/rst/additional/code/osgi/Activator.scala b/akka-docs/rst/additional/code/osgi/Activator.scala index 4f432452c3..8ae0c7a2cc 100644 --- a/akka-docs/rst/additional/code/osgi/Activator.scala +++ b/akka-docs/rst/additional/code/osgi/Activator.scala @@ -3,7 +3,7 @@ package docs.osgi case object SomeMessage class SomeActor extends akka.actor.Actor { - def receive = { case SomeMessage ⇒ } + def receive = { case SomeMessage => } } //#Activator diff --git a/akka-docs/rst/common/code/docs/circuitbreaker/CircuitBreakerDocSpec.scala b/akka-docs/rst/common/code/docs/circuitbreaker/CircuitBreakerDocSpec.scala index 994a3e3619..bb1febe795 100644 --- a/akka-docs/rst/common/code/docs/circuitbreaker/CircuitBreakerDocSpec.scala +++ b/akka-docs/rst/common/code/docs/circuitbreaker/CircuitBreakerDocSpec.scala @@ -35,9 +35,9 @@ class DangerousActor extends Actor with ActorLogging { def dangerousCall: String = "This really isn't that dangerous of a call after all" def receive = { - case "is my middle name" ⇒ + case "is my middle name" => breaker.withCircuitBreaker(Future(dangerousCall)) pipeTo sender - case "block for me" ⇒ + case "block for me" => sender ! 
breaker.withSyncCircuitBreaker(dangerousCall) } //#circuit-breaker-usage diff --git a/akka-docs/rst/conf.py b/akka-docs/rst/conf.py index bd57106031..78a55a985c 100644 --- a/akka-docs/rst/conf.py +++ b/akka-docs/rst/conf.py @@ -65,7 +65,7 @@ epub_cover = ("../_sphinx/static/akka.png", "") def setup(app): from sphinx.util.texescape import tex_replacements - tex_replacements.append((u'⇒', ur'\(\Rightarrow\)')) + tex_replacements.append((u'=>', ur'\(\Rightarrow\)')) latex_paper_size = 'a4' latex_font_size = '10pt' diff --git a/akka-docs/rst/java/camel.rst b/akka-docs/rst/java/camel.rst index 28fe70e6c2..7be0c5aad4 100644 --- a/akka-docs/rst/java/camel.rst +++ b/akka-docs/rst/java/camel.rst @@ -312,7 +312,7 @@ to do other work) and resume processing when the response is ready. This is currently the case for a `subset of components`_ such as the `Jetty component`_. All other Camel components can still be used, of course, but they will cause allocation of a thread for the duration of an in-out message exchange. There's -also a :ref:`camel-async-example-java` that implements both, an asynchronous +also :ref:`camel-examples-java` that implements both, an asynchronous consumer and an asynchronous producer, with the jetty component. If the used Camel component is blocking it might be necessary to use a separate @@ -469,116 +469,18 @@ __ https://svn.apache.org/repos/asf/camel/tags/camel-2.8.0/camel-core/src/main/j Examples ======== -.. _camel-async-example-java: +The `Typesafe Activator `_ +tutorial named `Akka Camel Samples with Java `_ +contains 3 samples: -Asynchronous routing and transformation example ------------------------------------------------ + * Asynchronous routing and transformation - This example demonstrates how to implement consumer and + producer actors that support :ref:`camel-asynchronous-routing-java` with their Camel endpoints. 
+ + * Custom Camel route - Demonstrates the combined usage of a ``Producer`` and a + ``Consumer`` actor as well as the inclusion of a custom Camel route. -This example demonstrates how to implement consumer and producer actors that -support :ref:`camel-asynchronous-routing-java` with their Camel endpoints. The sample -application transforms the content of the Akka homepage, http://akka.io, by -replacing every occurrence of *Akka* with *AKKA*. To run this example, add -a Boot class that starts the actors. After starting -the :ref:`microkernel-java`, direct the browser to http://localhost:8875 and the -transformed Akka homepage should be displayed. Please note that this example -will probably not work if you're behind an HTTP proxy. - -The following figure gives an overview how the example actors interact with -external systems and with each other. A browser sends a GET request to -http://localhost:8875 which is the published endpoint of the ``HttpConsumer`` -actor. The ``HttpConsumer`` actor forwards the requests to the ``HttpProducer`` -actor which retrieves the Akka homepage from http://akka.io. The retrieved HTML -is then forwarded to the ``HttpTransformer`` actor which replaces all occurrences -of *Akka* with *AKKA*. The transformation result is sent back the HttpConsumer -which finally returns it to the browser. - -.. image:: ../images/camel-async-interact.png - -Implementing the example actor classes and wiring them together is rather easy -as shown in the following snippet. - -.. includecode:: code/docs/camel/sample/http/HttpConsumer.java#HttpExample -.. includecode:: code/docs/camel/sample/http/HttpProducer.java#HttpExample -.. includecode:: code/docs/camel/sample/http/HttpTransformer.java#HttpExample -.. includecode:: code/docs/camel/sample/http/HttpSample.java#HttpExample - -The `jetty endpoints`_ of HttpConsumer and HttpProducer support asynchronous -in-out message exchanges and do not allocate threads for the full duration of -the exchange. 
This is achieved by using `Jetty continuations`_ on the -consumer-side and by using `Jetty's asynchronous HTTP client`_ on the producer -side. The following high-level sequence diagram illustrates that. - -.. _jetty endpoints: http://camel.apache.org/jetty.html -.. _Jetty continuations: http://wiki.eclipse.org/Jetty/Feature/Continuations -.. _Jetty's asynchronous HTTP client: http://wiki.eclipse.org/Jetty/Tutorial/HttpClient - -.. image:: ../images/camel-async-sequence.png - -Custom Camel route example --------------------------- - -This section also demonstrates the combined usage of a ``Producer`` and a -``Consumer`` actor as well as the inclusion of a custom Camel route. The -following figure gives an overview. - -.. image:: ../images/camel-custom-route.png - -* A consumer actor receives a message from an HTTP client - -* It forwards the message to another actor that transforms the message (encloses - the original message into hyphens) - -* The transformer actor forwards the transformed message to a producer actor - -* The producer actor sends the message to a custom Camel route beginning at the - ``direct:welcome`` endpoint - -* A processor (transformer) in the custom Camel route prepends "Welcome" to the - original message and creates a result message - -* The producer actor sends the result back to the consumer actor which returns - it to the HTTP client - - -The consumer, transformer and -producer actor implementations are as follows. - -.. includecode:: code/docs/camel/sample/route/Consumer3.java#CustomRouteExample -.. includecode:: code/docs/camel/sample/route/Transformer.java#CustomRouteExample -.. includecode:: code/docs/camel/sample/route/Producer1.java#CustomRouteExample -.. includecode:: code/docs/camel/sample/route/CustomRouteSample.java#CustomRouteExample - -The producer actor knows where to reply the message to because the consumer and -transformer actors have forwarded the original sender reference as well. 
The -application configuration and the route starting from direct:welcome are done in the code above. - -To run the example, add the lines shown in the example to a Boot class and the start the :ref:`microkernel-java` and POST a message to -``http://localhost:8877/camel/welcome``. - -.. code-block:: none - - curl -H "Content-Type: text/plain" -d "Anke" http://localhost:8877/camel/welcome - -The response should be: - -.. code-block:: none - - Welcome - Anke - - -Quartz Scheduler Example ------------------------- - -Here is an example showing how simple is to implement a cron-style scheduler by -using the Camel Quartz component in Akka. - -The following example creates a "timer" actor which fires a message every 2 -seconds: - -.. includecode:: code/docs/camel/sample/quartz/MyQuartzActor.java#QuartzExample -.. includecode:: code/docs/camel/sample/quartz/QuartzSample.java#QuartzExample - -For more information about the Camel Quartz component, see here: -http://camel.apache.org/quartz.html + * Quartz Scheduler Example - Showing how simple it is to implement a cron-style scheduler by + using the Camel Quartz component Additional Resources ==================== diff --git a/akka-docs/rst/java/cluster-usage.rst b/akka-docs/rst/java/cluster-usage.rst index 8dd2db1661..4e003aa082 100644 --- a/akka-docs/rst/java/cluster-usage.rst +++ b/akka-docs/rst/java/cluster-usage.rst @@ -23,15 +23,12 @@ The Akka cluster is a separate jar file. Make sure that you have the following d A Simple Cluster Example ^^^^^^^^^^^^^^^^^^^^^^^^ -The following small program together with its configuration starts an ``ActorSystem`` -with the Cluster enabled. It joins the cluster and logs some membership events. +The following configuration enables the ``Cluster`` extension to be used. +It joins the cluster and an actor subscribes to cluster membership events and logs them. -Try it out: +The ``application.conf`` configuration looks like this: -1. 
Add the following ``application.conf`` in your project, place it in ``src/main/resources``: - - -.. includecode:: ../../../akka-samples/akka-sample-cluster/src/main/resources/application.conf#cluster +.. includecode:: ../../../akka-samples/akka-sample-cluster-java/src/main/resources/application.conf To enable cluster capabilities in your Akka project you should, at a minimum, add the :ref:`remoting-java` settings, but with ``akka.cluster.ClusterActorRefProvider``. @@ -42,49 +39,17 @@ The seed nodes are configured contact points for initial, automatic, join of the Note that if you are going to start the nodes on different machines you need to specify the ip-addresses or host names of the machines in ``application.conf`` instead of ``127.0.0.1`` -2. Add the following main program to your project, place it in ``src/main/java``: +An actor that uses the cluster extension may look like this: -.. literalinclude:: ../../../akka-samples/akka-sample-cluster/src/main/java/sample/cluster/simple/japi/SimpleClusterApp.java +.. literalinclude:: ../../../akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/simple/SimpleClusterListener.java :language: java -3. Start the first seed node. Open a terminal window and run (one line):: +The actor registers itself as subscriber of certain cluster events. It gets notified with a snapshot event, ``CurrentClusterState`` +that holds full state information of the cluster. After that it receives events for changes that happen in the cluster. - mvn exec:java -Dexec.mainClass="sample.cluster.simple.japi.SimpleClusterApp" \ - -Dexec.args="2551" - -2551 corresponds to the port of the first seed-nodes element in the configuration. -In the log output you see that the cluster node has been started and changed status to 'Up'. - -4. Start the second seed node. 
Open another terminal window and run:: - - mvn exec:java -Dexec.mainClass="sample.cluster.simple.japi.SimpleClusterApp" \ - -Dexec.args="2552" - - -2552 corresponds to the port of the second seed-nodes element in the configuration. -In the log output you see that the cluster node has been started and joins the other seed node -and becomes a member of the cluster. Its status changed to 'Up'. - -Switch over to the first terminal window and see in the log output that the member joined. - -5. Start another node. Open a maven session in yet another terminal window and run:: - - mvn exec:java -Dexec.mainClass="sample.cluster.simple.japi.SimpleClusterApp" - -Now you don't need to specify the port number, and it will use a random available port. -It joins one of the configured seed nodes. Look at the log output in the different terminal -windows. - -Start even more nodes in the same way, if you like. - -6. Shut down one of the nodes by pressing 'ctrl-c' in one of the terminal windows. -The other nodes will detect the failure after a while, which you can see in the log -output in the other terminals. - -Look at the source code of the program again. What it does is to create an actor -and register it as subscriber of certain cluster events. It gets notified with -an snapshot event, ``CurrentClusterState`` that holds full state information of -the cluster. After that it receives events for changes that happen in the cluster. +The easiest way to run this example yourself is to download `Typesafe Activator `_ +and open the tutorial named `Akka Cluster Samples with Java `_. +It contains instructions of how to run the SimpleClusterApp. Joining to Seed Nodes ^^^^^^^^^^^^^^^^^^^^^ @@ -237,17 +202,13 @@ backend workers, which performs the transformation job, and sends the result bac the original client. New backend nodes, as well as new frontend nodes, can be added or removed to the cluster dynamically. -In this example the following imports are used: - -.. 
includecode:: ../../../akka-samples/akka-sample-cluster/src/main/java/sample/cluster/transformation/japi/TransformationBackend.java#imports - Messages: -.. includecode:: ../../../akka-samples/akka-sample-cluster/src/main/java/sample/cluster/transformation/japi/TransformationMessages.java#messages +.. includecode:: ../../../akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/transformation/TransformationMessages.java#messages The backend worker that performs the transformation job: -.. includecode:: ../../../akka-samples/akka-sample-cluster/src/main/java/sample/cluster/transformation/japi/TransformationBackend.java#backend +.. includecode:: ../../../akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/transformation/TransformationBackend.java#backend Note that the ``TransformationBackend`` actor subscribes to cluster events to detect new, potential, frontend nodes, and send them a registration message so that they know @@ -255,36 +216,17 @@ that they can use the backend worker. The frontend that receives user jobs and delegates to one of the registered backend workers: -.. includecode:: ../../../akka-samples/akka-sample-cluster/src/main/java/sample/cluster/transformation/japi/TransformationFrontend.java#frontend +.. includecode:: ../../../akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/transformation/TransformationFrontend.java#frontend Note that the ``TransformationFrontend`` actor watch the registered backend -to be able to remove it from its list of availble backend workers. +to be able to remove it from its list of available backend workers. Death watch uses the cluster failure detector for nodes in the cluster, i.e. it detects network failures and JVM crashes, in addition to graceful termination of watched actor. 
-This example is included in ``akka-samples/akka-sample-cluster`` and you can try it by copying the -`source <@github@/akka-samples/akka-sample-cluster>`_ to your -maven project, defined as in :ref:`cluster_simple_example_java`. -Run it by starting nodes in different terminal windows. For example, starting 2 -frontend nodes and 3 backend nodes:: - - mvn exec:java \ - -Dexec.mainClass="sample.cluster.transformation.japi.TransformationFrontendMain" \ - -Dexec.args="2551" - - mvn exec:java \ - -Dexec.mainClass="sample.cluster.transformation.japi.TransformationBackendMain" \ - -Dexec.args="2552" - - mvn exec:java \ - -Dexec.mainClass="sample.cluster.transformation.japi.TransformationBackendMain" - - mvn exec:java \ - -Dexec.mainClass="sample.cluster.transformation.japi.TransformationBackendMain" - - mvn exec:java \ - -Dexec.mainClass="sample.cluster.transformation.japi.TransformationFrontendMain" +The `Typesafe Activator `_ tutorial named +`Akka Cluster Samples with Java `_ +contains the full source code and instructions on how to run the **Worker Dial-in Example**. Node Roles ^^^^^^^^^^ @@ -307,18 +249,18 @@ members have joined, and the cluster has reached a certain size. With a configuration option you can define required number of members before the leader changes member status of 'Joining' members to 'Up'. -.. includecode:: ../../../akka-samples/akka-sample-cluster/src/main/resources/factorial.conf#min-nr-of-members +.. includecode:: ../../../akka-samples/akka-sample-cluster-java/src/main/resources/factorial.conf#min-nr-of-members In a similar way you can define required number of members of a certain role before the leader changes member status of 'Joining' members to 'Up'. -.. 
includecode:: ../../../akka-samples/akka-sample-cluster-java/src/main/resources/factorial.conf#role-min-nr-of-members You can start the actors in a ``registerOnMemberUp`` callback, which will be invoked when the current member status is changed tp 'Up', i.e. the cluster has at least the defined number of members. -.. includecode:: ../../../akka-samples/akka-sample-cluster/src/main/java/sample/cluster/factorial/japi/FactorialFrontendMain.java#registerOnUp +.. includecode:: ../../../akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/factorial/FactorialFrontendMain.java#registerOnUp This callback can be used for other things than starting actors. @@ -448,7 +390,7 @@ Router with Group of Routees When using a ``Group`` you must start the routee actors on the cluster member nodes. That is not done by the router. The configuration for a group looks like this: -.. includecode:: ../../../akka-samples/akka-sample-cluster/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSpec.scala#router-lookup-config +.. includecode:: ../../../akka-samples/akka-sample-cluster-java/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSpec.scala#router-lookup-config .. note:: @@ -466,7 +408,7 @@ to a high value will result in new routees added to the router when nodes join t The same type of router could also have been defined in code: -.. includecode:: ../../../akka-samples/akka-sample-cluster/src/main/java/sample/cluster/stats/japi/StatsService.java#router-lookup-in-code +.. includecode:: ../../../akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/stats/Extra.java#router-lookup-in-code See :ref:`cluster_configuration_java` section for further descriptions of the settings. @@ -482,23 +424,19 @@ to count number of characters in each word to a separate worker, a routee of a r The character count for each word is sent back to an aggregator that calculates the average number of characters per word when all results have been collected. 
-In this example we use the following imports: - -.. includecode:: ../../../akka-samples/akka-sample-cluster/src/main/java/sample/cluster/stats/japi/StatsService.java#imports - Messages: -.. includecode:: ../../../akka-samples/akka-sample-cluster/src/main/java/sample/cluster/stats/japi/StatsMessages.java#messages +.. includecode:: ../../../akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/stats/StatsMessages.java#messages The worker that counts number of characters in each word: -.. includecode:: ../../../akka-samples/akka-sample-cluster/src/main/java/sample/cluster/stats/japi/StatsWorker.java#worker +.. includecode:: ../../../akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/stats/StatsWorker.java#worker The service that receives text from users and splits it up into words, delegates to workers and aggregates: -.. includecode:: ../../../akka-samples/akka-sample-cluster/src/main/java/sample/cluster/stats/japi/StatsService.java#service +.. includecode:: ../../../akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/stats/StatsService.java#service -.. includecode:: ../../../akka-samples/akka-sample-cluster/src/main/java/sample/cluster/stats/japi/StatsAggregator.java#aggregator +.. includecode:: ../../../akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/stats/StatsAggregator.java#aggregator Note, nothing cluster specific so far, just plain actors. @@ -506,31 +444,14 @@ Note, nothing cluster specific so far, just plain actors. All nodes start ``StatsService`` and ``StatsWorker`` actors. Remember, routees are the workers in this case. The router is configured with ``routees.paths``: -.. includecode:: ../../../akka-samples/akka-sample-cluster/src/main/resources/application.conf#config-router-lookup +.. 
includecode:: ../../../akka-samples/akka-sample-cluster-java/src/main/resources/stats1.conf#config-router-lookup This means that user requests can be sent to ``StatsService`` on any node and it will use -``StatsWorker`` on all nodes. There can only be one worker per node, but that worker could easily -fan out to local children if more parallelism is needed. +``StatsWorker`` on all nodes. -This example is included in ``akka-samples/akka-sample-cluster`` and you can try it by copying the -`source <@github@/akka-samples/akka-sample-cluster>`_ to your -maven project, defined as in :ref:`cluster_simple_example_java`. -Run it by starting nodes in different terminal windows. For example, starting 3 -service nodes and 1 client:: - - mvn exec:java \ - -Dexec.mainClass="sample.cluster.stats.japi.StatsSampleMain" \ - -Dexec.args="2551" - - mvn exec:java \ - -Dexec.mainClass="sample.cluster.stats.japi.StatsSampleMain" \ - -Dexec.args="2552" - - mvn exec:java \ - -Dexec.mainClass="sample.cluster.stats.japi.StatsSampleMain" - - mvn exec:java \ - -Dexec.mainClass="sample.cluster.stats.japi.StatsSampleMain" +The `Typesafe Activator `_ tutorial named +`Akka Cluster Samples with Java `_ +contains the full source code and instructions on how to run the **Router Example with Group of Routees**. Router with Pool of Remote Deployed Routees ------------------------------------------- @@ -538,7 +459,7 @@ Router with Pool of Remote Deployed Routees When using a ``Pool`` with routees created and deployed on the cluster member nodes the configuration for a router looks like this: -.. 
includecode:: ../../../akka-samples/akka-sample-cluster-java/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSingleMasterSpec.scala#router-deploy-config It is possible to limit the deployment of routees to member nodes tagged with a certain role by specifying ``use-role``. @@ -550,7 +471,7 @@ the cluster. The same type of router could also have been defined in code: -.. includecode:: ../../../akka-samples/akka-sample-cluster/src/main/java/sample/cluster/stats/japi/StatsService.java#router-deploy-in-code +.. includecode:: ../../../akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/stats/Extra.java#router-deploy-in-code See :ref:`cluster_configuration_java` section for further descriptions of the settings. @@ -561,44 +482,23 @@ Let's take a look at how to use a cluster aware router on single master node tha and deploys workers. To keep track of a single master we use the :ref:`cluster-singleton` in the contrib module. The ``ClusterSingletonManager`` is started on each node. -.. includecode:: ../../../akka-samples/akka-sample-cluster/src/main/java/sample/cluster/stats/japi/StatsSampleOneMasterMain.java#create-singleton-manager +.. includecode:: ../../../akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/stats/StatsSampleOneMasterMain.java#create-singleton-manager We also need an actor on each node that keeps track of where current single master exists and delegates jobs to the ``StatsService``. -.. includecode:: ../../../akka-samples/akka-sample-cluster/src/main/java/sample/cluster/stats/japi/StatsFacade.java#facade +.. includecode:: ../../../akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/stats/StatsFacade.java#facade The ``StatsFacade`` receives text from users and delegates to the current ``StatsService``, the single master. It listens to cluster events to lookup the ``StatsService`` on the oldest node. All nodes start ``StatsFacade`` and the ``ClusterSingletonManager``. 
The router is now configured like this: -.. includecode:: ../../../akka-samples/akka-sample-cluster/src/main/resources/application.conf#config-router-deploy +.. includecode:: ../../../akka-samples/akka-sample-cluster-java/src/main/resources/stats2.conf#config-router-deploy -This example is included in ``akka-samples/akka-sample-cluster`` and you can try it by copying the -`source <@github@/akka-samples/akka-sample-cluster>`_ to your -maven project, defined as in :ref:`cluster_simple_example_java`. Also add the `akka-contrib` dependency -to your pom.xml. - -Run it by starting nodes in different terminal windows. For example, starting 3 -service nodes and 1 client:: - - mvn exec:java \ - -Dexec.mainClass="sample.cluster.stats.japi.StatsSampleOneMasterMain" \ - -Dexec.args="2551" - - mvn exec:java \ - -Dexec.mainClass="sample.cluster.stats.japi.StatsSampleOneMasterMain" \ - -Dexec.args="2552" - - mvn exec:java \ - -Dexec.mainClass="sample.cluster.stats.japi.StatsSampleOneMasterClientMain" - - mvn exec:java \ - -Dexec.mainClass="sample.cluster.stats.japi.StatsSampleOneMasterMain" - - -.. note:: The above example will be simplified when the cluster handles automatic actor partitioning. +The `Typesafe Activator `_ tutorial named +`Akka Cluster Samples with Java `_ +contains the full source code and instructions on how to run the **Router Example with Pool of Remote Deployed Routees**. Cluster Metrics ^^^^^^^^^^^^^^^ @@ -637,63 +537,40 @@ It can be configured to use a specific MetricsSelector to produce the probabilit The collected metrics values are smoothed with `exponential weighted moving average `_. In the :ref:`cluster_configuration_java` you can adjust how quickly past data is decayed compared to new data. -Let's take a look at this router in action. - -In this example the following imports are used: - -.. 
includecode:: ../../../akka-samples/akka-sample-cluster/src/main/java/sample/cluster/factorial/japi/FactorialBackend.java#imports +Let's take a look at this router in action. What can be more demanding than calculating factorials? The backend worker that performs the factorial calculation: -.. includecode:: ../../../akka-samples/akka-sample-cluster/src/main/java/sample/cluster/factorial/japi/FactorialBackend.java#backend +.. includecode:: ../../../akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/factorial/FactorialBackend.java#backend The frontend that receives user jobs and delegates to the backends via the router: -.. includecode:: ../../../akka-samples/akka-sample-cluster/src/main/java/sample/cluster/factorial/japi/FactorialFrontend.java#frontend +.. includecode:: ../../../akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/factorial/FactorialFrontend.java#frontend As you can see, the router is defined in the same way as other routers, and in this case it is configured as follows: -.. includecode:: ../../../akka-samples/akka-sample-cluster/src/main/resources/application.conf#adaptive-router +.. includecode:: ../../../akka-samples/akka-sample-cluster-java/src/main/resources/factorial.conf#adaptive-router It is only router type ``adaptive`` and the ``metrics-selector`` that is specific to this router, other things work in the same way as other routers. The same type of router could also have been defined in code: -.. includecode:: ../../../akka-samples/akka-sample-cluster/src/main/java/sample/cluster/factorial/japi/FactorialFrontend.java#router-lookup-in-code +.. includecode:: ../../../akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/factorial/Extra.java#router-lookup-in-code -.. 
includecode:: ../../../akka-samples/akka-sample-cluster/src/main/java/sample/cluster/factorial/japi/FactorialFrontend.java#router-deploy-in-code - -This example is included in ``akka-samples/akka-sample-cluster`` and you can try it by copying the -`source <@github@/akka-samples/akka-sample-cluster>`_ to your -maven project, defined as in :ref:`cluster_simple_example_java`. -Run it by starting nodes in different terminal windows. For example, starting 3 backend nodes and -one frontend:: - - mvn exec:java \ - -Dexec.mainClass="sample.cluster.factorial.japi.FactorialBackendMain" \ - -Dexec.args="2551" - - mvn exec:java \ - -Dexec.mainClass="sample.cluster.factorial.japi.FactorialBackendMain" \ - -Dexec.args="2552" - - mvn exec:java \ - -Dexec.mainClass="sample.cluster.factorial.japi.FactorialBackendMain" - - mvn exec:java \ - -Dexec.mainClass="sample.cluster.factorial.japi.FactorialFrontendMain" - -Press ctrl-c in the terminal window of the frontend to stop the factorial calculations. +.. includecode:: ../../../akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/factorial/Extra.java#router-deploy-in-code +The `Typesafe Activator `_ tutorial named +`Akka Cluster Samples with Java `_ +contains the full source code and instructions on how to run the **Adaptive Load Balancing** sample. Subscribe to Metrics Events --------------------------- It is possible to subscribe to the metrics events directly to implement other functionality. -.. 
includecode:: ../../../akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/factorial/MetricsListener.java#metrics-listener Custom Metrics Collector ------------------------ diff --git a/akka-docs/rst/java/code/docs/camel/sample/http/HttpSample.java b/akka-docs/rst/java/code/docs/camel/sample/http/HttpSample.java deleted file mode 100644 index 2143c33158..0000000000 --- a/akka-docs/rst/java/code/docs/camel/sample/http/HttpSample.java +++ /dev/null @@ -1,23 +0,0 @@ -package docs.camel.sample.http; - -import akka.actor.*; - -public class HttpSample { - public static void main(String[] args) { - //#HttpExample - // Create the actors. this can be done in a Boot class so you can - // run the example in the MicroKernel. Just add the three lines below - // to your boot class. - ActorSystem system = ActorSystem.create("some-system"); - - final ActorRef httpTransformer = system.actorOf( - Props.create(HttpTransformer.class)); - - final ActorRef httpProducer = system.actorOf( - Props.create(HttpProducer.class, httpTransformer)); - - final ActorRef httpConsumer = system.actorOf( - Props.create(HttpConsumer.class, httpProducer)); - //#HttpExample - } -} diff --git a/akka-docs/rst/java/code/docs/camel/sample/route/CustomRouteBuilder.java b/akka-docs/rst/java/code/docs/camel/sample/route/CustomRouteBuilder.java deleted file mode 100644 index ad68d38609..0000000000 --- a/akka-docs/rst/java/code/docs/camel/sample/route/CustomRouteBuilder.java +++ /dev/null @@ -1,18 +0,0 @@ -package docs.camel.sample.route; - -//#CustomRouteExample -import org.apache.camel.Exchange; -import org.apache.camel.Processor; -import org.apache.camel.builder.RouteBuilder; - -public class CustomRouteBuilder extends RouteBuilder{ - public void configure() throws Exception { - from("direct:welcome").process(new Processor(){ - public void process(Exchange exchange) throws Exception { - exchange.getOut().setBody(String.format("Welcome %s", - exchange.getIn().getBody())); - } - }); - } -} 
-//#CustomRouteExample diff --git a/akka-docs/rst/java/code/docs/camel/sample/route/CustomRouteSample.java b/akka-docs/rst/java/code/docs/camel/sample/route/CustomRouteSample.java deleted file mode 100644 index f4a3760542..0000000000 --- a/akka-docs/rst/java/code/docs/camel/sample/route/CustomRouteSample.java +++ /dev/null @@ -1,23 +0,0 @@ -package docs.camel.sample.route; - -import akka.actor.*; -import akka.camel.CamelExtension; - -public class CustomRouteSample { - @SuppressWarnings("unused") - public static void main(String[] args) { - try { - //#CustomRouteExample - // the below lines can be added to a Boot class, so that you can run the - // example from a MicroKernel - ActorSystem system = ActorSystem.create("some-system"); - final ActorRef producer = system.actorOf(Props.create(Producer1.class)); - final ActorRef mediator = system.actorOf(Props.create(Transformer.class, producer)); - final ActorRef consumer = system.actorOf(Props.create(Consumer3.class, mediator)); - CamelExtension.get(system).context().addRoutes(new CustomRouteBuilder()); - //#CustomRouteExample - } catch (Exception e) { - e.printStackTrace(); - } - } -} diff --git a/akka-docs/rst/java/code/docs/camel/sample/route/Producer1.java b/akka-docs/rst/java/code/docs/camel/sample/route/Producer1.java deleted file mode 100644 index 4f937fc95f..0000000000 --- a/akka-docs/rst/java/code/docs/camel/sample/route/Producer1.java +++ /dev/null @@ -1,10 +0,0 @@ -package docs.camel.sample.route; -//#CustomRouteExample -import akka.camel.javaapi.UntypedProducerActor; - -public class Producer1 extends UntypedProducerActor{ - public String getEndpointUri() { - return "direct:welcome"; - } -} -//#CustomRouteExample diff --git a/akka-docs/rst/java/hello-world.rst b/akka-docs/rst/java/hello-world.rst index 5ba984e1a2..faa9e36aa9 100644 --- a/akka-docs/rst/java/hello-world.rst +++ b/akka-docs/rst/java/hello-world.rst @@ -2,42 +2,17 @@ The Obligatory Hello World ########################## -Since every programming 
paradigm needs to solve the tough problem of printing a -well-known greeting to the console we’ll introduce you to the actor-based -version. +The actor based version of the tough problem of printing a +well-known greeting to the console is introduced in a `Typesafe Activator `_ +tutorial named `Akka Main in Java `_. -.. includecode:: ../java/code/docs/actor/japi/HelloWorld.java#hello-world - -The ``HelloWorld`` actor is the application’s “main” class; when it terminates -the application will shut down—more on that later. The main business logic -happens in the :meth:`preStart` method, where a ``Greeter`` actor is created -and instructed to issue that greeting we crave for. When the greeter is done it -will tell us so by sending back a message, and when that message has been -received it will be passed into the :meth:`onReceive` method where we can -conclude the demonstration by stopping the ``HelloWorld`` actor. You will be -very curious to see how the ``Greeter`` actor performs the actual task: - -.. includecode:: ../java/code/docs/actor/japi/Greeter.java#greeter - -This is extremely simple now: after its creation this actor will not do -anything until someone sends it a message, and if that happens to be an -invitation to greet the world then the ``Greeter`` complies and informs the -requester that the deed has been done. - -As a Java developer you will probably want to tell us that there is no -``static public void main(...)`` anywhere in these classes, so how do we run -this program? The answer is that the appropriate :meth:`main` method is -implemented in the generic launcher class :class:`akka.Main` which expects only +The tutorial illustrates the generic launcher class :class:`akka.Main` which expects only one command line argument: the class name of the application’s main actor. 
This main method will then create the infrastructure needed for running the actors, start the given main actor and arrange for the whole application to shut down -once the main actor terminates. Thus you will be able to run the above code -with a command similar to the following:: +once the main actor terminates. - java -classpath akka.Main com.example.HelloWorld - -This conveniently assumes placement of the above class definitions in package -``com.example`` and it further assumes that you have the required JAR files for -``scala-library`` and ``akka-actor`` available. The easiest would be to manage -these dependencies with a build tool, see :ref:`build-tool`. +There is also another `Typesafe Activator `_ +tutorial in the same problem domain that is named `Hello Akka! `_. +It describes the basics of Akka in more depth. diff --git a/akka-docs/rst/java/remoting.rst b/akka-docs/rst/java/remoting.rst index f9535cb6c2..0aafdadfd2 100644 --- a/akka-docs/rst/java/remoting.rst +++ b/akka-docs/rst/java/remoting.rst @@ -246,110 +246,14 @@ This is also done via configuration:: This configuration setting will clone the actor “aggregation” 10 times and deploy it evenly distributed across the two given target nodes. -Description of the Remoting Sample -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +.. _remote-sample-java: -There is a more extensive remote example that comes with the Akka distribution. -Please have a look here for more information: `Remote Sample -<@github@/akka-samples/akka-sample-remote>`_ -This sample demonstrates both, remote deployment and look-up of remote actors. -First, let us have a look at the common setup for both scenarios (this is -``common.conf``): +Remoting Sample +^^^^^^^^^^^^^^^ -.. includecode:: ../../../akka-samples/akka-sample-remote/src/main/resources/common.conf - -This enables the remoting by installing the :class:`RemoteActorRefProvider` and -chooses the default remote transport. All other options will be set -specifically for each show case. - -.. 
note:: - - Be sure to replace the default IP 127.0.0.1 with the real address the system - is reachable by if you deploy onto multiple machines! - -.. _remote-lookup-sample-java: - -Remote Lookup -------------- - -In order to look up a remote actor, that one must be created first. For this -purpose, we configure an actor system to listen on port 2552 (this is a snippet -from ``application.conf``): - -.. includecode:: ../../../akka-samples/akka-sample-remote/src/main/resources/application.conf - :include: calculator - -Then the actor must be created. For all code which follows, assume these imports: - -.. includecode:: ../../../akka-samples/akka-sample-remote/src/main/java/sample/remote/calculator/java/JLookupApplication.java - :include: imports - -The actor doing the work will be this one: - -.. includecode:: ../../../akka-samples/akka-sample-remote/src/main/java/sample/remote/calculator/java/JSimpleCalculatorActor.java - :include: actor - -and we start it within an actor system using the above configuration - -.. includecode:: ../../../akka-samples/akka-sample-remote/src/main/java/sample/remote/calculator/java/JCalculatorApplication.java - :include: setup - -With the service actor up and running, we may look it up from another actor -system, which will be configured to use port 2553 (this is a snippet from -``application.conf``). - -.. includecode:: ../../../akka-samples/akka-sample-remote/src/main/resources/application.conf - :include: remotelookup - -The actor which will query the calculator is a quite simple one for demonstration purposes - -.. includecode:: ../../../akka-samples/akka-sample-remote/src/main/java/sample/remote/calculator/java/JLookupActor.java - :include: actor - -and it is created from an actor system using the aforementioned client’s config. - -.. 
includecode:: ../../../akka-samples/akka-sample-remote/src/main/java/sample/remote/calculator/java/JLookupApplication.java - :include: setup - -Requests which come in via ``doSomething`` will be sent to the client actor, -which will use the actor reference that was identified earlier. Observe how the actor -system name using in ``actorSelection`` matches the remote system’s name, as do IP -and port number. Top-level actors are always created below the ``"/user"`` -guardian, which supervises them. - -Remote Deployment ------------------ - -Creating remote actors instead of looking them up is not visible in the source -code, only in the configuration file. This section is used in this scenario -(this is a snippet from ``application.conf``): - -.. includecode:: ../../../akka-samples/akka-sample-remote/src/main/resources/application.conf - :include: remotecreation - -For all code which follows, assume these imports: - -.. includecode:: ../../../akka-samples/akka-sample-remote/src/main/java/sample/remote/calculator/java/JLookupApplication.java - :include: imports - -The server actor can multiply or divide numbers: - -.. includecode:: ../../../akka-samples/akka-sample-remote/src/main/java/sample/remote/calculator/java/JAdvancedCalculatorActor.java - :include: actor - -The client actor looks like in the previous example - -.. includecode:: ../../../akka-samples/akka-sample-remote/src/main/java/sample/remote/calculator/java/JCreationActor.java - :include: actor - -but the setup uses only ``actorOf``: - -.. includecode:: ../../../akka-samples/akka-sample-remote/src/main/java/sample/remote/calculator/java/JCreationApplication.java - :include: setup - -Observe how the name of the server actor matches the deployment given in the -configuration file, which will transparently delegate the actor creation to the -remote node. +There is a more extensive remote example that comes with `Typesafe Activator `_. 
+The tutorial named `Akka Remote Samples with Java `_ +demonstrates both remote deployment and look-up of remote actors. Pluggable transport support --------------------------- diff --git a/akka-docs/rst/java/untyped-actors.rst b/akka-docs/rst/java/untyped-actors.rst index 2b06e31fab..6b978b8fca 100644 --- a/akka-docs/rst/java/untyped-actors.rst +++ b/akka-docs/rst/java/untyped-actors.rst @@ -445,7 +445,7 @@ Remote actor addresses may also be looked up, if :ref:`remoting ` .. includecode:: code/docs/actor/UntypedActorDocTest.java#selection-remote -An example demonstrating remote actor look-up is given in :ref:`remote-lookup-sample-java`. +An example demonstrating remote actor look-up is given in :ref:`remote-sample-java`. .. note:: diff --git a/akka-docs/rst/scala/actors.rst b/akka-docs/rst/scala/actors.rst index 15585e0f04..4c423eb972 100644 --- a/akka-docs/rst/scala/actors.rst +++ b/akka-docs/rst/scala/actors.rst @@ -528,7 +528,7 @@ Remote actor addresses may also be looked up, if :ref:`remoting .. includecode:: code/docs/actor/ActorDocSpec.scala#selection-remote -An example demonstrating actor look-up is given in :ref:`remote-lookup-sample-scala`. +An example demonstrating actor look-up is given in :ref:`remote-sample-scala`. .. note:: diff --git a/akka-docs/rst/scala/camel.rst b/akka-docs/rst/scala/camel.rst index 5bce74953a..a3f94baf8b 100644 --- a/akka-docs/rst/scala/camel.rst +++ b/akka-docs/rst/scala/camel.rst @@ -308,7 +308,7 @@ to do other work) and resume processing when the response is ready. This is currently the case for a `subset of components`_ such as the `Jetty component`_. All other Camel components can still be used, of course, but they will cause allocation of a thread for the duration of an in-out message exchange. There's -also a :ref:`camel-async-example` that implements both, an asynchronous +also :ref:`camel-examples` that implements both, an asynchronous consumer and an asynchronous producer, with the jetty component. 
If the used Camel component is blocking it might be necessary to use a separate @@ -463,110 +463,19 @@ __ https://svn.apache.org/repos/asf/camel/tags/camel-2.8.0/camel-core/src/main/j Examples ======== -.. _camel-async-example: +The `Typesafe Activator `_ +tutorial named `Akka Camel Samples with Scala `_ +contains 3 samples: -Asynchronous routing and transformation example ------------------------------------------------ + * Asynchronous routing and transformation - This example demonstrates how to implement consumer and + producer actors that support :ref:`camel-asynchronous-routing` with their Camel endpoints. + + * Custom Camel route - Demonstrates the combined usage of a ``Producer`` and a + ``Consumer`` actor as well as the inclusion of a custom Camel route. -This example demonstrates how to implement consumer and producer actors that -support :ref:`camel-asynchronous-routing` with their Camel endpoints. The sample -application transforms the content of the Akka homepage, http://akka.io, by -replacing every occurrence of *Akka* with *AKKA*. To run this example, add -a Boot class that starts the actors. After starting -the :ref:`microkernel-scala`, direct the browser to http://localhost:8875 and the -transformed Akka homepage should be displayed. Please note that this example -will probably not work if you're behind an HTTP proxy. + * Quartz Scheduler Example - Showing how simple it is to implement a cron-style scheduler by + using the Camel Quartz component -The following figure gives an overview how the example actors interact with -external systems and with each other. A browser sends a GET request to -http://localhost:8875 which is the published endpoint of the ``HttpConsumer`` -actor. The ``HttpConsumer`` actor forwards the requests to the ``HttpProducer`` -actor which retrieves the Akka homepage from http://akka.io. The retrieved HTML -is then forwarded to the ``HttpTransformer`` actor which replaces all occurrences -of *Akka* with *AKKA*.
The transformation result is sent back the HttpConsumer -which finally returns it to the browser. - -.. image:: ../images/camel-async-interact.png - -Implementing the example actor classes and wiring them together is rather easy -as shown in the following snippet. - -.. includecode:: code/docs/camel/HttpExample.scala#HttpExample - -The `jetty endpoints`_ of HttpConsumer and HttpProducer support asynchronous -in-out message exchanges and do not allocate threads for the full duration of -the exchange. This is achieved by using `Jetty continuations`_ on the -consumer-side and by using `Jetty's asynchronous HTTP client`_ on the producer -side. The following high-level sequence diagram illustrates that. - -.. _jetty endpoints: http://camel.apache.org/jetty.html -.. _Jetty continuations: http://wiki.eclipse.org/Jetty/Feature/Continuations -.. _Jetty's asynchronous HTTP client: http://wiki.eclipse.org/Jetty/Tutorial/HttpClient - -.. image:: ../images/camel-async-sequence.png - -Custom Camel route example --------------------------- - -This section also demonstrates the combined usage of a ``Producer`` and a -``Consumer`` actor as well as the inclusion of a custom Camel route. The -following figure gives an overview. - -.. image:: ../images/camel-custom-route.png - -* A consumer actor receives a message from an HTTP client - -* It forwards the message to another actor that transforms the message (encloses - the original message into hyphens) - -* The transformer actor forwards the transformed message to a producer actor - -* The producer actor sends the message to a custom Camel route beginning at the - ``direct:welcome`` endpoint - -* A processor (transformer) in the custom Camel route prepends "Welcome" to the - original message and creates a result message - -* The producer actor sends the result back to the consumer actor which returns - it to the HTTP client - - -The consumer, transformer and -producer actor implementations are as follows. - -.. 
includecode:: code/docs/camel/CustomRouteExample.scala#CustomRouteExample - - -The producer actor knows where to reply the message to because the consumer and -transformer actors have forwarded the original sender reference as well. The -application configuration and the route starting from direct:welcome are done in the code above. - -To run the example, add the lines shown in the example to a Boot class and the start the :ref:`microkernel-scala` and POST a message to -``http://localhost:8877/camel/welcome``. - -.. code-block:: none - - curl -H "Content-Type: text/plain" -d "Anke" http://localhost:8877/camel/welcome - -The response should be: - -.. code-block:: none - - Welcome - Anke - - -Quartz Scheduler Example ------------------------- - -Here is an example showing how simple is to implement a cron-style scheduler by -using the Camel Quartz component in Akka. - -The following example creates a "timer" actor which fires a message every 2 -seconds: - -.. includecode:: code/docs/camel/QuartzExample.scala#Quartz - -For more information about the Camel Quartz component, see here: -http://camel.apache.org/quartz.html Additional Resources ==================== diff --git a/akka-docs/rst/scala/cluster-usage.rst b/akka-docs/rst/scala/cluster-usage.rst index aeba1ec383..72fd751ada 100644 --- a/akka-docs/rst/scala/cluster-usage.rst +++ b/akka-docs/rst/scala/cluster-usage.rst @@ -17,15 +17,12 @@ The Akka cluster is a separate jar file. Make sure that you have the following d A Simple Cluster Example ^^^^^^^^^^^^^^^^^^^^^^^^ -The following small program together with its configuration starts an ``ActorSystem`` -with the Cluster enabled. It joins the cluster and logs some membership events. +The following configuration enables the ``Cluster`` extension to be used. +It joins the cluster and an actor subscribes to cluster membership events and logs them. -Try it out: +The ``application.conf`` configuration looks like this: -1. 
Add the following ``application.conf`` in your project, place it in ``src/main/resources``: - - -.. includecode:: ../../../akka-samples/akka-sample-cluster/src/main/resources/application.conf#cluster +.. includecode:: ../../../akka-samples/akka-sample-cluster-scala/src/main/resources/application.conf To enable cluster capabilities in your Akka project you should, at a minimum, add the :ref:`remoting-scala` settings, but with ``akka.cluster.ClusterActorRefProvider``. @@ -36,48 +33,17 @@ The seed nodes are configured contact points for initial, automatic, join of the Note that if you are going to start the nodes on different machines you need to specify the ip-addresses or host names of the machines in ``application.conf`` instead of ``127.0.0.1`` -2. Add the following main program to your project, place it in ``src/main/scala``: +An actor that uses the cluster extension may look like this: -.. literalinclude:: ../../../akka-samples/akka-sample-cluster/src/main/scala/sample/cluster/simple/SimpleClusterApp.scala +.. literalinclude:: ../../../akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/simple/SimpleClusterListener.scala :language: scala +The actor registers itself as subscriber of certain cluster events. It gets notified with a snapshot event, ``CurrentClusterState`` +that holds full state information of the cluster. After that it receives events for changes that happen in the cluster. -3. Start the first seed node. Open a sbt session in one terminal window and run:: - - run-main sample.cluster.simple.SimpleClusterApp 2551 - -2551 corresponds to the port of the first seed-nodes element in the configuration. -In the log output you see that the cluster node has been started and changed status to 'Up'. - -4. Start the second seed node. Open a sbt session in another terminal window and run:: - - run-main sample.cluster.simple.SimpleClusterApp 2552 - - -2552 corresponds to the port of the second seed-nodes element in the configuration. 
-In the log output you see that the cluster node has been started and joins the other seed node -and becomes a member of the cluster. Its status changed to 'Up'. - -Switch over to the first terminal window and see in the log output that the member joined. - -5. Start another node. Open a sbt session in yet another terminal window and run:: - - run-main sample.cluster.simple.SimpleClusterApp - -Now you don't need to specify the port number, and it will use a random available port. -It joins one of the configured seed nodes. Look at the log output in the different terminal -windows. - -Start even more nodes in the same way, if you like. - -6. Shut down one of the nodes by pressing 'ctrl-c' in one of the terminal windows. -The other nodes will detect the failure after a while, which you can see in the log -output in the other terminals. - -Look at the source code of the program again. What it does is to create an actor -and register it as subscriber of certain cluster events. It gets notified with -an snapshot event, ``CurrentClusterState`` that holds full state information of -the cluster. After that it receives events for changes that happen in the cluster. +The easiest way to run this example yourself is to download `Typesafe Activator `_ +and open the tutorial named `Akka Cluster Samples with Scala `_. +It contains instructions of how to run the SimpleClusterApp. Joining to Seed Nodes ^^^^^^^^^^^^^^^^^^^^^ @@ -230,17 +196,13 @@ backend workers, which performs the transformation job, and sends the result bac the original client. New backend nodes, as well as new frontend nodes, can be added or removed to the cluster dynamically. -In this example the following imports are used: - -.. includecode:: ../../../akka-samples/akka-sample-cluster/src/main/scala/sample/cluster/transformation/TransformationSample.scala#imports - Messages: -.. 
includecode:: ../../../akka-samples/akka-sample-cluster/src/main/scala/sample/cluster/transformation/TransformationSample.scala#messages +.. includecode:: ../../../akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/transformation/TransformationMessages.scala#messages The backend worker that performs the transformation job: -.. includecode:: ../../../akka-samples/akka-sample-cluster/src/main/scala/sample/cluster/transformation/TransformationSample.scala#backend +.. includecode:: ../../../akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/transformation/TransformationBackend.scala#backend Note that the ``TransformationBackend`` actor subscribes to cluster events to detect new, potential, frontend nodes, and send them a registration message so that they know @@ -248,31 +210,17 @@ that they can use the backend worker. The frontend that receives user jobs and delegates to one of the registered backend workers: -.. includecode:: ../../../akka-samples/akka-sample-cluster/src/main/scala/sample/cluster/transformation/TransformationSample.scala#frontend +.. includecode:: ../../../akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/transformation/TransformationFrontend.scala#frontend Note that the ``TransformationFrontend`` actor watch the registered backend -to be able to remove it from its list of availble backend workers. +to be able to remove it from its list of available backend workers. Death watch uses the cluster failure detector for nodes in the cluster, i.e. it detects network failures and JVM crashes, in addition to graceful termination of watched actor. -This example is included in ``akka-samples/akka-sample-cluster`` -and you can try by starting nodes in different terminal windows. 
For example, starting 2 -frontend nodes and 3 backend nodes:: - - sbt - - project akka-sample-cluster - - run-main sample.cluster.transformation.TransformationFrontend 2551 - - run-main sample.cluster.transformation.TransformationBackend 2552 - - run-main sample.cluster.transformation.TransformationBackend - - run-main sample.cluster.transformation.TransformationBackend - - run-main sample.cluster.transformation.TransformationFrontend +The `Typesafe Activator `_ tutorial named +`Akka Cluster Samples with Scala `_ +contains the full source code and instructions of how to run the **Worker Dial-in Example**. Node Roles ^^^^^^^^^^ @@ -295,18 +243,18 @@ members have joined, and the cluster has reached a certain size. With a configuration option you can define required number of members before the leader changes member status of 'Joining' members to 'Up'. -.. includecode:: ../../../akka-samples/akka-sample-cluster/src/main/resources/factorial.conf#min-nr-of-members +.. includecode:: ../../../akka-samples/akka-sample-cluster-scala/src/main/resources/factorial.conf#min-nr-of-members In a similar way you can define required number of members of a certain role before the leader changes member status of 'Joining' members to 'Up'. -.. includecode:: ../../../akka-samples/akka-sample-cluster/src/main/resources/factorial.conf#role-min-nr-of-members +.. includecode:: ../../../akka-samples/akka-sample-cluster-scala/src/main/resources/factorial.conf#role-min-nr-of-members You can start the actors in a ``registerOnMemberUp`` callback, which will be invoked when the current member status is changed tp 'Up', i.e. the cluster has at least the defined number of members. -.. includecode:: ../../../akka-samples/akka-sample-cluster/src/main/scala/sample/cluster/factorial/FactorialSample.scala#registerOnUp +..
includecode:: ../../../akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/factorial/FactorialFrontend.scala#registerOnUp This callback can be used for other things than starting actors. @@ -439,7 +387,7 @@ Router with Group of Routees When using a ``Group`` you must start the routee actors on the cluster member nodes. That is not done by the router. The configuration for a group looks like this: -.. includecode:: ../../../akka-samples/akka-sample-cluster/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSpec.scala#router-lookup-config +.. includecode:: ../../../akka-samples/akka-sample-cluster-scala/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSpec.scala#router-lookup-config .. note:: @@ -457,7 +405,7 @@ to a high value will result in new routees added to the router when nodes join t The same type of router could also have been defined in code: -.. includecode:: ../../../akka-samples/akka-sample-cluster/src/main/scala/sample/cluster/stats/StatsSample.scala#router-lookup-in-code +.. includecode:: ../../../akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/stats/Extra.scala#router-lookup-in-code See :ref:`cluster_configuration_scala` section for further descriptions of the settings. @@ -473,21 +421,17 @@ to count number of characters in each word to a separate worker, a routee of a r The character count for each word is sent back to an aggregator that calculates the average number of characters per word when all results have been collected. -In this example we use the following imports: - -.. includecode:: ../../../akka-samples/akka-sample-cluster/src/main/scala/sample/cluster/stats/StatsSample.scala#imports - Messages: -.. includecode:: ../../../akka-samples/akka-sample-cluster/src/main/scala/sample/cluster/stats/StatsSample.scala#messages +.. 
includecode:: ../../../akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/stats/StatsMessages.scala#messages The worker that counts number of characters in each word: -.. includecode:: ../../../akka-samples/akka-sample-cluster/src/main/scala/sample/cluster/stats/StatsSample.scala#worker +.. includecode:: ../../../akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/stats/StatsWorker.scala#worker The service that receives text from users and splits it up into words, delegates to workers and aggregates: -.. includecode:: ../../../akka-samples/akka-sample-cluster/src/main/scala/sample/cluster/stats/StatsSample.scala#service +.. includecode:: ../../../akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/stats/StatsService.scala#service Note, nothing cluster specific so far, just plain actors. @@ -495,27 +439,14 @@ Note, nothing cluster specific so far, just plain actors. All nodes start ``StatsService`` and ``StatsWorker`` actors. Remember, routees are the workers in this case. The router is configured with ``routees.paths``: -.. includecode:: ../../../akka-samples/akka-sample-cluster/src/main/resources/application.conf#config-router-lookup +.. includecode:: ../../../akka-samples/akka-sample-cluster-scala/src/main/resources/stats1.conf#config-router-lookup This means that user requests can be sent to ``StatsService`` on any node and it will use -``StatsWorker`` on all nodes. There can only be one worker per node, but that worker could easily -fan out to local children if more parallelism is needed. +``StatsWorker`` on all nodes. -This example is included in ``akka-samples/akka-sample-cluster`` -and you can try by starting nodes in different terminal windows. 
For example, starting 3 -service nodes and 1 client:: - - sbt - - project akka-sample-cluster - - run-main sample.cluster.stats.StatsSample 2551 - - run-main sample.cluster.stats.StatsSample 2552 - - run-main sample.cluster.stats.StatsSampleClient - - run-main sample.cluster.stats.StatsSample +The `Typesafe Activator `_ tutorial named +`Akka Cluster Samples with Scala `_ +contains the full source code and instructions of how to run the **Router Example with Group of Routees**. Router with Pool of Remote Deployed Routees ------------------------------------------- @@ -523,7 +454,7 @@ Router with Pool of Remote Deployed Routees When using a ``Pool`` with routees created and deployed on the cluster member nodes the configuration for a router looks like this: -.. includecode:: ../../../akka-samples/akka-sample-cluster/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSingleMasterSpec.scala#router-deploy-config +.. includecode:: ../../../akka-samples/akka-sample-cluster-scala/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSingleMasterSpec.scala#router-deploy-config It is possible to limit the deployment of routees to member nodes tagged with a certain role by specifying ``use-role``. @@ -535,7 +466,7 @@ the cluster. The same type of router could also have been defined in code: -.. includecode:: ../../../akka-samples/akka-sample-cluster/src/main/scala/sample/cluster/stats/StatsSample.scala#router-deploy-in-code +.. includecode:: ../../../akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/stats/Extra.scala#router-deploy-in-code See :ref:`cluster_configuration_scala` section for further descriptions of the settings. @@ -546,35 +477,23 @@ Let's take a look at how to use a cluster aware router on single master node tha and deploys workers. To keep track of a single master we use the :ref:`cluster-singleton` in the contrib module. The ``ClusterSingletonManager`` is started on each node. -..
includecode:: ../../../akka-samples/akka-sample-cluster/src/main/scala/sample/cluster/stats/StatsSample.scala#create-singleton-manager +.. includecode:: ../../../akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/stats/StatsSampleOneMaster.scala#create-singleton-manager We also need an actor on each node that keeps track of where current single master exists and delegates jobs to the ``StatsService``. -.. includecode:: ../../../akka-samples/akka-sample-cluster/src/main/scala/sample/cluster/stats/StatsSample.scala#facade +.. includecode:: ../../../akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/stats/StatsFacade.scala#facade The ``StatsFacade`` receives text from users and delegates to the current ``StatsService``, the single master. It listens to cluster events to lookup the ``StatsService`` on the oldest node. All nodes start ``StatsFacade`` and the ``ClusterSingletonManager``. The router is now configured like this: -.. includecode:: ../../../akka-samples/akka-sample-cluster/src/main/resources/application.conf#config-router-deploy - - -This example is included in ``akka-samples/akka-sample-cluster`` -and you can try by starting nodes in different terminal windows. For example, starting 3 -service nodes and 1 client:: - - run-main sample.cluster.stats.StatsSampleOneMaster 2551 - - run-main sample.cluster.stats.StatsSampleOneMaster 2552 - - run-main sample.cluster.stats.StatsSampleOneMasterClient - - run-main sample.cluster.stats.StatsSampleOneMaster - -.. note:: The above example will be simplified when the cluster handles automatic actor partitioning. +.. includecode:: ../../../akka-samples/akka-sample-cluster-scala/src/main/resources/stats2.conf#config-router-deploy +The `Typesafe Activator `_ tutorial named +`Akka Cluster Samples with Scala `_ +contains the full source code and instructions of how to run the **Router Example with Pool of Remote Deployed Routees**.
Cluster Metrics ^^^^^^^^^^^^^^^ @@ -609,57 +528,40 @@ It can be configured to use a specific MetricsSelector to produce the probabilit The collected metrics values are smoothed with `exponential weighted moving average `_. In the :ref:`cluster_configuration_scala` you can adjust how quickly past data is decayed compared to new data. -Let's take a look at this router in action. - -In this example the following imports are used: - -.. includecode:: ../../../akka-samples/akka-sample-cluster/src/main/scala/sample/cluster/factorial/FactorialSample.scala#imports +Let's take a look at this router in action. What can be more demanding than calculating factorials? The backend worker that performs the factorial calculation: -.. includecode:: ../../../akka-samples/akka-sample-cluster/src/main/scala/sample/cluster/factorial/FactorialSample.scala#backend +.. includecode:: ../../../akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/factorial/FactorialBackend.scala#backend The frontend that receives user jobs and delegates to the backends via the router: -.. includecode:: ../../../akka-samples/akka-sample-cluster/src/main/scala/sample/cluster/factorial/FactorialSample.scala#frontend +.. includecode:: ../../../akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/factorial/FactorialFrontend.scala#frontend As you can see, the router is defined in the same way as other routers, and in this case it is configured as follows: -.. includecode:: ../../../akka-samples/akka-sample-cluster/src/main/resources/application.conf#adaptive-router +.. includecode:: ../../../akka-samples/akka-sample-cluster-scala/src/main/resources/factorial.conf#adaptive-router It is only router type ``adaptive`` and the ``metrics-selector`` that is specific to this router, other things work in the same way as other routers. The same type of router could also have been defined in code: -.. 
includecode:: ../../../akka-samples/akka-sample-cluster/src/main/scala/sample/cluster/factorial/FactorialSample.scala#router-lookup-in-code +.. includecode:: ../../../akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/factorial/Extra.scala#router-lookup-in-code -.. includecode:: ../../../akka-samples/akka-sample-cluster/src/main/scala/sample/cluster/factorial/FactorialSample.scala#router-deploy-in-code +.. includecode:: ../../../akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/factorial/Extra.scala#router-deploy-in-code -This example is included in ``akka-samples/akka-sample-cluster`` -and you can try by starting nodes in different terminal windows. For example, starting 3 backend nodes and one frontend:: - - sbt - - project akka-sample-cluster - - run-main sample.cluster.factorial.FactorialBackend 2551 - - run-main sample.cluster.factorial.FactorialBackend 2552 - - run-main sample.cluster.factorial.FactorialBackend - - run-main sample.cluster.factorial.FactorialFrontend - -Press ctrl-c in the terminal window of the frontend to stop the factorial calculations. +The `Typesafe Activator `_ tutorial named +`Akka Cluster Samples with Scala `_ +contains the full source code and instructions on how to run the **Adaptive Load Balancing** sample. Subscribe to Metrics Events --------------------------- It is possible to subscribe to the metrics events directly to implement other functionality. -.. includecode:: ../../../akka-samples/akka-sample-cluster/src/main/scala/sample/cluster/factorial/FactorialSample.scala#metrics-listener +.. includecode:: ../../../akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/factorial/MetricsListener.scala#metrics-listener Custom Metrics Collector ------------------------ @@ -679,14 +581,14 @@ add the ``sbt-multi-jvm`` plugin and the dependency to ``akka-multi-node-testkit First, as described in :ref:`multi-node-testing`, we need some scaffolding to configure the ``MultiNodeSpec``. 
Define the participating roles and their :ref:`cluster_configuration_scala` in an object extending ``MultiNodeConfig``: -.. includecode:: ../../../akka-samples/akka-sample-cluster/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSpec.scala +.. includecode:: ../../../akka-samples/akka-sample-cluster-scala/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSpec.scala :include: MultiNodeConfig :exclude: router-lookup-config Define one concrete test class for each role/node. These will be instantiated on the different nodes (JVMs). They can be implemented differently, but often they are the same and extend an abstract test class, as illustrated here. -.. includecode:: ../../../akka-samples/akka-sample-cluster/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSpec.scala#concrete-tests +.. includecode:: ../../../akka-samples/akka-sample-cluster-scala/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSpec.scala#concrete-tests Note the naming convention of these classes. The name of the classes must end with ``MultiJvmNode1``, ``MultiJvmNode2`` and so on. It is possible to define another suffix to be used by the ``sbt-multi-jvm``, but the default should be @@ -694,18 +596,18 @@ fine in most cases. Then the abstract ``MultiNodeSpec``, which takes the ``MultiNodeConfig`` as constructor parameter. -.. includecode:: ../../../akka-samples/akka-sample-cluster/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSpec.scala#abstract-test +.. includecode:: ../../../akka-samples/akka-sample-cluster-scala/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSpec.scala#abstract-test Most of this can of course be extracted to a separate trait to avoid repeating this in all your tests. Typically you begin your test by starting up the cluster and let the members join, and create some actors. That can be done like this: -.. includecode:: ../../../akka-samples/akka-sample-cluster/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSpec.scala#startup-cluster +.. 
includecode:: ../../../akka-samples/akka-sample-cluster-scala/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSpec.scala#startup-cluster From the test you interact with the cluster using the ``Cluster`` extension, e.g. ``join``. -.. includecode:: ../../../akka-samples/akka-sample-cluster/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSpec.scala#join +.. includecode:: ../../../akka-samples/akka-sample-cluster-scala/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSpec.scala#join Notice how the `testActor` from :ref:`testkit ` is added as :ref:`subscriber ` to cluster changes and then waiting for certain events, such as in this case all members becoming 'Up'. @@ -713,7 +615,7 @@ to cluster changes and then waiting for certain events, such as in this case all The above code was running for all roles (JVMs). ``runOn`` is a convenient utility to declare that a certain block of code should only run for a specific role. -.. includecode:: ../../../akka-samples/akka-sample-cluster/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSpec.scala#test-statsService +.. includecode:: ../../../akka-samples/akka-sample-cluster-scala/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSpec.scala#test-statsService Once again we take advantage of the facilities in :ref:`testkit ` to verify expected behavior. Here using ``testActor`` as sender (via ``ImplicitSender``) and verifing the reply with ``expectMsgPF``. @@ -721,7 +623,7 @@ Here using ``testActor`` as sender (via ``ImplicitSender``) and verifing the rep In the above code you can see ``node(third)``, which is useful facility to get the root actor reference of the actor system for a specific role. This can also be used to grab the ``akka.actor.Address`` of that node. -.. includecode:: ../../../akka-samples/akka-sample-cluster/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSpec.scala#addresses +.. 
includecode:: ../../../akka-samples/akka-sample-cluster-scala/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSpec.scala#addresses .. _cluster_jmx_scala: diff --git a/akka-docs/rst/scala/code/docs/actor/ActorDocSpec.scala b/akka-docs/rst/scala/code/docs/actor/ActorDocSpec.scala index b0573d6995..ccc61aa5cc 100644 --- a/akka-docs/rst/scala/code/docs/actor/ActorDocSpec.scala +++ b/akka-docs/rst/scala/code/docs/actor/ActorDocSpec.scala @@ -26,8 +26,8 @@ import scala.concurrent.Await class MyActor extends Actor { val log = Logging(context.system, this) def receive = { - case "test" ⇒ log.info("received test") - case _ ⇒ log.info("received unknown message") + case "test" => log.info("received test") + case _ => log.info("received unknown message") } } //#my-actor @@ -40,14 +40,14 @@ class FirstActor extends Actor { val child = context.actorOf(Props[MyActor], name = "myChild") //#plus-some-behavior def receive = { - case x ⇒ sender ! x + case x => sender ! x } //#plus-some-behavior } //#context-actorOf class ActorWithArgs(arg: String) extends Actor { - def receive = { case _ ⇒ () } + def receive = { case _ => () } } class DemoActorWrapper extends Actor { @@ -64,7 +64,7 @@ class DemoActorWrapper extends Actor { class DemoActor(magicNumber: Int) extends Actor { def receive = { - case x: Int ⇒ sender ! (x + magicNumber) + case x: Int => sender ! (x + magicNumber) } } @@ -79,10 +79,10 @@ class DemoActorWrapper extends Actor { class AnonymousActor extends Actor { //#anonymous-actor def receive = { - case m: DoIt ⇒ + case m: DoIt => context.actorOf(Props(new Actor { def receive = { - case DoIt(msg) ⇒ + case DoIt(msg) => val replyMsg = doSomeDangerousWork(msg) sender ! replyMsg context.stop(self) @@ -112,13 +112,13 @@ class Hook extends Actor { class ReplyException extends Actor { def receive = { - case _ ⇒ + case _ => //#reply-exception try { val result = operation() sender ! result } catch { - case e: Exception ⇒ + case e: Exception => sender ! 
akka.actor.Status.Failure(e) throw e } @@ -136,10 +136,10 @@ class Swapper extends Actor { val log = Logging(system, this) def receive = { - case Swap ⇒ + case Swap => log.info("Hi") become({ - case Swap ⇒ + case Swap => log.info("Ho") unbecome() // resets the latest 'become' (just for fun) }, discardOld = false) // push on top instead of replace @@ -166,7 +166,7 @@ abstract class GenericActor extends Actor { // generic message handler def genericMessageHandler: Receive = { - case event ⇒ printf("generic: %s\n", event) + case event => printf("generic: %s\n", event) } def receive = specificMessageHandler orElse genericMessageHandler @@ -174,7 +174,7 @@ abstract class GenericActor extends Actor { class SpecificActor extends GenericActor { def specificMessageHandler = { - case event: MyMsg ⇒ printf("specific: %s\n", event.subject) + case event: MyMsg => printf("specific: %s\n", event.subject) } } @@ -190,7 +190,7 @@ class ActorDocSpec extends AkkaSpec(Map("akka.loglevel" -> "INFO")) { import context._ val myActor = actorOf(Props[MyActor], name = "myactor") def receive = { - case x ⇒ myActor ! x + case x => myActor ! x } } //#import-context @@ -207,17 +207,17 @@ class ActorDocSpec extends AkkaSpec(Map("akka.loglevel" -> "INFO")) { // TODO: convert docs to AkkaSpec(Map(...)) val filter = EventFilter.custom { - case e: Logging.Info ⇒ true - case _ ⇒ false + case e: Logging.Info => true + case _ => false } system.eventStream.publish(TestEvent.Mute(filter)) system.eventStream.subscribe(testActor, classOf[Logging.Info]) myActor ! "test" - expectMsgPF(1 second) { case Logging.Info(_, _, "received test") ⇒ true } + expectMsgPF(1 second) { case Logging.Info(_, _, "received test") => true } myActor ! 
"unknown" - expectMsgPF(1 second) { case Logging.Info(_, _, "received unknown message") ⇒ true } + expectMsgPF(1 second) { case Logging.Info(_, _, "received unknown message") => true } system.eventStream.unsubscribe(testActor) system.eventStream.publish(TestEvent.UnMute(filter)) @@ -245,7 +245,7 @@ class ActorDocSpec extends AkkaSpec(Map("akka.loglevel" -> "INFO")) { //#creating-props-deprecated // DEPRECATED: old case class signature val props4 = Props( - creator = { () ⇒ new MyActor }, + creator = { () => new MyActor }, dispatcher = "my-dispatcher") // DEPRECATED due to duplicate functionality with Props.apply() @@ -273,8 +273,8 @@ class ActorDocSpec extends AkkaSpec(Map("akka.loglevel" -> "INFO")) { "creating actor with IndirectActorProducer" in { class Echo(name: String) extends Actor { def receive = { - case n: Int ⇒ sender ! name - case message ⇒ + case n: Int => sender ! name + case message => val target = testActor //#forward target forward message @@ -348,10 +348,10 @@ class ActorDocSpec extends AkkaSpec(Map("akka.loglevel" -> "INFO")) { // To set an initial delay context.setReceiveTimeout(30 milliseconds) def receive = { - case "Hello" ⇒ + case "Hello" => // To set in a response to a message context.setReceiveTimeout(100 milliseconds) - case ReceiveTimeout ⇒ + case ReceiveTimeout => // To turn it off context.setReceiveTimeout(Duration.Undefined) throw new RuntimeException("Receive timed out") @@ -364,18 +364,18 @@ class ActorDocSpec extends AkkaSpec(Map("akka.loglevel" -> "INFO")) { class HotSwapActor extends Actor { import context._ def angry: Receive = { - case "foo" ⇒ sender ! "I am already angry?" - case "bar" ⇒ become(happy) + case "foo" => sender ! "I am already angry?" + case "bar" => become(happy) } def happy: Receive = { - case "bar" ⇒ sender ! "I am already happy :-)" - case "foo" ⇒ become(angry) + case "bar" => sender ! 
"I am already happy :-)" + case "foo" => become(angry) } def receive = { - case "foo" ⇒ become(angry) - case "bar" ⇒ become(happy) + case "foo" => become(angry) + case "bar" => become(happy) } } //#hot-swap-actor @@ -389,16 +389,16 @@ class ActorDocSpec extends AkkaSpec(Map("akka.loglevel" -> "INFO")) { import akka.actor.Stash class ActorWithProtocol extends Actor with Stash { def receive = { - case "open" ⇒ + case "open" => unstashAll() context.become({ - case "write" ⇒ // do writing... - case "close" ⇒ + case "write" => // do writing... + case "close" => unstashAll() context.unbecome() - case msg ⇒ stash() + case msg => stash() }, discardOld = false) // stack on top instead of replacing - case msg ⇒ stash() + case msg => stash() } } //#stash @@ -415,9 +415,9 @@ class ActorDocSpec extends AkkaSpec(Map("akka.loglevel" -> "INFO")) { var lastSender = system.deadLetters def receive = { - case "kill" ⇒ + case "kill" => context.stop(child); lastSender = sender - case Terminated(`child`) ⇒ lastSender ! "finished" + case Terminated(`child`) => lastSender ! "finished" } } //#watch @@ -457,15 +457,15 @@ class ActorDocSpec extends AkkaSpec(Map("akka.loglevel" -> "INFO")) { context.actorSelection("/user/another") ! 
Identify(identifyId) def receive = { - case ActorIdentity(`identifyId`, Some(ref)) ⇒ + case ActorIdentity(`identifyId`, Some(ref)) => context.watch(ref) context.become(active(ref)) - case ActorIdentity(`identifyId`, None) ⇒ context.stop(self) + case ActorIdentity(`identifyId`, None) => context.stop(self) } def active(another: ActorRef): Actor.Receive = { - case Terminated(`another`) ⇒ context.stop(self) + case Terminated(`another`) => context.stop(self) } } //#identify @@ -490,7 +490,7 @@ class ActorDocSpec extends AkkaSpec(Map("akka.loglevel" -> "INFO")) { // the actor has been stopped } catch { // the actor wasn't stopped within 5 seconds - case e: akka.pattern.AskTimeoutException ⇒ + case e: akka.pattern.AskTimeoutException => } //#gracefulStop } @@ -507,9 +507,9 @@ class ActorDocSpec extends AkkaSpec(Map("akka.loglevel" -> "INFO")) { val f: Future[Result] = for { - x ← ask(actorA, Request).mapTo[Int] // call pattern directly - s ← (actorB ask Request).mapTo[String] // call by implicit conversion - d ← (actorC ? Request).mapTo[Double] // call by symbolic name + x <- ask(actorA, Request).mapTo[Int] // call pattern directly + s <- (actorB ask Request).mapTo[String] // call by implicit conversion + d <- (actorC ? Request).mapTo[Double] // call by symbolic name } yield Result(x, s, d) f pipeTo actorD // .. or .. @@ -519,12 +519,12 @@ class ActorDocSpec extends AkkaSpec(Map("akka.loglevel" -> "INFO")) { class Replier extends Actor { def receive = { - case ref: ActorRef ⇒ + case ref: ActorRef => //#reply-with-sender sender.tell("reply", context.parent) // replies will go back to parent sender.!("reply")(context.parent) // alternative syntax (beware of the parens!) //#reply-with-sender - case x ⇒ + case x => //#reply-without-sender sender ! 
x // replies will go to this actor //#reply-without-sender @@ -547,8 +547,8 @@ class ActorDocSpec extends AkkaSpec(Map("akka.loglevel" -> "INFO")) { "using ActorDSL outside of akka.actor package" in { import akka.actor.ActorDSL._ actor(new Act { - superviseWith(OneForOneStrategy() { case _ ⇒ Stop; Restart; Resume; Escalate }) - superviseWith(AllForOneStrategy() { case _ ⇒ Stop; Restart; Resume; Escalate }) + superviseWith(OneForOneStrategy() { case _ => Stop; Restart; Resume; Escalate }) + superviseWith(AllForOneStrategy() { case _ => Stop; Restart; Resume; Escalate }) }) } @@ -561,9 +561,9 @@ class ActorDocSpec extends AkkaSpec(Map("akka.loglevel" -> "INFO")) { private var pfsOption: Option[Vector[PF]] = Some(Vector.empty) - private def mapPfs[C](f: Vector[PF] ⇒ (Option[Vector[PF]], C)): C = { + private def mapPfs[C](f: Vector[PF] => (Option[Vector[PF]], C)): C = { pfsOption.fold(throw new IllegalStateException("Already built"))(f) match { - case (newPfsOption, result) ⇒ { + case (newPfsOption, result) => { pfsOption = newPfsOption result } @@ -571,10 +571,10 @@ class ActorDocSpec extends AkkaSpec(Map("akka.loglevel" -> "INFO")) { } def +=(pf: PF): Unit = - mapPfs { case pfs ⇒ (Some(pfs :+ pf), ()) } + mapPfs { case pfs => (Some(pfs :+ pf), ()) } def result(): PF = - mapPfs { case pfs ⇒ (None, pfs.foldLeft[PF](Map.empty) { _ orElse _ }) } + mapPfs { case pfs => (None, pfs.foldLeft[PF](Map.empty) { _ orElse _ }) } } trait ComposableActor extends Actor { @@ -584,13 +584,13 @@ class ActorDocSpec extends AkkaSpec(Map("akka.loglevel" -> "INFO")) { trait TheirComposableActor extends ComposableActor { receiveBuilder += { - case "foo" ⇒ sender ! "foo received" + case "foo" => sender ! "foo received" } } class MyComposableActor extends TheirComposableActor { receiveBuilder += { - case "bar" ⇒ sender ! "bar received" + case "bar" => sender ! 
"bar received" } } //#receive-orElse2 diff --git a/akka-docs/rst/scala/code/docs/actor/FSMDocSpec.scala b/akka-docs/rst/scala/code/docs/actor/FSMDocSpec.scala index 40d5817eac..7cf5c65d58 100644 --- a/akka-docs/rst/scala/code/docs/actor/FSMDocSpec.scala +++ b/akka-docs/rst/scala/code/docs/actor/FSMDocSpec.scala @@ -5,7 +5,7 @@ package docs.actor import language.postfixOps -import akka.testkit.{ AkkaSpec ⇒ MyFavoriteTestFrameWorkPlusAkkaTestKit } +import akka.testkit.{ AkkaSpec => MyFavoriteTestFrameWorkPlusAkkaTestKit } import akka.util.ByteString //#test-code @@ -46,23 +46,23 @@ class FSMDocSpec extends MyFavoriteTestFrameWorkPlusAkkaTestKit { //#when-syntax when(Idle) { - case Event(SetTarget(ref), Uninitialized) ⇒ + case Event(SetTarget(ref), Uninitialized) => stay using Todo(ref, Vector.empty) } //#when-syntax //#transition-elided onTransition { - case Active -> Idle ⇒ + case Active -> Idle => stateData match { - case Todo(ref, queue) ⇒ ref ! Batch(queue) + case Todo(ref, queue) => ref ! 
Batch(queue) } } //#transition-elided //#when-syntax when(Active, stateTimeout = 1 second) { - case Event(Flush | StateTimeout, t: Todo) ⇒ + case Event(Flush | StateTimeout, t: Todo) => goto(Idle) using t.copy(queue = Vector.empty) } //#when-syntax @@ -70,10 +70,10 @@ class FSMDocSpec extends MyFavoriteTestFrameWorkPlusAkkaTestKit { //#unhandled-elided whenUnhandled { // common code for both states - case Event(Queue(obj), t @ Todo(_, v)) ⇒ + case Event(Queue(obj), t @ Todo(_, v)) => goto(Active) using t.copy(queue = v :+ obj) - case Event(e, s) ⇒ + case Event(e, s) => log.warning("received unhandled request {} in state {}/{}", e, stateName, s) stay } @@ -99,16 +99,16 @@ class FSMDocSpec extends MyFavoriteTestFrameWorkPlusAkkaTestKit { //#modifier-syntax when(SomeState) { - case Event(msg, _) ⇒ + case Event(msg, _) => goto(Processing) using (newData) forMax (5 seconds) replying (WillDo) } //#modifier-syntax //#transition-syntax onTransition { - case Idle -> Active ⇒ setTimer("timeout", Tick, 1 second, true) - case Active -> _ ⇒ cancelTimer("timeout") - case x -> Idle ⇒ log.info("entering Idle from " + x) + case Idle -> Active => setTimer("timeout", Tick, 1 second, true) + case Active -> _ => cancelTimer("timeout") + case x -> Idle => log.info("entering Idle from " + x) } //#transition-syntax @@ -122,7 +122,7 @@ class FSMDocSpec extends MyFavoriteTestFrameWorkPlusAkkaTestKit { //#stop-syntax when(Error) { - case Event("stop", _) ⇒ + case Event("stop", _) => // do cleanup ... 
stop() } @@ -130,38 +130,38 @@ class FSMDocSpec extends MyFavoriteTestFrameWorkPlusAkkaTestKit { //#transform-syntax when(SomeState)(transform { - case Event(bytes: ByteString, read) ⇒ stay using (read + bytes.length) + case Event(bytes: ByteString, read) => stay using (read + bytes.length) } using { - case s @ FSM.State(state, read, timeout, stopReason, replies) if read > 1000 ⇒ + case s @ FSM.State(state, read, timeout, stopReason, replies) if read > 1000 => goto(Processing) }) //#transform-syntax //#alt-transform-syntax val processingTrigger: PartialFunction[State, State] = { - case s @ FSM.State(state, read, timeout, stopReason, replies) if read > 1000 ⇒ + case s @ FSM.State(state, read, timeout, stopReason, replies) if read > 1000 => goto(Processing) } when(SomeState)(transform { - case Event(bytes: ByteString, read) ⇒ stay using (read + bytes.length) + case Event(bytes: ByteString, read) => stay using (read + bytes.length) } using processingTrigger) //#alt-transform-syntax //#termination-syntax onTermination { - case StopEvent(FSM.Normal, state, data) ⇒ // ... - case StopEvent(FSM.Shutdown, state, data) ⇒ // ... - case StopEvent(FSM.Failure(cause), state, data) ⇒ // ... + case StopEvent(FSM.Normal, state, data) => // ... + case StopEvent(FSM.Shutdown, state, data) => // ... + case StopEvent(FSM.Failure(cause), state, data) => // ... 
} //#termination-syntax //#unhandled-syntax whenUnhandled { - case Event(x: X, data) ⇒ + case Event(x: X, data) => log.info("Received unhandled event: " + x) stay - case Event(msg, _) ⇒ + case Event(msg, _) => log.warning("Received unknown event: " + msg) goto(Error) } @@ -175,7 +175,7 @@ class FSMDocSpec extends MyFavoriteTestFrameWorkPlusAkkaTestKit { //#body-elided override def logDepth = 12 onTermination { - case StopEvent(FSM.Failure(_), state, data) ⇒ + case StopEvent(FSM.Failure(_), state, data) => val lastEvents = getLog.mkString("\n\t") log.warning("Failure in state " + state + " with data " + data + "\n" + "Events leading up to this point:\n\t" + lastEvents) diff --git a/akka-docs/rst/scala/code/docs/actor/FaultHandlingDocSample.scala b/akka-docs/rst/scala/code/docs/actor/FaultHandlingDocSample.scala index 303331e966..52d96d91ef 100644 --- a/akka-docs/rst/scala/code/docs/actor/FaultHandlingDocSample.scala +++ b/akka-docs/rst/scala/code/docs/actor/FaultHandlingDocSample.scala @@ -49,14 +49,14 @@ class Listener extends Actor with ActorLogging { context.setReceiveTimeout(15 seconds) def receive = { - case Progress(percent) ⇒ + case Progress(percent) => log.info("Current progress: {} %", percent) if (percent >= 100.0) { log.info("That's all, shutting down") context.system.shutdown() } - case ReceiveTimeout ⇒ + case ReceiveTimeout => // No progress within 15 seconds, ServiceUnavailable log.error("Shutting down due to unavailable service") context.system.shutdown() @@ -83,7 +83,7 @@ class Worker extends Actor with ActorLogging { // Stop the CounterService child if it throws ServiceUnavailable override val supervisorStrategy = OneForOneStrategy() { - case _: CounterService.ServiceUnavailable ⇒ Stop + case _: CounterService.ServiceUnavailable => Stop } // The sender of the initial Start message will continuously be notified @@ -94,18 +94,18 @@ class Worker extends Actor with ActorLogging { import context.dispatcher // Use this Actors' Dispatcher as 
ExecutionContext def receive = LoggingReceive { - case Start if progressListener.isEmpty ⇒ + case Start if progressListener.isEmpty => progressListener = Some(sender) context.system.scheduler.schedule(Duration.Zero, 1 second, self, Do) - case Do ⇒ + case Do => counterService ! Increment(1) counterService ! Increment(1) counterService ! Increment(1) // Send current progress to the initial sender counterService ? GetCurrentCount map { - case CurrentCount(_, count) ⇒ Progress(100.0 * count / totalCount) + case CurrentCount(_, count) => Progress(100.0 * count / totalCount) } pipeTo progressListener.get } } @@ -135,7 +135,7 @@ class CounterService extends Actor { // After 3 restarts within 5 seconds it will be stopped. override val supervisorStrategy = OneForOneStrategy(maxNrOfRetries = 3, withinTimeRange = 5 seconds) { - case _: Storage.StorageException ⇒ Restart + case _: Storage.StorageException => Restart } val key = self.path.name @@ -166,21 +166,21 @@ class CounterService extends Actor { def receive = LoggingReceive { - case Entry(k, v) if k == key && counter == None ⇒ + case Entry(k, v) if k == key && counter == None => // Reply from Storage of the initial value, now we can create the Counter val c = context.actorOf(Props(classOf[Counter], key, v)) counter = Some(c) // Tell the counter to use current storage c ! UseStorage(storage) // and send the buffered backlog to the counter - for ((replyTo, msg) ← backlog) c.tell(msg, sender = replyTo) + for ((replyTo, msg) <- backlog) c.tell(msg, sender = replyTo) backlog = IndexedSeq.empty - case msg @ Increment(n) ⇒ forwardOrPlaceInBacklog(msg) + case msg @ Increment(n) => forwardOrPlaceInBacklog(msg) - case msg @ GetCurrentCount ⇒ forwardOrPlaceInBacklog(msg) + case msg @ GetCurrentCount => forwardOrPlaceInBacklog(msg) - case Terminated(actorRef) if Some(actorRef) == storage ⇒ + case Terminated(actorRef) if Some(actorRef) == storage => // After 3 restarts the storage child is stopped. 
// We receive Terminated because we watch the child, see initStorage. storage = None @@ -189,7 +189,7 @@ class CounterService extends Actor { // Try to re-establish storage after while context.system.scheduler.scheduleOnce(10 seconds, self, Reconnect) - case Reconnect ⇒ + case Reconnect => // Re-establish storage after the scheduled delay initStorage() } @@ -199,8 +199,8 @@ class CounterService extends Actor { // the counter. Before that we place the messages in a backlog, to be sent // to the counter when it is initialized. counter match { - case Some(c) ⇒ c forward msg - case None ⇒ + case Some(c) => c forward msg + case None => if (backlog.size >= MaxBacklog) throw new ServiceUnavailable( "CounterService not available, lack of initial value") @@ -230,15 +230,15 @@ class Counter(key: String, initialValue: Long) extends Actor { var storage: Option[ActorRef] = None def receive = LoggingReceive { - case UseStorage(s) ⇒ + case UseStorage(s) => storage = s storeCount() - case Increment(n) ⇒ + case Increment(n) => count += n storeCount() - case GetCurrentCount ⇒ + case GetCurrentCount => sender ! CurrentCount(key, count) } @@ -271,8 +271,8 @@ class Storage extends Actor { val db = DummyDB def receive = LoggingReceive { - case Store(Entry(key, count)) ⇒ db.save(key, count) - case Get(key) ⇒ sender ! Entry(key, db.load(key).getOrElse(0L)) + case Store(Entry(key, count)) => db.save(key, count) + case Get(key) => sender ! 
Entry(key, db.load(key).getOrElse(0L)) } } diff --git a/akka-docs/rst/scala/code/docs/actor/FaultHandlingDocSpec.scala b/akka-docs/rst/scala/code/docs/actor/FaultHandlingDocSpec.scala index 8c9d940ede..a4cc3bdd27 100644 --- a/akka-docs/rst/scala/code/docs/actor/FaultHandlingDocSpec.scala +++ b/akka-docs/rst/scala/code/docs/actor/FaultHandlingDocSpec.scala @@ -26,15 +26,15 @@ object FaultHandlingDocSpec { override val supervisorStrategy = OneForOneStrategy(maxNrOfRetries = 10, withinTimeRange = 1 minute) { - case _: ArithmeticException ⇒ Resume - case _: NullPointerException ⇒ Restart - case _: IllegalArgumentException ⇒ Stop - case _: Exception ⇒ Escalate + case _: ArithmeticException => Resume + case _: NullPointerException => Restart + case _: IllegalArgumentException => Stop + case _: Exception => Escalate } //#strategy def receive = { - case p: Props ⇒ sender ! context.actorOf(p) + case p: Props => sender ! context.actorOf(p) } } //#supervisor @@ -48,15 +48,15 @@ object FaultHandlingDocSpec { override val supervisorStrategy = OneForOneStrategy(maxNrOfRetries = 10, withinTimeRange = 1 minute) { - case _: ArithmeticException ⇒ Resume - case _: NullPointerException ⇒ Restart - case _: IllegalArgumentException ⇒ Stop - case _: Exception ⇒ Escalate + case _: ArithmeticException => Resume + case _: NullPointerException => Restart + case _: IllegalArgumentException => Stop + case _: Exception => Escalate } //#strategy2 def receive = { - case p: Props ⇒ sender ! context.actorOf(p) + case p: Props => sender ! 
context.actorOf(p) } // override default to kill all children during restart override def preRestart(cause: Throwable, msg: Option[Any]) {} @@ -71,9 +71,9 @@ object FaultHandlingDocSpec { override val supervisorStrategy = OneForOneStrategy(maxNrOfRetries = 10, withinTimeRange = 1 minute) { - case _: ArithmeticException ⇒ Resume - case t ⇒ - super.supervisorStrategy.decider.applyOrElse(t, (_: Any) ⇒ Escalate) + case _: ArithmeticException => Resume + case t => + super.supervisorStrategy.decider.applyOrElse(t, (_: Any) => Escalate) } //#default-strategy-fallback @@ -85,9 +85,9 @@ object FaultHandlingDocSpec { class Child extends Actor { var state = 0 def receive = { - case ex: Exception ⇒ throw ex - case x: Int ⇒ state = x - case "get" ⇒ sender ! state + case ex: Exception => throw ex + case x: Int => state = x + case "get" => sender ! state } } //#child @@ -133,7 +133,7 @@ class FaultHandlingDocSpec extends AkkaSpec with ImplicitSender { //#stop watch(child) // have testActor watch “child” child ! new IllegalArgumentException // break it - expectMsgPF() { case Terminated(`child`) ⇒ () } + expectMsgPF() { case Terminated(`child`) => () } //#stop } EventFilter[Exception]("CRASH", occurrences = 2) intercept { @@ -147,7 +147,7 @@ class FaultHandlingDocSpec extends AkkaSpec with ImplicitSender { child2 ! 
new Exception("CRASH") // escalate failure expectMsgPF() { - case t @ Terminated(`child2`) if t.existenceConfirmed ⇒ () + case t @ Terminated(`child2`) if t.existenceConfirmed => () } //#escalate-kill //#escalate-restart diff --git a/akka-docs/rst/scala/code/docs/actor/InitializationDocSpec.scala b/akka-docs/rst/scala/code/docs/actor/InitializationDocSpec.scala index 01930e6c23..20af68a6db 100644 --- a/akka-docs/rst/scala/code/docs/actor/InitializationDocSpec.scala +++ b/akka-docs/rst/scala/code/docs/actor/InitializationDocSpec.scala @@ -10,7 +10,7 @@ object InitializationDocSpec { class PreStartInitExample extends Actor { override def receive = { - case _ ⇒ // Ignore + case _ => // Ignore } //#preStartInit @@ -37,14 +37,14 @@ object InitializationDocSpec { var initializeMe: Option[String] = None override def receive = { - case "init" ⇒ + case "init" => initializeMe = Some("Up and running") context.become(initialized, discardOld = true) } def initialized: Receive = { - case "U OK?" ⇒ initializeMe foreach { sender ! _ } + case "U OK?" => initializeMe foreach { sender ! _ } } //#messageInit diff --git a/akka-docs/rst/scala/code/docs/actor/IntroDocSpec.scala b/akka-docs/rst/scala/code/docs/actor/IntroDocSpec.scala deleted file mode 100644 index 42ca19e51d..0000000000 --- a/akka-docs/rst/scala/code/docs/actor/IntroDocSpec.scala +++ /dev/null @@ -1,50 +0,0 @@ -/** - * Copyright (C) 2009-2013 Typesafe Inc. - */ - -package docs.actor - -import akka.testkit.AkkaSpec - -//#hello-world -import akka.actor.Actor -import akka.actor.Props - -class HelloWorld extends Actor { - - override def preStart(): Unit = { - // create the greeter actor - val greeter = context.actorOf(Props[Greeter], "greeter") - // tell it to perform the greeting - greeter ! 
Greeter.Greet - } - - def receive = { - // when the greeter is done, stop this actor and with it the application - case Greeter.Done ⇒ context.stop(self) - } -} -//#hello-world - -//#greeter -object Greeter { - case object Greet - case object Done -} - -class Greeter extends Actor { - def receive = { - case Greeter.Greet ⇒ - println("Hello World!") - sender ! Greeter.Done - } -} -//#greeter - -class IntroDocSpec extends AkkaSpec { - - "demonstrate HelloWorld" in { - expectTerminated(watch(system.actorOf(Props[HelloWorld]))) - } - -} \ No newline at end of file diff --git a/akka-docs/rst/scala/code/docs/actor/SchedulerDocSpec.scala b/akka-docs/rst/scala/code/docs/actor/SchedulerDocSpec.scala index f4de04aac2..e915ab09c3 100644 --- a/akka-docs/rst/scala/code/docs/actor/SchedulerDocSpec.scala +++ b/akka-docs/rst/scala/code/docs/actor/SchedulerDocSpec.scala @@ -43,7 +43,7 @@ class SchedulerDocSpec extends AkkaSpec(Map("akka.loglevel" -> "INFO")) { val Tick = "tick" class TickActor extends Actor { def receive = { - case Tick ⇒ //Do something + case Tick => //Do something } } val tickActor = system.actorOf(Props(classOf[TickActor], this)) diff --git a/akka-docs/rst/scala/code/docs/actor/TypedActorDocSpec.scala b/akka-docs/rst/scala/code/docs/actor/TypedActorDocSpec.scala index a483f04d95..b6c1ce1901 100644 --- a/akka-docs/rst/scala/code/docs/actor/TypedActorDocSpec.scala +++ b/akka-docs/rst/scala/code/docs/actor/TypedActorDocSpec.scala @@ -12,7 +12,7 @@ import org.scalatest.matchers.MustMatchers import akka.testkit._ //Mr funny man avoids printing to stdout AND keeping docs alright -import java.lang.String.{ valueOf ⇒ println } +import java.lang.String.{ valueOf => println } import akka.actor.ActorRef //#typed-actor-iface @@ -91,7 +91,7 @@ class TypedActorDocSpec extends AkkaSpec(Map("akka.loglevel" -> "INFO")) { //#typed-actor-extension-tools } catch { - case e: Exception ⇒ //dun care + case e: Exception => //dun care } } @@ -160,7 +160,7 @@ class TypedActorDocSpec 
extends AkkaSpec(Map("akka.loglevel" -> "INFO")) { //Use "childSquarer" as a Squarer //#typed-actor-hierarchy } catch { - case e: Exception ⇒ //ignore + case e: Exception => //ignore } } diff --git a/akka-docs/rst/scala/code/docs/actor/UnnestedReceives.scala b/akka-docs/rst/scala/code/docs/actor/UnnestedReceives.scala index 0ec35fe9ee..5e6df5a5ad 100644 --- a/akka-docs/rst/scala/code/docs/actor/UnnestedReceives.scala +++ b/akka-docs/rst/scala/code/docs/actor/UnnestedReceives.scala @@ -34,16 +34,16 @@ class UnnestedReceives extends Actor { } def receive = { - case 'Replay ⇒ //Our first message should be a 'Replay message, all others are invalid + case 'Replay => //Our first message should be a 'Replay message, all others are invalid allOldMessages() foreach process //Process all old messages/events become { //Switch behavior to look for the GoAhead signal - case 'GoAhead ⇒ //When we get the GoAhead signal we process all our buffered messages/events + case 'GoAhead => //When we get the GoAhead signal we process all our buffered messages/events queue foreach process queue.clear become { //Then we change behaviour to process incoming messages/events as they arrive - case msg ⇒ process(msg) + case msg => process(msg) } - case msg ⇒ //While we haven't gotten the GoAhead signal, buffer all incoming messages + case msg => //While we haven't gotten the GoAhead signal, buffer all incoming messages queue += msg //Here you have full control, you can handle overflow etc } } diff --git a/akka-docs/rst/scala/code/docs/actor/mailbox/DurableMailboxDocSpec.scala b/akka-docs/rst/scala/code/docs/actor/mailbox/DurableMailboxDocSpec.scala index df8782790d..84eeb14a52 100644 --- a/akka-docs/rst/scala/code/docs/actor/mailbox/DurableMailboxDocSpec.scala +++ b/akka-docs/rst/scala/code/docs/actor/mailbox/DurableMailboxDocSpec.scala @@ -17,7 +17,7 @@ import akka.actor.{ Actor, ExtendedActorSystem } class MyActor extends Actor { def receive = { - case x ⇒ + case x => } } @@ -61,8 +61,8 @@ 
class MyMailboxType(systemSettings: ActorSystem.Settings, config: Config) override def create(owner: Option[ActorRef], system: Option[ActorSystem]): MessageQueue = (owner zip system) headOption match { - case Some((o, s: ExtendedActorSystem)) ⇒ new MyMessageQueue(o, s) - case _ ⇒ + case Some((o, s: ExtendedActorSystem)) => new MyMessageQueue(o, s) + case _ => throw new IllegalArgumentException("requires an owner " + "(i.e. does not work with BalancingDispatcher)") } diff --git a/akka-docs/rst/scala/code/docs/agent/AgentDocSpec.scala b/akka-docs/rst/scala/code/docs/agent/AgentDocSpec.scala index b977f823bd..cfd15f0fe0 100644 --- a/akka-docs/rst/scala/code/docs/agent/AgentDocSpec.scala +++ b/akka-docs/rst/scala/code/docs/agent/AgentDocSpec.scala @@ -58,7 +58,7 @@ class AgentDocSpec extends AkkaSpec { agent send (_ * 2) //#send - def longRunningOrBlockingFunction = (i: Int) ⇒ i * 1 // Just for the example code + def longRunningOrBlockingFunction = (i: Int) => i * 1 // Just for the example code def someExecutionContext() = scala.concurrent.ExecutionContext.Implicits.global // Just for the example code //#send-off // the ExecutionContext you want to run the function on @@ -81,7 +81,7 @@ class AgentDocSpec extends AkkaSpec { val f3: Future[Int] = agent alter (_ * 2) //#alter - def longRunningOrBlockingFunction = (i: Int) ⇒ i * 1 // Just for the example code + def longRunningOrBlockingFunction = (i: Int) => i * 1 // Just for the example code def someExecutionContext() = ExecutionContext.global // Just for the example code //#alter-off @@ -102,7 +102,7 @@ class AgentDocSpec extends AkkaSpec { import scala.concurrent.stm._ def transfer(from: Agent[Int], to: Agent[Int], amount: Int): Boolean = { - atomic { txn ⇒ + atomic { txn => if (from.get < amount) false else { from send (_ - amount) @@ -133,19 +133,19 @@ class AgentDocSpec extends AkkaSpec { val agent2 = Agent(5) // uses foreach - for (value ← agent1) + for (value <- agent1) println(value) // uses map - val agent3 = for 
(value ← agent1) yield value + 1 + val agent3 = for (value <- agent1) yield value + 1 // or using map directly val agent4 = agent1 map (_ + 1) // uses flatMap val agent5 = for { - value1 ← agent1 - value2 ← agent2 + value1 <- agent1 + value2 <- agent2 } yield value1 + value2 //#monadic-example diff --git a/akka-docs/rst/scala/code/docs/camel/Consumers.scala b/akka-docs/rst/scala/code/docs/camel/Consumers.scala index 81d40ebe22..f48534e29c 100644 --- a/akka-docs/rst/scala/code/docs/camel/Consumers.scala +++ b/akka-docs/rst/scala/code/docs/camel/Consumers.scala @@ -15,7 +15,7 @@ object Consumers { def endpointUri = "file:data/input/actor" def receive = { - case msg: CamelMessage ⇒ println("received %s" format msg.bodyAs[String]) + case msg: CamelMessage => println("received %s" format msg.bodyAs[String]) } } //#Consumer1 @@ -28,7 +28,7 @@ object Consumers { def endpointUri = "jetty:http://localhost:8877/camel/default" def receive = { - case msg: CamelMessage ⇒ sender ! ("Hello %s" format msg.bodyAs[String]) + case msg: CamelMessage => sender ! ("Hello %s" format msg.bodyAs[String]) } } //#Consumer2 @@ -45,7 +45,7 @@ object Consumers { def endpointUri = "jms:queue:test" def receive = { - case msg: CamelMessage ⇒ + case msg: CamelMessage => sender ! Ack // on success // .. @@ -65,7 +65,7 @@ object Consumers { def endpointUri = "jetty:http://localhost:8877/camel/default" override def replyTimeout = 500 millis def receive = { - case msg: CamelMessage ⇒ sender ! ("Hello %s" format msg.bodyAs[String]) + case msg: CamelMessage => sender ! 
("Hello %s" format msg.bodyAs[String]) } } //#Consumer4 diff --git a/akka-docs/rst/scala/code/docs/camel/CustomRoute.scala b/akka-docs/rst/scala/code/docs/camel/CustomRoute.scala index 09a608ff55..6c720376f1 100644 --- a/akka-docs/rst/scala/code/docs/camel/CustomRoute.scala +++ b/akka-docs/rst/scala/code/docs/camel/CustomRoute.scala @@ -18,9 +18,9 @@ object CustomRoute { import akka.camel._ class Responder extends Actor { def receive = { - case msg: CamelMessage ⇒ + case msg: CamelMessage => sender ! (msg.mapBody { - body: String ⇒ "received %s" format body + body: String => "received %s" format body }) } } @@ -47,9 +47,9 @@ object CustomRoute { class ErrorThrowingConsumer(override val endpointUri: String) extends Consumer { def receive = { - case msg: CamelMessage ⇒ throw new Exception("error: %s" format msg.body) + case msg: CamelMessage => throw new Exception("error: %s" format msg.body) } - override def onRouteDefinition = (rd) ⇒ rd.onException(classOf[Exception]). + override def onRouteDefinition = (rd) => rd.onException(classOf[Exception]). 
handled(true).transform(Builder.exceptionMessage).end final override def preRestart(reason: Throwable, message: Option[Any]) { diff --git a/akka-docs/rst/scala/code/docs/camel/CustomRouteExample.scala b/akka-docs/rst/scala/code/docs/camel/CustomRouteExample.scala deleted file mode 100644 index 49cac853c5..0000000000 --- a/akka-docs/rst/scala/code/docs/camel/CustomRouteExample.scala +++ /dev/null @@ -1,53 +0,0 @@ -package docs.camel - -object CustomRouteExample { - { - //#CustomRouteExample - import akka.actor.{ Actor, ActorRef, Props, ActorSystem } - import akka.camel.{ CamelMessage, Consumer, Producer, CamelExtension } - import org.apache.camel.builder.RouteBuilder - import org.apache.camel.{ Exchange, Processor } - - class Consumer3(transformer: ActorRef) extends Actor with Consumer { - def endpointUri = "jetty:http://0.0.0.0:8877/camel/welcome" - - def receive = { - // Forward a string representation of the message body to transformer - case msg: CamelMessage ⇒ transformer.forward(msg.bodyAs[String]) - } - } - - class Transformer(producer: ActorRef) extends Actor { - def receive = { - // example: transform message body "foo" to "- foo -" and forward result - // to producer - case msg: CamelMessage ⇒ - producer.forward(msg.mapBody((body: String) ⇒ "- %s -" format body)) - } - } - - class Producer1 extends Actor with Producer { - def endpointUri = "direct:welcome" - } - - class CustomRouteBuilder extends RouteBuilder { - def configure { - from("direct:welcome").process(new Processor() { - def process(exchange: Exchange) { - // Create a 'welcome' message from the input message - exchange.getOut.setBody("Welcome %s" format exchange.getIn.getBody) - } - }) - } - } - // the below lines can be added to a Boot class, so that you can run the - // example from a MicroKernel - val system = ActorSystem("some-system") - val producer = system.actorOf(Props[Producer1]) - val mediator = system.actorOf(Props(classOf[Transformer], producer)) - val consumer = 
system.actorOf(Props(classOf[Consumer3], mediator)) - CamelExtension(system).context.addRoutes(new CustomRouteBuilder) - //#CustomRouteExample - } - -} diff --git a/akka-docs/rst/scala/code/docs/camel/HttpExample.scala b/akka-docs/rst/scala/code/docs/camel/HttpExample.scala deleted file mode 100644 index a2fd16944b..0000000000 --- a/akka-docs/rst/scala/code/docs/camel/HttpExample.scala +++ /dev/null @@ -1,52 +0,0 @@ -package docs.camel - -object HttpExample { - - { - //#HttpExample - import org.apache.camel.Exchange - import akka.actor.{ Actor, ActorRef, Props, ActorSystem } - import akka.camel.{ Producer, CamelMessage, Consumer } - import akka.actor.Status.Failure - - class HttpConsumer(producer: ActorRef) extends Consumer { - def endpointUri = "jetty:http://0.0.0.0:8875/" - - def receive = { - case msg ⇒ producer forward msg - } - } - - class HttpProducer(transformer: ActorRef) extends Actor with Producer { - def endpointUri = "jetty://http://akka.io/?bridgeEndpoint=true" - - override def transformOutgoingMessage(msg: Any) = msg match { - case msg: CamelMessage ⇒ msg.copy(headers = msg.headers ++ - msg.headers(Set(Exchange.HTTP_PATH))) - } - - override def routeResponse(msg: Any) { transformer forward msg } - } - - class HttpTransformer extends Actor { - def receive = { - case msg: CamelMessage ⇒ - sender ! (msg.mapBody { body: Array[Byte] ⇒ - new String(body).replaceAll("Akka ", "AKKA ") - }) - case msg: Failure ⇒ sender ! msg - } - } - - // Create the actors. this can be done in a Boot class so you can - // run the example in the MicroKernel. Just add the three lines below - // to your boot class. 
- val system = ActorSystem("some-system") - val httpTransformer = system.actorOf(Props[HttpTransformer]) - val httpProducer = system.actorOf(Props(classOf[HttpProducer], httpTransformer)) - val httpConsumer = system.actorOf(Props(classOf[HttpConsumer], httpProducer)) - //#HttpExample - - } - -} diff --git a/akka-docs/rst/scala/code/docs/camel/Introduction.scala b/akka-docs/rst/scala/code/docs/camel/Introduction.scala index 14ef99f30f..68918ffcbd 100644 --- a/akka-docs/rst/scala/code/docs/camel/Introduction.scala +++ b/akka-docs/rst/scala/code/docs/camel/Introduction.scala @@ -15,8 +15,8 @@ object Introduction { def endpointUri = "mina2:tcp://localhost:6200?textline=true" def receive = { - case msg: CamelMessage ⇒ { /* ... */ } - case _ ⇒ { /* ... */ } + case msg: CamelMessage => { /* ... */ } + case _ => { /* ... */ } } } @@ -35,8 +35,8 @@ object Introduction { def endpointUri = "jetty:http://localhost:8877/example" def receive = { - case msg: CamelMessage ⇒ { /* ... */ } - case _ ⇒ { /* ... */ } + case msg: CamelMessage => { /* ... */ } + case _ => { /* ... */ } } } //#Consumer @@ -85,8 +85,8 @@ object Introduction { def endpointUri = "mina2:tcp://localhost:6200?textline=true" def receive = { - case msg: CamelMessage ⇒ { /* ... */ } - case _ ⇒ { /* ... */ } + case msg: CamelMessage => { /* ... */ } + case _ => { /* ... 
*/ } } } val system = ActorSystem("some-system") diff --git a/akka-docs/rst/scala/code/docs/camel/Producers.scala b/akka-docs/rst/scala/code/docs/camel/Producers.scala index c9c69a86c0..2269905059 100644 --- a/akka-docs/rst/scala/code/docs/camel/Producers.scala +++ b/akka-docs/rst/scala/code/docs/camel/Producers.scala @@ -33,7 +33,7 @@ object Producers { class ResponseReceiver extends Actor { def receive = { - case msg: CamelMessage ⇒ + case msg: CamelMessage => // do something with the forwarded response } } @@ -61,11 +61,11 @@ object Producers { def endpointUri = uri def upperCase(msg: CamelMessage) = msg.mapBody { - body: String ⇒ body.toUpperCase + body: String => body.toUpperCase } override def transformOutgoingMessage(msg: Any) = msg match { - case msg: CamelMessage ⇒ upperCase(msg) + case msg: CamelMessage => upperCase(msg) } } //#TransformOutgoingMessage @@ -106,7 +106,7 @@ object Producers { import akka.actor.Actor class MyActor extends Actor { def receive = { - case msg ⇒ + case msg => val template = CamelExtension(context.system).template template.sendBody("direct:news", msg) } @@ -118,7 +118,7 @@ object Producers { import akka.actor.Actor class MyActor extends Actor { def receive = { - case msg ⇒ + case msg => val template = CamelExtension(context.system).template sender ! 
template.requestBody("direct:news", msg) } diff --git a/akka-docs/rst/scala/code/docs/camel/PublishSubscribe.scala b/akka-docs/rst/scala/code/docs/camel/PublishSubscribe.scala index 2263a02277..b786a3d9ce 100644 --- a/akka-docs/rst/scala/code/docs/camel/PublishSubscribe.scala +++ b/akka-docs/rst/scala/code/docs/camel/PublishSubscribe.scala @@ -9,7 +9,7 @@ object PublishSubscribe { def endpointUri = uri def receive = { - case msg: CamelMessage ⇒ println("%s received: %s" format (name, msg.body)) + case msg: CamelMessage => println("%s received: %s" format (name, msg.body)) } } @@ -25,7 +25,7 @@ object PublishSubscribe { def endpointUri = uri def receive = { - case msg: CamelMessage ⇒ { + case msg: CamelMessage => { publisher ! msg.bodyAs[String] sender ! ("message published") } diff --git a/akka-docs/rst/scala/code/docs/camel/QuartzExample.scala b/akka-docs/rst/scala/code/docs/camel/QuartzExample.scala deleted file mode 100644 index f0ad04be57..0000000000 --- a/akka-docs/rst/scala/code/docs/camel/QuartzExample.scala +++ /dev/null @@ -1,30 +0,0 @@ -package docs.camel - -object QuartzExample { - //#Quartz - import akka.actor.{ ActorSystem, Props } - - import akka.camel.{ Consumer } - - class MyQuartzActor extends Consumer { - - def endpointUri = "quartz://example?cron=0/2+*+*+*+*+?" 
- - def receive = { - - case msg ⇒ println("==============> received %s " format msg) - - } // end receive - - } // end MyQuartzActor - - object MyQuartzActor { - - def main(str: Array[String]) { - val system = ActorSystem("my-quartz-system") - system.actorOf(Props[MyQuartzActor]) - } // end main - - } // end MyQuartzActor - //#Quartz -} diff --git a/akka-docs/rst/scala/code/docs/channels/ChannelDocSpec.scala b/akka-docs/rst/scala/code/docs/channels/ChannelDocSpec.scala index c0e0bede9e..05c2eb5dfc 100644 --- a/akka-docs/rst/scala/code/docs/channels/ChannelDocSpec.scala +++ b/akka-docs/rst/scala/code/docs/channels/ChannelDocSpec.scala @@ -32,7 +32,7 @@ object ChannelDocSpec { class Child extends Actor with Channels[(Stats, Nothing) :+: TNil, (Request, Reply) :+: TNil] { - channel[Request] { (x, snd) ⇒ + channel[Request] { (x, snd) => parentChannel <-!- Stats(x) snd <-!- CommandSuccess } @@ -43,9 +43,9 @@ object ChannelDocSpec { val child = createChild(new Child) - channel[GetChild.type] { (_, snd) ⇒ ChildRef(child) -!-> snd } + channel[GetChild.type] { (_, snd) => ChildRef(child) -!-> snd } - channel[Stats] { (x, _) ⇒ + channel[Stats] { (x, _) => // collect some stats } } @@ -89,10 +89,10 @@ class ChannelDocSpec extends AkkaSpec { "demonstrate channels creation" ignore { //#declaring-channels class AC extends Actor with Channels[TNil, (Request, Reply) :+: TNil] { - channel[Request] { (req, snd) ⇒ + channel[Request] { (req, snd) => req match { - case Command("ping") ⇒ snd <-!- CommandSuccess - case _ ⇒ + case Command("ping") => snd <-!- CommandSuccess + case _ => } } } @@ -100,8 +100,8 @@ class ChannelDocSpec extends AkkaSpec { //#declaring-subchannels class ACSub extends Actor with Channels[TNil, (Request, Reply) :+: TNil] { - channel[Command] { (cmd, snd) ⇒ snd <-!- CommandSuccess } - channel[Request] { (req, snd) ⇒ + channel[Command] { (cmd, snd) => snd <-!- CommandSuccess } + channel[Request] { (req, snd) => if (ThreadLocalRandom.current.nextBoolean) snd <-!- 
CommandSuccess else snd <-!- CommandFailure("no luck") } @@ -159,17 +159,17 @@ class ChannelDocSpec extends AkkaSpec { //#become channel[Request] { - case (Command("close"), snd) ⇒ - channel[T1] { (t, s) ⇒ t -?-> target -!-> s } + case (Command("close"), snd) => + channel[T1] { (t, s) => t -?-> target -!-> s } snd <-!- CommandSuccess - case (Command("open"), snd) ⇒ - channel[T1] { (_, _) ⇒ } + case (Command("open"), snd) => + channel[T1] { (_, _) => } snd <-!- CommandSuccess } //#become - channel[T1] { (t, snd) ⇒ t -?-> target -!-> snd } + channel[T1] { (t, snd) => t -?-> target -!-> snd } } //#forwarding diff --git a/akka-docs/rst/scala/code/docs/dataflow/DataflowDocSpec.scala b/akka-docs/rst/scala/code/docs/dataflow/DataflowDocSpec.scala index ebec2c54a4..69112a60e4 100644 --- a/akka-docs/rst/scala/code/docs/dataflow/DataflowDocSpec.scala +++ b/akka-docs/rst/scala/code/docs/dataflow/DataflowDocSpec.scala @@ -64,7 +64,7 @@ class DataflowDocSpec extends WordSpec with MustMatchers { //#for-vs-flow val f1, f2 = Future { 1 } - val usingFor = for { v1 ← f1; v2 ← f2 } yield v1 + v2 + val usingFor = for { v1 <- f1; v2 <- f2 } yield v1 + v2 val usingFlow = flow { f1() + f2() } usingFor onComplete println diff --git a/akka-docs/rst/scala/code/docs/dispatcher/DispatcherDocSpec.scala b/akka-docs/rst/scala/code/docs/dispatcher/DispatcherDocSpec.scala index 777ecfa2dc..d48448685b 100644 --- a/akka-docs/rst/scala/code/docs/dispatcher/DispatcherDocSpec.scala +++ b/akka-docs/rst/scala/code/docs/dispatcher/DispatcherDocSpec.scala @@ -200,22 +200,22 @@ object DispatcherDocSpec { // Create a new PriorityGenerator, lower prio means more important PriorityGenerator { // 'highpriority messages should be treated first if possible - case 'highpriority ⇒ 0 + case 'highpriority => 0 // 'lowpriority messages should be treated last if possible - case 'lowpriority ⇒ 2 + case 'lowpriority => 2 // PoisonPill when no other left - case PoisonPill ⇒ 3 + case PoisonPill => 3 // We default to 1, 
which is in between high and low - case otherwise ⇒ 1 + case otherwise => 1 }) //#prio-mailbox class MyActor extends Actor { def receive = { - case x ⇒ + case x => } } @@ -232,7 +232,7 @@ object DispatcherDocSpec { with RequiresMessageQueue[MyUnboundedMessageQueueSemantics] { //#require-mailbox-on-actor def receive = { - case _ ⇒ + case _ => } //#require-mailbox-on-actor // ... @@ -319,7 +319,7 @@ class DispatcherDocSpec extends AkkaSpec(DispatcherDocSpec.config) { self ! PoisonPill def receive = { - case x ⇒ log.info(x.toString) + case x => log.info(x.toString) } } val a = system.actorOf(Props(classOf[Logger], this).withDispatcher( @@ -338,7 +338,7 @@ class DispatcherDocSpec extends AkkaSpec(DispatcherDocSpec.config) { //#prio-dispatcher watch(a) - expectMsgPF() { case Terminated(`a`) ⇒ () } + expectMsgPF() { case Terminated(`a`) => () } } } diff --git a/akka-docs/rst/scala/code/docs/event/LoggingDocSpec.scala b/akka-docs/rst/scala/code/docs/event/LoggingDocSpec.scala index cb4b37101e..bd74123fa7 100644 --- a/akka-docs/rst/scala/code/docs/event/LoggingDocSpec.scala +++ b/akka-docs/rst/scala/code/docs/event/LoggingDocSpec.scala @@ -22,8 +22,8 @@ object LoggingDocSpec { reason.getMessage, message.getOrElse("")) } def receive = { - case "test" ⇒ log.info("Received test") - case x ⇒ log.warning("Received unknown message: {}", x) + case "test" => log.info("Received test") + case x => log.warning("Received unknown message: {}", x) } } //#my-actor @@ -34,7 +34,7 @@ object LoggingDocSpec { val log = Logging(this) def receive = { - case _ ⇒ { + case _ => { //#mdc val mdc = Map("requestId" -> 1234, "visitorId" -> 5678) log.mdc(mdc) @@ -60,14 +60,14 @@ object LoggingDocSpec { reqId += 1 val always = Map("requestId" -> reqId) val perMessage = currentMessage match { - case r: Req ⇒ Map("visitorId" -> r.visitorId) - case _ ⇒ Map() + case r: Req => Map("visitorId" -> r.visitorId) + case _ => Map() } always ++ perMessage } def receive: Receive = { - case r: Req ⇒ { + case r: Req 
=> { log.info(s"Starting new request: ${r.work}") } } @@ -85,11 +85,11 @@ object LoggingDocSpec { class MyEventListener extends Actor { def receive = { - case InitializeLogger(_) ⇒ sender ! LoggerInitialized - case Error(cause, logSource, logClass, message) ⇒ // ... - case Warning(logSource, logClass, message) ⇒ // ... - case Info(logSource, logClass, message) ⇒ // ... - case Debug(logSource, logClass, message) ⇒ // ... + case InitializeLogger(_) => sender ! LoggerInitialized + case Error(cause, logSource, logClass, message) => // ... + case Warning(logSource, logClass, message) => // ... + case Info(logSource, logClass, message) => // ... + case Debug(logSource, logClass, message) => // ... } } //#my-event-listener @@ -140,7 +140,7 @@ class LoggingDocSpec extends AkkaSpec { class Listener extends Actor { def receive = { - case d: DeadLetter ⇒ println(d) + case d: DeadLetter => println(d) } } val listener = system.actorOf(Props(classOf[Listener], this)) diff --git a/akka-docs/rst/scala/code/docs/extension/ExtensionDocSpec.scala b/akka-docs/rst/scala/code/docs/extension/ExtensionDocSpec.scala index 5fe6fb4e98..4f186d0f61 100644 --- a/akka-docs/rst/scala/code/docs/extension/ExtensionDocSpec.scala +++ b/akka-docs/rst/scala/code/docs/extension/ExtensionDocSpec.scala @@ -60,7 +60,7 @@ object ExtensionDocSpec { class MyActor extends Actor { def receive = { - case someMessage ⇒ + case someMessage => CountExtension(context.system).increment() } } @@ -68,12 +68,12 @@ object ExtensionDocSpec { //#extension-usage-actor-trait - trait Counting { self: Actor ⇒ + trait Counting { self: Actor => def increment() = CountExtension(context.system).increment() } class MyCounterActor extends Actor with Counting { def receive = { - case someMessage ⇒ increment() + case someMessage => increment() } } //#extension-usage-actor-trait diff --git a/akka-docs/rst/scala/code/docs/extension/SettingsExtensionDocSpec.scala b/akka-docs/rst/scala/code/docs/extension/SettingsExtensionDocSpec.scala 
index cbc6298350..5008027a29 100644 --- a/akka-docs/rst/scala/code/docs/extension/SettingsExtensionDocSpec.scala +++ b/akka-docs/rst/scala/code/docs/extension/SettingsExtensionDocSpec.scala @@ -65,7 +65,7 @@ object SettingsExtensionDocSpec { //#extension-usage-actor def receive = { - case someMessage ⇒ + case someMessage => } def connect(dbUri: String, circuitBreakerTimeout: Duration) = { diff --git a/akka-docs/rst/scala/code/docs/future/FutureDocSpec.scala b/akka-docs/rst/scala/code/docs/future/FutureDocSpec.scala index 485eb3d4ff..ed72e1a2d8 100644 --- a/akka-docs/rst/scala/code/docs/future/FutureDocSpec.scala +++ b/akka-docs/rst/scala/code/docs/future/FutureDocSpec.scala @@ -18,9 +18,9 @@ object FutureDocSpec { class MyActor extends Actor { def receive = { - case x: String ⇒ sender ! x.toUpperCase - case x: Int if x < 0 ⇒ sender ! Status.Failure(new ArithmeticException("Negative values not supported")) - case x: Int ⇒ sender ! x + case x: String => sender ! x.toUpperCase + case x: Int if x < 0 => sender ! Status.Failure(new ArithmeticException("Negative values not supported")) + case x: Int => sender ! x } } @@ -29,7 +29,7 @@ object FutureDocSpec { class OddActor extends Actor { var n = 1 def receive = { - case GetNext ⇒ + case GetNext => sender ! 
n n += 2 } @@ -40,7 +40,7 @@ class FutureDocSpec extends AkkaSpec { import FutureDocSpec._ import system.dispatcher - val println: PartialFunction[Any, Unit] = { case _ ⇒ } + val println: PartialFunction[Any, Unit] = { case _ => } "demonstrate usage custom ExecutionContext" in { val yourExecutorServiceGoesHere = java.util.concurrent.Executors.newSingleThreadExecutor() @@ -112,7 +112,7 @@ class FutureDocSpec extends AkkaSpec { val f1 = Future { "Hello" + "World" } - val f2 = f1 map { x ⇒ + val f2 = f1 map { x => x.length } f2 foreach println @@ -128,8 +128,8 @@ class FutureDocSpec extends AkkaSpec { "Hello" + "World" } val f2 = Future.successful(3) - val f3 = f1 map { x ⇒ - f2 map { y ⇒ + val f3 = f1 map { x => + f2 map { y => x.length * y } } @@ -144,8 +144,8 @@ class FutureDocSpec extends AkkaSpec { "Hello" + "World" } val f2 = Future.successful(3) - val f3 = f1 flatMap { x ⇒ - f2 map { y ⇒ + val f3 = f1 flatMap { x => + f2 map { y => x.length * y } } @@ -164,7 +164,7 @@ class FutureDocSpec extends AkkaSpec { val failedFilter = future1.filter(_ % 2 == 1).recover { // When filter fails, it will have a java.util.NoSuchElementException - case m: NoSuchElementException ⇒ 0 + case m: NoSuchElementException => 0 } failedFilter foreach println @@ -178,9 +178,9 @@ class FutureDocSpec extends AkkaSpec { "demonstrate usage of for comprehension" in { //#for-comprehension val f = for { - a ← Future(10 / 2) // 10 / 2 = 5 - b ← Future(a + 1) // 5 + 1 = 6 - c ← Future(a - 1) // 5 - 1 = 4 + a <- Future(10 / 2) // 10 / 2 = 5 + b <- Future(a + 1) // 5 + 1 = 6 + c <- Future(a - 1) // 5 - 1 = 4 if c > 3 // Future.filter } yield b * c // 6 * 4 = 24 @@ -232,9 +232,9 @@ class FutureDocSpec extends AkkaSpec { val f2 = ask(actor2, msg2) val f3 = for { - a ← f1.mapTo[Int] - b ← f2.mapTo[Int] - c ← ask(actor3, (a + b)).mapTo[Int] + a <- f1.mapTo[Int] + b <- f2.mapTo[Int] + c <- ask(actor3, (a + b)).mapTo[Int] } yield c f3 foreach println @@ -262,7 +262,7 @@ class FutureDocSpec extends 
AkkaSpec { "demonstrate usage of sequence" in { //#sequence - val futureList = Future.sequence((1 to 100).toList.map(x ⇒ Future(x * 2 - 1))) + val futureList = Future.sequence((1 to 100).toList.map(x => Future(x * 2 - 1))) val oddSum = futureList.map(_.sum) oddSum foreach println //#sequence @@ -271,7 +271,7 @@ class FutureDocSpec extends AkkaSpec { "demonstrate usage of traverse" in { //#traverse - val futureList = Future.traverse((1 to 100).toList)(x ⇒ Future(x * 2 - 1)) + val futureList = Future.traverse((1 to 100).toList)(x => Future(x * 2 - 1)) val oddSum = futureList.map(_.sum) oddSum foreach println //#traverse @@ -281,7 +281,7 @@ class FutureDocSpec extends AkkaSpec { "demonstrate usage of fold" in { //#fold // Create a sequence of Futures - val futures = for (i ← 1 to 1000) yield Future(i * 2) + val futures = for (i <- 1 to 1000) yield Future(i * 2) val futureSum = Future.fold(futures)(0)(_ + _) futureSum foreach println //#fold @@ -291,7 +291,7 @@ class FutureDocSpec extends AkkaSpec { "demonstrate usage of reduce" in { //#reduce // Create a sequence of Futures - val futures = for (i ← 1 to 1000) yield Future(i * 2) + val futures = for (i <- 1 to 1000) yield Future(i * 2) val futureSum = Future.reduce(futures)(_ + _) futureSum foreach println //#reduce @@ -304,7 +304,7 @@ class FutureDocSpec extends AkkaSpec { val msg1 = -1 //#recover val future = akka.pattern.ask(actor, msg1) recover { - case e: ArithmeticException ⇒ 0 + case e: ArithmeticException => 0 } future foreach println //#recover @@ -317,8 +317,8 @@ class FutureDocSpec extends AkkaSpec { val msg1 = -1 //#try-recover val future = akka.pattern.ask(actor, msg1) recoverWith { - case e: ArithmeticException ⇒ Future.successful(0) - case foo: IllegalArgumentException ⇒ + case e: ArithmeticException => Future.successful(0) + case foo: IllegalArgumentException => Future.failed[Int](new IllegalStateException("All br0ken!")) } future foreach println @@ -330,7 +330,7 @@ class FutureDocSpec extends AkkaSpec 
{ val future1 = Future { "foo" } val future2 = Future { "bar" } //#zip - val future3 = future1 zip future2 map { case (a, b) ⇒ a + " " + b } + val future3 = future1 zip future2 map { case (a, b) => a + " " + b } future3 foreach println //#zip Await.result(future3, 3 seconds) must be("foo bar") @@ -343,9 +343,9 @@ class FutureDocSpec extends AkkaSpec { def watchSomeTV(): Unit = () //#and-then val result = Future { loadPage(url) } andThen { - case Failure(exception) ⇒ log(exception) + case Failure(exception) => log(exception) } andThen { - case _ ⇒ watchSomeTV() + case _ => watchSomeTV() } result foreach println //#and-then @@ -368,8 +368,8 @@ class FutureDocSpec extends AkkaSpec { val future = Future { "foo" } //#onSuccess future onSuccess { - case "bar" ⇒ println("Got my bar alright!") - case x: String ⇒ println("Got some random string: " + x) + case "bar" => println("Got my bar alright!") + case x: String => println("Got some random string: " + x) } //#onSuccess Await.result(future, 3 seconds) must be("foo") @@ -378,9 +378,9 @@ class FutureDocSpec extends AkkaSpec { val future = Future.failed[String](new IllegalStateException("OHNOES")) //#onFailure future onFailure { - case ise: IllegalStateException if ise.getMessage == "OHNOES" ⇒ + case ise: IllegalStateException if ise.getMessage == "OHNOES" => //OHNOES! We are in deep trouble, do something! 
- case e: Exception ⇒ + case e: Exception => //Do something else } //#onFailure @@ -391,8 +391,8 @@ class FutureDocSpec extends AkkaSpec { def doSomethingOnFailure(t: Throwable) = () //#onComplete future onComplete { - case Success(result) ⇒ doSomethingOnSuccess(result) - case Failure(failure) ⇒ doSomethingOnFailure(failure) + case Success(result) => doSomethingOnSuccess(result) + case Failure(failure) => doSomethingOnFailure(failure) } //#onComplete Await.result(future, 3 seconds) must be("foo") @@ -436,7 +436,7 @@ class FutureDocSpec extends AkkaSpec { val f = Future("hello") def receive = { //#receive-omitted - case _ ⇒ + case _ => //#receive-omitted } } diff --git a/akka-docs/rst/scala/code/docs/io/EchoServer.scala b/akka-docs/rst/scala/code/docs/io/EchoServer.scala index 1b50a7756c..54082c4f26 100644 --- a/akka-docs/rst/scala/code/docs/io/EchoServer.scala +++ b/akka-docs/rst/scala/code/docs/io/EchoServer.scala @@ -53,15 +53,15 @@ class EchoManager(handlerClass: Class[_]) extends Actor with ActorLogging { override def postRestart(thr: Throwable): Unit = context stop self def receive = { - case Bound(localAddress) ⇒ + case Bound(localAddress) => log.info("listening on port {}", localAddress.getPort) - case CommandFailed(Bind(_, local, _, _)) ⇒ + case CommandFailed(Bind(_, local, _, _)) => log.warning(s"cannot bind to [$local]") context stop self //#echo-manager - case Connected(remote, local) ⇒ + case Connected(remote, local) => log.info("received connection from {}", remote) val handler = context.actorOf(Props(handlerClass, sender, remote)) sender ! Register(handler, keepOpenOnPeerClosed = true) @@ -91,18 +91,18 @@ class EchoHandler(connection: ActorRef, remote: InetSocketAddress) //#writing def writing: Receive = { - case Received(data) ⇒ + case Received(data) => connection ! 
Write(data, Ack(currentOffset)) buffer(data) - case Ack(ack) ⇒ + case Ack(ack) => acknowledge(ack) - case CommandFailed(Write(_, Ack(ack))) ⇒ + case CommandFailed(Write(_, Ack(ack))) => connection ! ResumeWriting context become buffering(ack) - case PeerClosed ⇒ + case PeerClosed => if (storage.isEmpty) context stop self else context become closing } @@ -114,11 +114,11 @@ class EchoHandler(connection: ActorRef, remote: InetSocketAddress) var peerClosed = false { - case Received(data) ⇒ buffer(data) - case WritingResumed ⇒ writeFirst() - case PeerClosed ⇒ peerClosed = true - case Ack(ack) if ack < nack ⇒ acknowledge(ack) - case Ack(ack) ⇒ + case Received(data) => buffer(data) + case WritingResumed => writeFirst() + case PeerClosed => peerClosed = true + case Ack(ack) if ack < nack => acknowledge(ack) + case Ack(ack) => acknowledge(ack) if (storage.nonEmpty) { if (toAck > 0) { @@ -138,19 +138,19 @@ class EchoHandler(connection: ActorRef, remote: InetSocketAddress) //#closing def closing: Receive = { - case CommandFailed(_: Write) ⇒ + case CommandFailed(_: Write) => connection ! ResumeWriting context.become({ - case WritingResumed ⇒ + case WritingResumed => writeAll() context.unbecome() - case ack: Int ⇒ acknowledge(ack) + case ack: Int => acknowledge(ack) }, discardOld = false) - case Ack(ack) ⇒ + case Ack(ack) => acknowledge(ack) if (storage.isEmpty) context stop self } @@ -213,7 +213,7 @@ class EchoHandler(connection: ActorRef, remote: InetSocketAddress) } private def writeAll(): Unit = { - for ((data, i) ← storage.zipWithIndex) { + for ((data, i) <- storage.zipWithIndex) { connection ! Write(data, Ack(storageOffset + i)) } } @@ -234,17 +234,17 @@ class SimpleEchoHandler(connection: ActorRef, remote: InetSocketAddress) case object Ack extends Event def receive = { - case Received(data) ⇒ + case Received(data) => buffer(data) connection ! 
Write(data, Ack) context.become({ - case Received(data) ⇒ buffer(data) - case Ack ⇒ acknowledge() - case PeerClosed ⇒ closing = true + case Received(data) => buffer(data) + case Ack => acknowledge() + case PeerClosed => closing = true }, discardOld = false) - case PeerClosed ⇒ context stop self + case PeerClosed => context stop self } //#storage-omitted diff --git a/akka-docs/rst/scala/code/docs/io/IODocSpec.scala b/akka-docs/rst/scala/code/docs/io/IODocSpec.scala index 0a4a84771e..2f5a81398c 100644 --- a/akka-docs/rst/scala/code/docs/io/IODocSpec.scala +++ b/akka-docs/rst/scala/code/docs/io/IODocSpec.scala @@ -34,14 +34,14 @@ class Server extends Actor { IO(Tcp) ! Bind(self, new InetSocketAddress("localhost", 0)) def receive = { - case b @ Bound(localAddress) ⇒ + case b @ Bound(localAddress) => //#do-some-logging-or-setup context.parent ! b //#do-some-logging-or-setup - case CommandFailed(_: Bind) ⇒ context stop self + case CommandFailed(_: Bind) => context stop self - case c @ Connected(remote, local) ⇒ + case c @ Connected(remote, local) => //#server context.parent ! c //#server @@ -57,8 +57,8 @@ class Server extends Actor { class SimplisticHandler extends Actor { import Tcp._ def receive = { - case Received(data) ⇒ sender ! Write(data) - case PeerClosed ⇒ context stop self + case Received(data) => sender ! Write(data) + case PeerClosed => context stop self } } //#simplistic-handler @@ -77,20 +77,20 @@ class Client(remote: InetSocketAddress, listener: ActorRef) extends Actor { IO(Tcp) ! Connect(remote) def receive = { - case CommandFailed(_: Connect) ⇒ + case CommandFailed(_: Connect) => listener ! "failed" context stop self - case c @ Connected(remote, local) ⇒ + case c @ Connected(remote, local) => listener ! c val connection = sender connection ! Register(self) context become { - case data: ByteString ⇒ connection ! Write(data) - case CommandFailed(w: Write) ⇒ // O/S buffer was full - case Received(data) ⇒ listener ! data - case "close" ⇒ connection ! 
Close - case _: ConnectionClosed ⇒ context stop self + case data: ByteString => connection ! Write(data) + case CommandFailed(w: Write) => // O/S buffer was full + case Received(data) => listener ! data + case "close" => connection ! Close + case _: ConnectionClosed => context stop self } } } @@ -101,7 +101,7 @@ class IODocSpec extends AkkaSpec { class Parent extends Actor { context.actorOf(Props[Server], "server") def receive = { - case msg ⇒ testActor forward msg + case msg => testActor forward msg } } diff --git a/akka-docs/rst/scala/code/docs/io/Pipelines.scala b/akka-docs/rst/scala/code/docs/io/Pipelines.scala index 3c42b83ba7..1f830cf25b 100644 --- a/akka-docs/rst/scala/code/docs/io/Pipelines.scala +++ b/akka-docs/rst/scala/code/docs/io/Pipelines.scala @@ -45,12 +45,12 @@ class PipelinesDocSpec extends AkkaSpec { builder ++= bs } - override val commandPipeline = { msg: Message ⇒ + override val commandPipeline = { msg: Message => val bs = ByteString.newBuilder // first store the persons bs putInt msg.persons.size - msg.persons foreach { p ⇒ + msg.persons foreach { p => putString(bs, p.first) putString(bs, p.last) } @@ -72,12 +72,12 @@ class PipelinesDocSpec extends AkkaSpec { ByteString(bytes).utf8String } - override val eventPipeline = { bs: ByteString ⇒ + override val eventPipeline = { bs: ByteString => val iter = bs.iterator val personLength = iter.getInt val persons = - (1 to personLength) map (_ ⇒ Person(getString(iter), getString(iter))) + (1 to personLength) map (_ => Person(getString(iter), getString(iter))) val curveLength = iter.getInt val curve = new Array[Double](curveLength) @@ -94,10 +94,10 @@ class PipelinesDocSpec extends AkkaSpec { var lastTick = Duration.Zero override val managementPort: Mgmt = { - case TickGenerator.Tick(timestamp) ⇒ + case TickGenerator.Tick(timestamp) => //#omitted testActor ! 
TickGenerator.Tick(timestamp) - import java.lang.String.{ valueOf ⇒ println } + import java.lang.String.{ valueOf => println } //#omitted println(s"time since last tick: ${timestamp - lastTick}") lastTick = timestamp @@ -207,20 +207,20 @@ class PipelinesDocSpec extends AkkaSpec { new LengthFieldFrame(10000) // )( // failure in the pipeline will fail this actor - cmd ⇒ cmds ! cmd.get, - evt ⇒ evts ! evt.get) + cmd => cmds ! cmd.get, + evt => evts ! evt.get) def receive = { - case m: Message ⇒ pipeline.injectCommand(m) - case b: ByteString ⇒ pipeline.injectEvent(b) - case t: TickGenerator.Trigger ⇒ pipeline.managementCommand(t) + case m: Message => pipeline.injectCommand(m) + case b: ByteString => pipeline.injectEvent(b) + case t: TickGenerator.Trigger => pipeline.managementCommand(t) } } //#actor class P(cmds: ActorRef, evts: ActorRef) extends Processor(cmds, evts) { override def receive = ({ - case "fail!" ⇒ throw new RuntimeException("FAIL!") + case "fail!" => throw new RuntimeException("FAIL!") }: Receive) orElse super.receive } diff --git a/akka-docs/rst/scala/code/docs/io/UdpDocSpec.scala b/akka-docs/rst/scala/code/docs/io/UdpDocSpec.scala index ac0ee7d3d9..efe8c3d122 100644 --- a/akka-docs/rst/scala/code/docs/io/UdpDocSpec.scala +++ b/akka-docs/rst/scala/code/docs/io/UdpDocSpec.scala @@ -21,7 +21,7 @@ object ScalaUdpDocSpec { IO(Udp) ! Udp.SimpleSender def receive = { - case Udp.SimpleSenderReady ⇒ + case Udp.SimpleSenderReady => context.become(ready(sender)) //#sender sender ! Udp.Send(ByteString("hello"), remote) @@ -29,7 +29,7 @@ object ScalaUdpDocSpec { } def ready(send: ActorRef): Receive = { - case msg: String ⇒ + case msg: String => send ! Udp.Send(ByteString(msg), remote) //#sender if (msg == "world") send ! PoisonPill @@ -44,7 +44,7 @@ object ScalaUdpDocSpec { IO(Udp) ! 
Udp.Bind(self, new InetSocketAddress("localhost", 0)) def receive = { - case Udp.Bound(local) ⇒ + case Udp.Bound(local) => //#listener nextActor forward local //#listener @@ -52,15 +52,15 @@ object ScalaUdpDocSpec { } def ready(socket: ActorRef): Receive = { - case Udp.Received(data, remote) ⇒ + case Udp.Received(data, remote) => val processed = // parse data etc., e.g. using PipelineStage //#listener data.utf8String //#listener socket ! Udp.Send(data, remote) // example server echoes back nextActor ! processed - case Udp.Unbind ⇒ socket ! Udp.Unbind - case Udp.Unbound ⇒ context.stop(self) + case Udp.Unbind => socket ! Udp.Unbind + case Udp.Unbound => context.stop(self) } } //#listener @@ -71,7 +71,7 @@ object ScalaUdpDocSpec { IO(UdpConnected) ! UdpConnected.Connect(self, remote) def receive = { - case UdpConnected.Connected ⇒ + case UdpConnected.Connected => context.become(ready(sender)) //#connected sender ! UdpConnected.Send(ByteString("hello")) @@ -79,16 +79,16 @@ object ScalaUdpDocSpec { } def ready(connection: ActorRef): Receive = { - case UdpConnected.Received(data) ⇒ + case UdpConnected.Received(data) => // process data, send it on, etc. //#connected if (data.utf8String == "hello") connection ! UdpConnected.Send(ByteString("world")) //#connected - case msg: String ⇒ + case msg: String => connection ! UdpConnected.Send(ByteString(msg)) - case d @ UdpConnected.Disconnect ⇒ connection ! d - case UdpConnected.Disconnected ⇒ context.stop(self) + case d @ UdpConnected.Disconnect => connection ! 
d + case UdpConnected.Disconnected => context.stop(self) } } //#connected diff --git a/akka-docs/rst/scala/code/docs/pattern/SchedulerPatternSpec.scala b/akka-docs/rst/scala/code/docs/pattern/SchedulerPatternSpec.scala index 433a98cbd0..c63375e3e6 100644 --- a/akka-docs/rst/scala/code/docs/pattern/SchedulerPatternSpec.scala +++ b/akka-docs/rst/scala/code/docs/pattern/SchedulerPatternSpec.scala @@ -26,11 +26,11 @@ object SchedulerPatternSpec { override def postStop() = tick.cancel() def receive = { - case "tick" ⇒ + case "tick" => // do something useful here //#schedule-constructor target ! "tick" - case "restart" ⇒ + case "restart" => throw new ArithmeticException //#schedule-constructor } @@ -53,13 +53,13 @@ object SchedulerPatternSpec { override def postRestart(reason: Throwable) = {} def receive = { - case "tick" ⇒ + case "tick" => // send another periodic tick after the specified delay system.scheduler.scheduleOnce(1000 millis, self, "tick") // do something useful here //#schedule-receive target ! 
"tick" - case "restart" ⇒ + case "restart" => throw new ArithmeticException //#schedule-receive } diff --git a/akka-docs/rst/scala/code/docs/persistence/PersistenceDocSpec.scala b/akka-docs/rst/scala/code/docs/persistence/PersistenceDocSpec.scala index d532061181..2854db0e46 100644 --- a/akka-docs/rst/scala/code/docs/persistence/PersistenceDocSpec.scala +++ b/akka-docs/rst/scala/code/docs/persistence/PersistenceDocSpec.scala @@ -21,11 +21,11 @@ trait PersistenceDocSpec { class MyProcessor extends Processor { def receive = { - case Persistent(payload, sequenceNr) ⇒ + case Persistent(payload, sequenceNr) => // message successfully written to journal - case PersistenceFailure(payload, sequenceNr, cause) ⇒ + case PersistenceFailure(payload, sequenceNr, cause) => // message failed to be written to journal - case other ⇒ + case other => // message not written to journal } } @@ -67,8 +67,8 @@ trait PersistenceDocSpec { //#deletion override def preRestart(reason: Throwable, message: Option[Any]) { message match { - case Some(p: Persistent) ⇒ deleteMessage(p.sequenceNr) - case _ ⇒ + case Some(p: Persistent) => deleteMessage(p.sequenceNr) + case _ => } super.preRestart(reason, message) } @@ -94,7 +94,7 @@ trait PersistenceDocSpec { override def processorId = "my-stable-processor-id" //#processor-id-override def receive = { - case _ ⇒ + case _ => } } } @@ -109,14 +109,14 @@ trait PersistenceDocSpec { val channel = context.actorOf(Channel.props(), name = "myChannel") def receive = { - case p @ Persistent(payload, _) ⇒ + case p @ Persistent(payload, _) => channel ! Deliver(p.withPayload(s"processed ${payload}"), destination) } } class MyDestination extends Actor { def receive = { - case p @ ConfirmablePersistent(payload, sequenceNr, redeliveries) ⇒ + case p @ ConfirmablePersistent(payload, sequenceNr, redeliveries) => // ... 
p.confirm() } @@ -139,7 +139,7 @@ trait PersistenceDocSpec { //#channel-custom-settings def receive = { - case p @ Persistent(payload, _) ⇒ + case p @ Persistent(payload, _) => //#channel-example-reply channel ! Deliver(p.withPayload(s"processed ${payload}"), sender) //#channel-example-reply @@ -155,7 +155,7 @@ trait PersistenceDocSpec { class MyProcessor3 extends Processor { def receive = { //#payload-pattern-matching - case Persistent(payload, _) ⇒ + case Persistent(payload, _) => //#payload-pattern-matching } } @@ -163,7 +163,7 @@ trait PersistenceDocSpec { class MyProcessor4 extends Processor { def receive = { //#sequence-nr-pattern-matching - case Persistent(_, sequenceNr) ⇒ + case Persistent(_, sequenceNr) => //#sequence-nr-pattern-matching } } @@ -178,12 +178,12 @@ trait PersistenceDocSpec { startWith("closed", 0) when("closed") { - case Event(Persistent("open", _), counter) ⇒ + case Event(Persistent("open", _), counter) => goto("open") using (counter + 1) replying (counter) } when("open") { - case Event(Persistent("close", _), counter) ⇒ + case Event(Persistent("close", _), counter) => goto("closed") using (counter + 1) replying (counter) } } @@ -196,9 +196,9 @@ trait PersistenceDocSpec { var state: Any = _ def receive = { - case "snap" ⇒ saveSnapshot(state) - case SaveSnapshotSuccess(metadata) ⇒ // ... - case SaveSnapshotFailure(metadata, reason) ⇒ // ... + case "snap" => saveSnapshot(state) + case SaveSnapshotSuccess(metadata) => // ... + case SaveSnapshotFailure(metadata, reason) => // ... } } //#save-snapshot @@ -210,8 +210,8 @@ trait PersistenceDocSpec { var state: Any = _ def receive = { - case SnapshotOffer(metadata, offeredSnapshot) ⇒ state = offeredSnapshot - case Persistent(payload, sequenceNr) ⇒ // ... + case SnapshotOffer(metadata, offeredSnapshot) => state = offeredSnapshot + case Persistent(payload, sequenceNr) => // ... 
} } //#snapshot-offer @@ -232,8 +232,8 @@ trait PersistenceDocSpec { //#batch-write class MyProcessor extends Processor { def receive = { - case Persistent("a", _) ⇒ // ... - case Persistent("b", _) ⇒ // ... + case Persistent("a", _) => // ... + case Persistent("b", _) => // ... } } @@ -278,11 +278,11 @@ trait PersistenceDocSpec { } def receiveReplay: Receive = { - case event: String ⇒ handleEvent(event) + case event: String => handleEvent(event) } def receiveCommand: Receive = { - case "cmd" ⇒ { + case "cmd" => { // ... persist("evt")(handleEvent) } diff --git a/akka-docs/rst/scala/code/docs/persistence/PersistencePluginDocSpec.scala b/akka-docs/rst/scala/code/docs/persistence/PersistencePluginDocSpec.scala index 86fba92691..f751294844 100644 --- a/akka-docs/rst/scala/code/docs/persistence/PersistencePluginDocSpec.scala +++ b/akka-docs/rst/scala/code/docs/persistence/PersistencePluginDocSpec.scala @@ -98,7 +98,7 @@ object SharedLeveldbPluginDocSpec { } def receive = { - case ActorIdentity(1, Some(store)) ⇒ + case ActorIdentity(1, Some(store)) => SharedLeveldbJournal.setStore(store, context.system) } } @@ -122,7 +122,7 @@ class MyJournal extends AsyncWriteJournal { def writeAsync(persistentBatch: Seq[PersistentRepr]): Future[Unit] = ??? def deleteAsync(processorId: String, fromSequenceNr: Long, toSequenceNr: Long, permanent: Boolean): Future[Unit] = ??? def confirmAsync(processorId: String, sequenceNr: Long, channelId: String): Future[Unit] = ??? - def replayAsync(processorId: String, fromSequenceNr: Long, toSequenceNr: Long)(replayCallback: (PersistentRepr) ⇒ Unit): Future[Long] = ??? + def replayAsync(processorId: String, fromSequenceNr: Long, toSequenceNr: Long)(replayCallback: (PersistentRepr) => Unit): Future[Long] = ??? 
} class MySnapshotStore extends SnapshotStore { diff --git a/akka-docs/rst/scala/code/docs/remoting/RemoteDeploymentDocSpec.scala b/akka-docs/rst/scala/code/docs/remoting/RemoteDeploymentDocSpec.scala index 25a1e4a5b3..b636380835 100644 --- a/akka-docs/rst/scala/code/docs/remoting/RemoteDeploymentDocSpec.scala +++ b/akka-docs/rst/scala/code/docs/remoting/RemoteDeploymentDocSpec.scala @@ -13,7 +13,7 @@ import akka.remote.RemoteScope object RemoteDeploymentDocSpec { class SampleActor extends Actor { - def receive = { case _ ⇒ sender ! self } + def receive = { case _ => sender ! self } } } diff --git a/akka-docs/rst/scala/code/docs/routing/ConsistentHashingRouterDocSpec.scala b/akka-docs/rst/scala/code/docs/routing/ConsistentHashingRouterDocSpec.scala index 391edb63b1..b97f13e1c1 100644 --- a/akka-docs/rst/scala/code/docs/routing/ConsistentHashingRouterDocSpec.scala +++ b/akka-docs/rst/scala/code/docs/routing/ConsistentHashingRouterDocSpec.scala @@ -18,9 +18,9 @@ object ConsistentHashingRouterDocSpec { var cache = Map.empty[String, String] def receive = { - case Entry(key, value) ⇒ cache += (key -> value) - case Get(key) ⇒ sender ! cache.get(key) - case Evict(key) ⇒ cache -= key + case Entry(key, value) => cache += (key -> value) + case Get(key) => sender ! 
cache.get(key) + case Evict(key) => cache -= key } } @@ -50,7 +50,7 @@ class ConsistentHashingRouterDocSpec extends AkkaSpec with ImplicitSender { import akka.routing.ConsistentHashingRouter.ConsistentHashableEnvelope def hashMapping: ConsistentHashMapping = { - case Evict(key) ⇒ key + case Evict(key) => key } val cache: ActorRef = diff --git a/akka-docs/rst/scala/code/docs/routing/CustomRouterDocSpec.scala b/akka-docs/rst/scala/code/docs/routing/CustomRouterDocSpec.scala index acbabf0af1..3a3f8e1f78 100644 --- a/akka-docs/rst/scala/code/docs/routing/CustomRouterDocSpec.scala +++ b/akka-docs/rst/scala/code/docs/routing/CustomRouterDocSpec.scala @@ -50,7 +50,7 @@ akka.actor.deployment { class RedundancyRoutingLogic(nbrCopies: Int) extends RoutingLogic { val roundRobin = RoundRobinRoutingLogic() def select(message: Any, routees: immutable.IndexedSeq[Routee]): Routee = { - val targets = (1 to nbrCopies).map(_ ⇒ roundRobin.select(message, routees)) + val targets = (1 to nbrCopies).map(_ => roundRobin.select(message, routees)) SeveralRoutees(targets) } } @@ -58,7 +58,7 @@ akka.actor.deployment { class Storage extends Actor { def receive = { - case x ⇒ sender ! x + case x => sender ! 
x } } @@ -99,7 +99,7 @@ class CustomRouterDocSpec extends AkkaSpec(CustomRouterDocSpec.config) with Impl //#unit-test-logic val logic = new RedundancyRoutingLogic(nbrCopies = 3) - val routees = for (n ← 1 to 7) yield TestRoutee(n) + val routees = for (n <- 1 to 7) yield TestRoutee(n) val r1 = logic.select("msg", routees) r1.asInstanceOf[SeveralRoutees].routees must be( @@ -118,16 +118,16 @@ class CustomRouterDocSpec extends AkkaSpec(CustomRouterDocSpec.config) with Impl "demonstrate usage of custom router" in { //#usage-1 - for (n ← 1 to 10) system.actorOf(Props[Storage], "s" + n) + for (n <- 1 to 10) system.actorOf(Props[Storage], "s" + n) - val paths = for (n ← 1 to 10) yield ("/user/s" + n) + val paths = for (n <- 1 to 10) yield ("/user/s" + n) val redundancy1: ActorRef = system.actorOf(RedundancyGroup(paths, nbrCopies = 3).props(), name = "redundancy1") redundancy1 ! "important" //#usage-1 - for (_ ← 1 to 3) expectMsg("important") + for (_ <- 1 to 3) expectMsg("important") //#usage-2 val redundancy2: ActorRef = system.actorOf(FromConfig.props(), @@ -135,7 +135,7 @@ class CustomRouterDocSpec extends AkkaSpec(CustomRouterDocSpec.config) with Impl redundancy2 ! 
"very important" //#usage-2 - for (_ ← 1 to 5) expectMsg("very important") + for (_ <- 1 to 5) expectMsg("very important") } diff --git a/akka-docs/rst/scala/code/docs/routing/RouterDocSpec.scala b/akka-docs/rst/scala/code/docs/routing/RouterDocSpec.scala index be4dce4799..ac3c410ee7 100644 --- a/akka-docs/rst/scala/code/docs/routing/RouterDocSpec.scala +++ b/akka-docs/rst/scala/code/docs/routing/RouterDocSpec.scala @@ -173,9 +173,9 @@ router-dispatcher {} } def receive = { - case w: Work ⇒ + case w: Work => router.route(w, sender) - case Terminated(a) ⇒ + case Terminated(a) => router = router.removeRoutee(a) val r = context.actorOf(Props[Worker]) context watch r @@ -186,7 +186,7 @@ router-dispatcher {} class Worker extends Actor { def receive = { - case _ ⇒ + case _ => } } @@ -199,7 +199,7 @@ router-dispatcher {} //#create-worker-actors def receive = { - case _ ⇒ + case _ => } } @@ -335,14 +335,14 @@ router-dispatcher {} //#resize-pool-2 def receive = { - case _ ⇒ + case _ => } } class Echo extends Actor { def receive = { - case m ⇒ sender ! m + case m => sender ! m } } } diff --git a/akka-docs/rst/scala/code/docs/testkit/PlainWordSpec.scala b/akka-docs/rst/scala/code/docs/testkit/PlainWordSpec.scala index 3e2b61e106..9da9036a15 100644 --- a/akka-docs/rst/scala/code/docs/testkit/PlainWordSpec.scala +++ b/akka-docs/rst/scala/code/docs/testkit/PlainWordSpec.scala @@ -16,7 +16,7 @@ import akka.testkit.ImplicitSender object MySpec { class EchoActor extends Actor { def receive = { - case x ⇒ sender ! x + case x => sender ! x } } } diff --git a/akka-docs/rst/scala/code/docs/testkit/TestKitUsageSpec.scala b/akka-docs/rst/scala/code/docs/testkit/TestKitUsageSpec.scala index 2486cec913..9ae9f780cc 100644 --- a/akka-docs/rst/scala/code/docs/testkit/TestKitUsageSpec.scala +++ b/akka-docs/rst/scala/code/docs/testkit/TestKitUsageSpec.scala @@ -79,7 +79,7 @@ class TestKitUsageSpec filterRef ! 
1 receiveWhile(500 millis) { - case msg: String ⇒ messages = msg +: messages + case msg: String => messages = msg +: messages } } messages.length should be(3) @@ -90,12 +90,12 @@ class TestKitUsageSpec "receive an interesting message at some point " in { within(500 millis) { ignoreMsg { - case msg: String ⇒ msg != "something" + case msg: String => msg != "something" } seqRef ! "something" expectMsg("something") ignoreMsg { - case msg: String ⇒ msg == "1" + case msg: String => msg == "1" } expectNoMsg ignoreNoMsg @@ -117,7 +117,7 @@ object TestKitUsageSpec { */ class EchoActor extends Actor { def receive = { - case msg ⇒ sender ! msg + case msg => sender ! msg } } @@ -126,7 +126,7 @@ object TestKitUsageSpec { */ class ForwardingActor(next: ActorRef) extends Actor { def receive = { - case msg ⇒ next ! msg + case msg => next ! msg } } @@ -135,8 +135,8 @@ object TestKitUsageSpec { */ class FilteringActor(next: ActorRef) extends Actor { def receive = { - case msg: String ⇒ next ! msg - case _ ⇒ None + case msg: String => next ! msg + case _ => None } } @@ -149,7 +149,7 @@ object TestKitUsageSpec { class SequencingActor(next: ActorRef, head: immutable.Seq[String], tail: immutable.Seq[String]) extends Actor { def receive = { - case msg ⇒ { + case msg => { head foreach { next ! _ } next ! msg tail foreach { next ! _ } diff --git a/akka-docs/rst/scala/code/docs/testkit/TestkitDocSpec.scala b/akka-docs/rst/scala/code/docs/testkit/TestkitDocSpec.scala index 0be722d309..a19d9f20cc 100644 --- a/akka-docs/rst/scala/code/docs/testkit/TestkitDocSpec.scala +++ b/akka-docs/rst/scala/code/docs/testkit/TestkitDocSpec.scala @@ -22,18 +22,18 @@ object TestkitDocSpec { class MyActor extends Actor { def receive = { - case Say42 ⇒ sender ! 42 - case "some work" ⇒ sender ! "some result" + case Say42 => sender ! 42 + case "some work" => sender ! 
"some result" } } class TestFsmActor extends Actor with FSM[Int, String] { startWith(1, "") when(1) { - case Event("go", _) ⇒ goto(2) using "go" + case Event("go", _) => goto(2) using "go" } when(2) { - case Event("back", _) ⇒ goto(1) using "back" + case Event("back", _) => goto(1) using "back" } } @@ -42,10 +42,10 @@ object TestkitDocSpec { var dest1: ActorRef = _ var dest2: ActorRef = _ def receive = { - case (d1: ActorRef, d2: ActorRef) ⇒ + case (d1: ActorRef, d2: ActorRef) => dest1 = d1 dest2 = d2 - case x ⇒ + case x => dest1 ! x dest2 ! x } @@ -58,13 +58,13 @@ object TestkitDocSpec { //#test-probe-forward-actors class Source(target: ActorRef) extends Actor { def receive = { - case "start" ⇒ target ! "work" + case "start" => target ! "work" } } class Destination extends Actor { def receive = { - case x ⇒ // Do something.. + case x => // Do something.. } } @@ -74,7 +74,7 @@ object TestkitDocSpec { //#logging-receive import akka.event.LoggingReceive def receive = LoggingReceive { - case msg ⇒ // Do something... + case msg => // Do something... } //#logging-receive } @@ -151,7 +151,7 @@ class TestkitDocSpec extends AkkaSpec with DefaultTimeout with ImplicitSender { val actorRef = TestActorRef(new Actor { def receive = { - case "hello" ⇒ throw new IllegalArgumentException("boom") + case "hello" => throw new IllegalArgumentException("boom") } }) intercept[IllegalArgumentException] { actorRef.receive("hello") } @@ -199,7 +199,7 @@ class TestkitDocSpec extends AkkaSpec with DefaultTimeout with ImplicitSender { val probe = new TestProbe(system) { def expectUpdate(x: Int) = { expectMsgPF() { - case Update(id, _) if id == x ⇒ true + case Update(id, _) if id == x => true } sender ! 
"ACK" } @@ -280,7 +280,7 @@ class TestkitDocSpec extends AkkaSpec with DefaultTimeout with ImplicitSender { //#put-your-test-code-here val probe = TestProbe() probe.send(testActor, "hello") - try expectMsg("hello") catch { case NonFatal(e) ⇒ system.shutdown(); throw e } + try expectMsg("hello") catch { case NonFatal(e) => system.shutdown(); throw e } //#put-your-test-code-here shutdown(system) diff --git a/akka-docs/rst/scala/code/docs/transactor/TransactorDocSpec.scala b/akka-docs/rst/scala/code/docs/transactor/TransactorDocSpec.scala index 2c9a726d7e..645e4a744e 100644 --- a/akka-docs/rst/scala/code/docs/transactor/TransactorDocSpec.scala +++ b/akka-docs/rst/scala/code/docs/transactor/TransactorDocSpec.scala @@ -26,13 +26,13 @@ object CoordinatedExample { val count = Ref(0) def receive = { - case coordinated @ Coordinated(Increment(friend)) ⇒ { + case coordinated @ Coordinated(Increment(friend)) => { friend foreach (_ ! coordinated(Increment())) - coordinated atomic { implicit t ⇒ + coordinated atomic { implicit t => count transform (_ + 1) } } - case GetCount ⇒ sender ! count.single.get + case GetCount => sender ! count.single.get } } //#coordinated-example @@ -44,9 +44,9 @@ object CoordinatedApi { class Coordinator extends Actor { //#receive-coordinated def receive = { - case coordinated @ Coordinated(Message) ⇒ { + case coordinated @ Coordinated(Message) => { //#coordinated-atomic - coordinated atomic { implicit t ⇒ + coordinated atomic { implicit t => // do something in the coordinated transaction ... 
} //#coordinated-atomic @@ -66,8 +66,8 @@ object CounterExample { class Counter extends Transactor { val count = Ref(0) - def atomically = implicit txn ⇒ { - case Increment ⇒ count transform (_ + 1) + def atomically = implicit txn => { + case Increment => count transform (_ + 1) } } //#counter-example @@ -85,11 +85,11 @@ object FriendlyCounterExample { val count = Ref(0) override def coordinate = { - case Increment ⇒ include(friend) + case Increment => include(friend) } - def atomically = implicit txn ⇒ { - case Increment ⇒ count transform (_ + 1) + def atomically = implicit txn => { + case Increment => count transform (_ + 1) } } //#friendly-counter-example @@ -97,8 +97,8 @@ object FriendlyCounterExample { class Friend extends Transactor { val count = Ref(0) - def atomically = implicit txn ⇒ { - case Increment ⇒ count transform (_ + 1) + def atomically = implicit txn => { + case Increment => count transform (_ + 1) } } } @@ -115,22 +115,22 @@ object TransactorCoordinate { class TestCoordinateInclude(actor1: ActorRef, actor2: ActorRef, actor3: ActorRef) extends Transactor { //#coordinate-include override def coordinate = { - case Message ⇒ include(actor1, actor2, actor3) + case Message => include(actor1, actor2, actor3) } //#coordinate-include - def atomically = txn ⇒ doNothing + def atomically = txn => doNothing } class TestCoordinateSendTo(someActor: ActorRef, actor1: ActorRef, actor2: ActorRef) extends Transactor { //#coordinate-sendto override def coordinate = { - case SomeMessage ⇒ sendTo(someActor -> SomeOtherMessage) - case OtherMessage ⇒ sendTo(actor1 -> Message1, actor2 -> Message2) + case SomeMessage => sendTo(someActor -> SomeOtherMessage) + case OtherMessage => sendTo(actor1 -> Message1, actor2 -> Message2) } //#coordinate-sendto - def atomically = txn ⇒ doNothing + def atomically = txn => doNothing } } diff --git a/akka-docs/rst/scala/code/docs/zeromq/ZeromqDocSpec.scala b/akka-docs/rst/scala/code/docs/zeromq/ZeromqDocSpec.scala index 
0d4abd241e..9f6705d476 100644 --- a/akka-docs/rst/scala/code/docs/zeromq/ZeromqDocSpec.scala +++ b/akka-docs/rst/scala/code/docs/zeromq/ZeromqDocSpec.scala @@ -45,7 +45,7 @@ object ZeromqDocSpec { } def receive: Receive = { - case Tick ⇒ + case Tick => val currentHeap = memory.getHeapMemoryUsage val timestamp = System.currentTimeMillis @@ -73,13 +73,13 @@ object ZeromqDocSpec { def receive = { // the first frame is the topic, second is the message - case m: ZMQMessage if m.frames(0).utf8String == "health.heap" ⇒ + case m: ZMQMessage if m.frames(0).utf8String == "health.heap" => val Heap(timestamp, used, max) = ser.deserialize(m.frames(1).toArray, classOf[Heap]).get log.info("Used heap {} bytes, at {}", used, timestampFormat.format(new Date(timestamp))) - case m: ZMQMessage if m.frames(0).utf8String == "health.load" ⇒ + case m: ZMQMessage if m.frames(0).utf8String == "health.load" => val Load(timestamp, loadAverage) = ser.deserialize(m.frames(1).toArray, classOf[Load]).get log.info("Load average {}, at {}", loadAverage, @@ -98,7 +98,7 @@ object ZeromqDocSpec { def receive = { // the first frame is the topic, second is the message - case m: ZMQMessage if m.frames(0).utf8String == "health.heap" ⇒ + case m: ZMQMessage if m.frames(0).utf8String == "health.heap" => val Heap(timestamp, used, max) = ser.deserialize(m.frames(1).toArray, classOf[Heap]).get if ((used.toDouble / max) > 0.9) count += 1 @@ -130,9 +130,9 @@ class ZeromqDocSpec extends AkkaSpec("akka.loglevel=INFO") { class Listener extends Actor { def receive: Receive = { - case Connecting ⇒ //... - case m: ZMQMessage ⇒ //... - case _ ⇒ //... + case Connecting => //... + case m: ZMQMessage => //... + case _ => //... 
} } @@ -195,11 +195,11 @@ class ZeromqDocSpec extends AkkaSpec("akka.loglevel=INFO") { def checkZeroMQInstallation() = try { ZeroMQExtension(system).version match { - case ZeroMQVersion(2, x, _) if x >= 1 ⇒ Unit - case ZeroMQVersion(y, _, _) if y >= 3 ⇒ Unit - case version ⇒ pending + case ZeroMQVersion(2, x, _) if x >= 1 => Unit + case ZeroMQVersion(y, _, _) if y >= 3 => Unit + case version => pending } } catch { - case e: LinkageError ⇒ pending + case e: LinkageError => pending } } diff --git a/akka-docs/rst/scala/hello-world.rst b/akka-docs/rst/scala/hello-world.rst index ad79cbe505..1691482d8e 100644 --- a/akka-docs/rst/scala/hello-world.rst +++ b/akka-docs/rst/scala/hello-world.rst @@ -2,43 +2,17 @@ The Obligatory Hello World ########################## -Since every programming paradigm needs to solve the tough problem of printing a -well-known greeting to the console we’ll introduce you to the actor-based -version. +The actor based version of the tough problem of printing a +well-known greeting to the console is introduced in a `Typesafe Activator `_ +tutorial named `Akka Main in Scala `_. -.. includecode:: ../scala/code/docs/actor/IntroDocSpec.scala#hello-world +The tutorial illustrates the generic launcher class :class:`akka.Main` which expects only +one command line argument: the class name of the application’s main actor. This +main method will then create the infrastructure needed for running the actors, +start the given main actor and arrange for the whole application to shut down +once the main actor terminates. -The ``HelloWorld`` actor is the application’s “main” class; when it terminates -the application will shut down—more on that later. The main business logic -happens in the :meth:`preStart` method, where a ``Greeter`` actor is created -and instructed to issue that greeting we crave for. 
When the greeter is done it -will tell us so by sending back a message, and when that message has been -received it will be passed into the behavior described by the :meth:`receive` -method where we can conclude the demonstration by stopping the ``HelloWorld`` -actor. You will be very curious to see how the ``Greeter`` actor performs the -actual task: - -.. includecode:: ../scala/code/docs/actor/IntroDocSpec.scala#greeter - -This is extremely simple now: after its creation this actor will not do -anything until someone sends it a message, and if that happens to be an -invitation to greet the world then the ``Greeter`` complies and informs the -requester that the deed has been done. - -As a Scala developer you will probably want to tell us that there is no -``main(Array[String])`` method anywhere in these classes, so how do we run this -program? The answer is that the appropriate :meth:`main` method is implemented -in the generic launcher class :class:`akka.Main` which expects only one command -line argument: the class name of the application’s main actor. This main method -will then create the infrastructure needed for running the actors, start the -given main actor and arrange for the whole application to shut down once the -main actor terminates. Thus you will be able to run the above code with a -command similar to the following:: - - java -classpath akka.Main com.example.HelloWorld - -This conveniently assumes placement of the above class definitions in package -``com.example`` and it further assumes that you have the required JAR files for -``scala-library`` and ``akka-actor`` available. The easiest would be to manage -these dependencies with a build tool, see :ref:`build-tool`. +There is also another `Typesafe Activator `_ +tutorial in the same problem domain that is named `Hello Akka! `_. +It describes the basics of Akka in more depth. 
diff --git a/akka-docs/rst/scala/remoting.rst b/akka-docs/rst/scala/remoting.rst index 367ac1d0df..d215d8586a 100644 --- a/akka-docs/rst/scala/remoting.rst +++ b/akka-docs/rst/scala/remoting.rst @@ -253,105 +253,14 @@ This is also done via configuration:: This configuration setting will clone the actor “aggregation” 10 times and deploy it evenly distributed across the two given target nodes. -Description of the Remoting Sample -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +.. _remote-sample-scala: -There is a more extensive remote example that comes with the Akka distribution. -Please have a look here for more information: `Remote Sample -<@github@/akka-samples/akka-sample-remote>`_ -This sample demonstrates both, remote deployment and look-up of remote actors. -First, let us have a look at the common setup for both scenarios (this is -``common.conf``): +Remoting Sample +^^^^^^^^^^^^^^^ -.. includecode:: ../../../akka-samples/akka-sample-remote/src/main/resources/common.conf - -This enables the remoting by installing the :class:`RemoteActorRefProvider` and -chooses the default remote transport. All other options will be set -specifically for each show case. - -.. note:: - - Be sure to replace the default IP 127.0.0.1 with the real address the system - is reachable by if you deploy onto multiple machines! - -.. _remote-lookup-sample-scala: - -Remote Lookup -------------- - -In order to look up a remote actor, that one must be created first. For this -purpose, we configure an actor system to listen on port 2552 (this is a snippet -from ``application.conf``): - -.. includecode:: ../../../akka-samples/akka-sample-remote/src/main/resources/application.conf - :include: calculator - -Then the actor must be created. For all code which follows, assume these imports: - -.. includecode:: ../../../akka-samples/akka-sample-remote/src/main/scala/sample/remote/calculator/LookupApplication.scala - :include: imports - -The actor doing the work will be this one: - -.. 
includecode:: ../../../akka-samples/akka-sample-remote/src/main/scala/sample/remote/calculator/CalculatorApplication.scala - :include: actor - -and we start it within an actor system using the above configuration - -.. includecode:: ../../../akka-samples/akka-sample-remote/src/main/scala/sample/remote/calculator/CalculatorApplication.scala - :include: setup - -With the service actor up and running, we may look it up from another actor -system, which will be configured to use port 2553 (this is a snippet from -``application.conf``). - -.. includecode:: ../../../akka-samples/akka-sample-remote/src/main/resources/application.conf - :include: remotelookup - -The actor which will query the calculator is a quite simple one for demonstration purposes - -.. includecode:: ../../../akka-samples/akka-sample-remote/src/main/scala/sample/remote/calculator/LookupApplication.scala - :include: actor - -and it is created from an actor system using the aforementioned client’s config. - -.. includecode:: ../../../akka-samples/akka-sample-remote/src/main/scala/sample/remote/calculator/LookupApplication.scala - :include: setup - -Requests which come in via ``doSomething`` will be sent to the client actor, -which will use the actor reference that was identified earlier. Observe how the actor -system name using in ``actorSelection`` matches the remote system’s name, as do IP -and port number. Top-level actors are always created below the ``"/user"`` -guardian, which supervises them. - -Remote Deployment ------------------ - -Creating remote actors instead of looking them up is not visible in the source -code, only in the configuration file. This section is used in this scenario -(this is a snippet from ``application.conf``): - -.. includecode:: ../../../akka-samples/akka-sample-remote/src/main/resources/application.conf - :include: remotecreation - -For all code which follows, assume these imports: - -.. 
includecode:: ../../../akka-samples/akka-sample-remote/src/main/scala/sample/remote/calculator/LookupApplication.scala - :include: imports - -The client actor looks like in the previous example - -.. includecode:: ../../../akka-samples/akka-sample-remote/src/main/scala/sample/remote/calculator/CreationApplication.scala - :include: actor - -but the setup uses only ``actorOf``: - -.. includecode:: ../../../akka-samples/akka-sample-remote/src/main/scala/sample/remote/calculator/CreationApplication.scala - :include: setup - -Observe how the name of the server actor matches the deployment given in the -configuration file, which will transparently delegate the actor creation to the -remote node. +There is a more extensive remote example that comes with `Typesafe Activator `_. +The tutorial named `Akka Remote Samples with Scala `_ +demonstrates both remote deployment and look-up of remote actors. Pluggable transport support --------------------------- diff --git a/akka-samples/akka-sample-camel-java/.gitignore b/akka-samples/akka-sample-camel-java/.gitignore new file mode 100644 index 0000000000..660c959e44 --- /dev/null +++ b/akka-samples/akka-sample-camel-java/.gitignore @@ -0,0 +1,17 @@ +*# +*.iml +*.ipr +*.iws +*.pyc +*.tm.epoch +*.vim +*-shim.sbt +.idea/ +/project/plugins/project +project/boot +target/ +/logs +.cache +.classpath +.project +.settings \ No newline at end of file diff --git a/akka-samples/akka-sample-camel-java/LICENSE b/akka-samples/akka-sample-camel-java/LICENSE new file mode 100644 index 0000000000..a02154466b --- /dev/null +++ b/akka-samples/akka-sample-camel-java/LICENSE @@ -0,0 +1,13 @@ +Copyright 2013 Typesafe, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/akka-samples/akka-sample-camel-java/activator.properties b/akka-samples/akka-sample-camel-java/activator.properties new file mode 100644 index 0000000000..427d9683d4 --- /dev/null +++ b/akka-samples/akka-sample-camel-java/activator.properties @@ -0,0 +1,4 @@ +name=akka-sample-camel-java +title=Akka Camel Samples with Java +description=Akka Camel Samples with Java +tags=akka,camel,java,sample diff --git a/akka-samples/akka-sample-camel-java/build.sbt b/akka-samples/akka-sample-camel-java/build.sbt new file mode 100644 index 0000000000..288934017e --- /dev/null +++ b/akka-samples/akka-sample-camel-java/build.sbt @@ -0,0 +1,14 @@ +name := "akka-sample-camel-java" + +version := "1.0" + +scalaVersion := "2.10.3" + +libraryDependencies ++= Seq( + "com.typesafe.akka" %% "akka-camel" % "2.3-SNAPSHOT", + "org.apache.camel" % "camel-jetty" % "2.10.3", + "org.apache.camel" % "camel-quartz" % "2.10.3", + "org.slf4j" % "slf4j-api" % "1.7.2", + "ch.qos.logback" % "logback-classic" % "1.0.7" +) + diff --git a/akka-samples/akka-sample-camel-java/project/build.properties b/akka-samples/akka-sample-camel-java/project/build.properties new file mode 100644 index 0000000000..0974fce44d --- /dev/null +++ b/akka-samples/akka-sample-camel-java/project/build.properties @@ -0,0 +1 @@ +sbt.version=0.13.0 diff --git a/akka-docs/rst/java/code/docs/camel/sample/http/HttpConsumer.java b/akka-samples/akka-sample-camel-java/src/main/java/sample/camel/http/HttpConsumer.java similarity index 66% rename from akka-docs/rst/java/code/docs/camel/sample/http/HttpConsumer.java rename to 
akka-samples/akka-sample-camel-java/src/main/java/sample/camel/http/HttpConsumer.java index 9ca5fbb886..34e373cecd 100644 --- a/akka-docs/rst/java/code/docs/camel/sample/http/HttpConsumer.java +++ b/akka-samples/akka-sample-camel-java/src/main/java/sample/camel/http/HttpConsumer.java @@ -1,14 +1,13 @@ -package docs.camel.sample.http; +package sample.camel.http; import akka.actor.ActorRef; import akka.camel.javaapi.UntypedConsumerActor; -//#HttpExample -public class HttpConsumer extends UntypedConsumerActor{ +public class HttpConsumer extends UntypedConsumerActor { private ActorRef producer; - public HttpConsumer(ActorRef producer){ + public HttpConsumer(ActorRef producer) { this.producer = producer; } @@ -20,4 +19,3 @@ public class HttpConsumer extends UntypedConsumerActor{ producer.forward(message, getContext()); } } -//#HttpExample \ No newline at end of file diff --git a/akka-docs/rst/java/code/docs/camel/sample/http/HttpProducer.java b/akka-samples/akka-sample-camel-java/src/main/java/sample/camel/http/HttpProducer.java similarity index 60% rename from akka-docs/rst/java/code/docs/camel/sample/http/HttpProducer.java rename to akka-samples/akka-sample-camel-java/src/main/java/sample/camel/http/HttpProducer.java index 27e0cb0df3..8c37b50c4d 100644 --- a/akka-docs/rst/java/code/docs/camel/sample/http/HttpProducer.java +++ b/akka-samples/akka-sample-camel-java/src/main/java/sample/camel/http/HttpProducer.java @@ -1,4 +1,4 @@ -package docs.camel.sample.http; +package sample.camel.http; import akka.actor.ActorRef; import akka.camel.CamelMessage; @@ -8,8 +8,7 @@ import org.apache.camel.Exchange; import java.util.HashSet; import java.util.Set; -//#HttpExample -public class HttpProducer extends UntypedProducerActor{ +public class HttpProducer extends UntypedProducerActor { private ActorRef transformer; public HttpProducer(ActorRef transformer) { @@ -17,9 +16,13 @@ public class HttpProducer extends UntypedProducerActor{ } public String getEndpointUri() { + // 
bridgeEndpoint=true makes the producer ignore the Exchange.HTTP_URI header, + // and use the endpoint's URI for request return "jetty://http://akka.io/?bridgeEndpoint=true"; } + // before producing messages to endpoints, producer actors can pre-process + // them by overriding the onTransformOutgoingMessage method @Override public Object onTransformOutgoingMessage(Object message) { if (message instanceof CamelMessage) { @@ -27,12 +30,14 @@ public class HttpProducer extends UntypedProducerActor{ Set httpPath = new HashSet(); httpPath.add(Exchange.HTTP_PATH); return camelMessage.withHeaders(camelMessage.getHeaders(httpPath)); - } else return super.onTransformOutgoingMessage(message); + } else + return super.onTransformOutgoingMessage(message); } + // instead of replying to the initial sender, producer actors can implement custom + // response processing by overriding the onRouteResponse method @Override public void onRouteResponse(Object message) { transformer.forward(message, getContext()); } } -//#HttpExample \ No newline at end of file diff --git a/akka-samples/akka-sample-camel-java/src/main/java/sample/camel/http/HttpSample.java b/akka-samples/akka-sample-camel-java/src/main/java/sample/camel/http/HttpSample.java new file mode 100644 index 0000000000..19c1eef98f --- /dev/null +++ b/akka-samples/akka-sample-camel-java/src/main/java/sample/camel/http/HttpSample.java @@ -0,0 +1,15 @@ +package sample.camel.http; + +import akka.actor.*; + +public class HttpSample { + public static void main(String[] args) { + ActorSystem system = ActorSystem.create("some-system"); + + final ActorRef httpTransformer = system.actorOf(Props.create(HttpTransformer.class)); + + final ActorRef httpProducer = system.actorOf(Props.create(HttpProducer.class, httpTransformer)); + + final ActorRef httpConsumer = system.actorOf(Props.create(HttpConsumer.class, httpProducer)); + } +} diff --git a/akka-docs/rst/java/code/docs/camel/sample/http/HttpTransformer.java 
b/akka-samples/akka-sample-camel-java/src/main/java/sample/camel/http/HttpTransformer.java similarity index 67% rename from akka-docs/rst/java/code/docs/camel/sample/http/HttpTransformer.java rename to akka-samples/akka-sample-camel-java/src/main/java/sample/camel/http/HttpTransformer.java index 7aee59293a..6ae1e2bfcc 100644 --- a/akka-docs/rst/java/code/docs/camel/sample/http/HttpTransformer.java +++ b/akka-samples/akka-sample-camel-java/src/main/java/sample/camel/http/HttpTransformer.java @@ -1,21 +1,18 @@ -package docs.camel.sample.http; +package sample.camel.http; import akka.actor.Status; import akka.actor.UntypedActor; import akka.camel.CamelMessage; import akka.dispatch.Mapper; -import akka.japi.Function; -//#HttpExample -public class HttpTransformer extends UntypedActor{ +public class HttpTransformer extends UntypedActor { public void onReceive(Object message) { if (message instanceof CamelMessage) { CamelMessage camelMessage = (CamelMessage) message; - CamelMessage replacedMessage = - camelMessage.mapBody(new Mapper(){ + CamelMessage replacedMessage = camelMessage.mapBody(new Mapper() { @Override public String apply(Object body) { - String text = new String((byte[])body); + String text = new String((byte[]) body); return text.replaceAll("Akka ", "AKKA "); } }); @@ -26,4 +23,3 @@ public class HttpTransformer extends UntypedActor{ unhandled(message); } } -//#HttpExample \ No newline at end of file diff --git a/akka-docs/rst/java/code/docs/camel/sample/quartz/MyQuartzActor.java b/akka-samples/akka-sample-camel-java/src/main/java/sample/camel/quartz/MyQuartzActor.java similarity index 66% rename from akka-docs/rst/java/code/docs/camel/sample/quartz/MyQuartzActor.java rename to akka-samples/akka-sample-camel-java/src/main/java/sample/camel/quartz/MyQuartzActor.java index 7f38d01943..967abea0c8 100644 --- a/akka-docs/rst/java/code/docs/camel/sample/quartz/MyQuartzActor.java +++ 
b/akka-samples/akka-sample-camel-java/src/main/java/sample/camel/quartz/MyQuartzActor.java @@ -1,9 +1,9 @@ -package docs.camel.sample.quartz; -//#QuartzExample +package sample.camel.quartz; + import akka.camel.CamelMessage; import akka.camel.javaapi.UntypedConsumerActor; -public class MyQuartzActor extends UntypedConsumerActor{ +public class MyQuartzActor extends UntypedConsumerActor { public String getEndpointUri() { return "quartz://example?cron=0/2+*+*+*+*+?"; } @@ -11,10 +11,8 @@ public class MyQuartzActor extends UntypedConsumerActor{ public void onReceive(Object message) { if (message instanceof CamelMessage) { CamelMessage camelMessage = (CamelMessage) message; - String body = camelMessage.getBodyAs(String.class, getCamelContext()); - System.out.println(String.format("==============> received %s ", body)); + System.out.println(String.format("==============> received %s ", camelMessage)); } else unhandled(message); } } -//#QuartzExample \ No newline at end of file diff --git a/akka-docs/rst/java/code/docs/camel/sample/quartz/QuartzSample.java b/akka-samples/akka-sample-camel-java/src/main/java/sample/camel/quartz/QuartzSample.java similarity index 79% rename from akka-docs/rst/java/code/docs/camel/sample/quartz/QuartzSample.java rename to akka-samples/akka-sample-camel-java/src/main/java/sample/camel/quartz/QuartzSample.java index 59875e77a6..2f4c73d38a 100644 --- a/akka-docs/rst/java/code/docs/camel/sample/quartz/QuartzSample.java +++ b/akka-samples/akka-sample-camel-java/src/main/java/sample/camel/quartz/QuartzSample.java @@ -1,5 +1,5 @@ -package docs.camel.sample.quartz; -//#QuartzExample +package sample.camel.quartz; + import akka.actor.ActorSystem; import akka.actor.Props; @@ -9,4 +9,3 @@ public class QuartzSample { system.actorOf(Props.create(MyQuartzActor.class)); } } -//#QuartzExample \ No newline at end of file diff --git a/akka-samples/akka-sample-camel-java/src/main/java/sample/camel/route/CustomRouteBuilder.java 
b/akka-samples/akka-sample-camel-java/src/main/java/sample/camel/route/CustomRouteBuilder.java new file mode 100644 index 0000000000..97ace488e5 --- /dev/null +++ b/akka-samples/akka-sample-camel-java/src/main/java/sample/camel/route/CustomRouteBuilder.java @@ -0,0 +1,15 @@ +package sample.camel.route; + +import org.apache.camel.Exchange; +import org.apache.camel.Processor; +import org.apache.camel.builder.RouteBuilder; + +public class CustomRouteBuilder extends RouteBuilder { + public void configure() throws Exception { + from("direct:welcome").process(new Processor() { + public void process(Exchange exchange) throws Exception { + exchange.getOut().setBody(String.format("Welcome %s", exchange.getIn().getBody())); + } + }); + } +} diff --git a/akka-samples/akka-sample-camel-java/src/main/java/sample/camel/route/CustomRouteSample.java b/akka-samples/akka-sample-camel-java/src/main/java/sample/camel/route/CustomRouteSample.java new file mode 100644 index 0000000000..669800b9f6 --- /dev/null +++ b/akka-samples/akka-sample-camel-java/src/main/java/sample/camel/route/CustomRouteSample.java @@ -0,0 +1,19 @@ +package sample.camel.route; + +import akka.actor.*; +import akka.camel.CamelExtension; + +public class CustomRouteSample { + @SuppressWarnings("unused") + public static void main(String[] args) { + try { + ActorSystem system = ActorSystem.create("some-system"); + final ActorRef producer = system.actorOf(Props.create(RouteProducer.class)); + final ActorRef mediator = system.actorOf(Props.create(RouteTransformer.class, producer)); + final ActorRef consumer = system.actorOf(Props.create(RouteConsumer.class, mediator)); + CamelExtension.get(system).context().addRoutes(new CustomRouteBuilder()); + } catch (Exception e) { + e.printStackTrace(); + } + } +} diff --git a/akka-docs/rst/java/code/docs/camel/sample/route/Consumer3.java b/akka-samples/akka-sample-camel-java/src/main/java/sample/camel/route/RouteConsumer.java similarity index 79% rename from 
akka-docs/rst/java/code/docs/camel/sample/route/Consumer3.java rename to akka-samples/akka-sample-camel-java/src/main/java/sample/camel/route/RouteConsumer.java index 05cd7eb85a..d3067fd8ef 100644 --- a/akka-docs/rst/java/code/docs/camel/sample/route/Consumer3.java +++ b/akka-samples/akka-sample-camel-java/src/main/java/sample/camel/route/RouteConsumer.java @@ -1,14 +1,13 @@ -package docs.camel.sample.route; +package sample.camel.route; -//#CustomRouteExample import akka.actor.ActorRef; import akka.camel.CamelMessage; import akka.camel.javaapi.UntypedConsumerActor; -public class Consumer3 extends UntypedConsumerActor{ +public class RouteConsumer extends UntypedConsumerActor { private ActorRef transformer; - public Consumer3(ActorRef transformer){ + public RouteConsumer(ActorRef transformer) { this.transformer = transformer; } @@ -26,4 +25,3 @@ public class Consumer3 extends UntypedConsumerActor{ unhandled(message); } } -//#CustomRouteExample diff --git a/akka-samples/akka-sample-camel-java/src/main/java/sample/camel/route/RouteProducer.java b/akka-samples/akka-sample-camel-java/src/main/java/sample/camel/route/RouteProducer.java new file mode 100644 index 0000000000..5a47947087 --- /dev/null +++ b/akka-samples/akka-sample-camel-java/src/main/java/sample/camel/route/RouteProducer.java @@ -0,0 +1,9 @@ +package sample.camel.route; + +import akka.camel.javaapi.UntypedProducerActor; + +public class RouteProducer extends UntypedProducerActor { + public String getEndpointUri() { + return "direct:welcome"; + } +} diff --git a/akka-docs/rst/java/code/docs/camel/sample/route/Transformer.java b/akka-samples/akka-sample-camel-java/src/main/java/sample/camel/route/RouteTransformer.java similarity index 55% rename from akka-docs/rst/java/code/docs/camel/sample/route/Transformer.java rename to akka-samples/akka-sample-camel-java/src/main/java/sample/camel/route/RouteTransformer.java index 97cc8b8f09..5d0ff079c7 100644 --- 
a/akka-docs/rst/java/code/docs/camel/sample/route/Transformer.java +++ b/akka-samples/akka-sample-camel-java/src/main/java/sample/camel/route/RouteTransformer.java @@ -1,15 +1,14 @@ -package docs.camel.sample.route; -//#CustomRouteExample +package sample.camel.route; + import akka.actor.ActorRef; import akka.actor.UntypedActor; import akka.camel.CamelMessage; import akka.dispatch.Mapper; -import akka.japi.Function; -public class Transformer extends UntypedActor { +public class RouteTransformer extends UntypedActor { private ActorRef producer; - public Transformer(ActorRef producer) { + public RouteTransformer(ActorRef producer) { this.producer = producer; } @@ -18,16 +17,14 @@ public class Transformer extends UntypedActor { // example: transform message body "foo" to "- foo -" and forward result // to producer CamelMessage camelMessage = (CamelMessage) message; - CamelMessage transformedMessage = - camelMessage.mapBody(new Mapper(){ - @Override - public String apply(String body) { - return String.format("- %s -",body); - } - }); + CamelMessage transformedMessage = camelMessage.mapBody(new Mapper() { + @Override + public String apply(String body) { + return String.format("- %s -", body); + } + }); producer.forward(transformedMessage, getContext()); } else unhandled(message); } } -//#CustomRouteExample \ No newline at end of file diff --git a/akka-docs/rst/images/camel-async-interact.png b/akka-samples/akka-sample-camel-java/tutorial/camel-async-interact.png similarity index 100% rename from akka-docs/rst/images/camel-async-interact.png rename to akka-samples/akka-sample-camel-java/tutorial/camel-async-interact.png diff --git a/akka-docs/rst/images/camel-async-sequence.png b/akka-samples/akka-sample-camel-java/tutorial/camel-async-sequence.png similarity index 100% rename from akka-docs/rst/images/camel-async-sequence.png rename to akka-samples/akka-sample-camel-java/tutorial/camel-async-sequence.png diff --git a/akka-docs/rst/images/camel-custom-route.png 
b/akka-samples/akka-sample-camel-java/tutorial/camel-custom-route.png similarity index 100% rename from akka-docs/rst/images/camel-custom-route.png rename to akka-samples/akka-sample-camel-java/tutorial/camel-custom-route.png diff --git a/akka-samples/akka-sample-camel-java/tutorial/index.html b/akka-samples/akka-sample-camel-java/tutorial/index.html new file mode 100644 index 0000000000..9751aa9615 --- /dev/null +++ b/akka-samples/akka-sample-camel-java/tutorial/index.html @@ -0,0 +1,163 @@ + + + Akka Camel Samples with Java + + + + +
+

+This tutorial contains 3 samples of +Akka Camel. +

+ +
    +
  • Asynchronous routing and transformation
  • +
  • Custom Camel route
  • +
  • Quartz scheduler
  • +
+ +
+ +
+ +

Asynchronous routing and transformation

+ +

+This example demonstrates how to implement consumer and producer actors that +support + +Asynchronous routing with their Camel endpoints. The sample +application transforms the content of the Akka homepage, http://akka.io, +by replacing every occurrence of *Akka* with *AKKA*. +

+ +

+To run this example, go to the Run +tab, and start the application main class sample.camel.http.HttpExample if it's not already started. +Then direct the browser to http://localhost:8875 and the +transformed Akka homepage should be displayed. Please note that this example will probably not work if you're +behind an HTTP proxy. +

+ +

+The following figure gives an overview of how the example actors interact with +external systems and with each other. A browser sends a GET request to +http://localhost:8875 which is the published endpoint of the +HttpConsumer +actor. The HttpConsumer actor forwards the requests to the +HttpProducer.java +actor which retrieves the Akka homepage from http://akka.io. The retrieved HTML +is then forwarded to the +HttpTransformer.java +actor which replaces all occurrences of *Akka* with *AKKA*. The transformation result is sent back to the HttpConsumer +which finally returns it to the browser. +

+ + + +

+Implementing the example actor classes and wiring them together is rather easy +as shown in HttpConsumer.java, +HttpProducer.java and +HttpTransformer.java. +

+ + +

+The jetty endpoints of HttpConsumer and +HttpProducer support asynchronous in-out message exchanges and do not allocate threads for the full duration of +the exchange. This is achieved by using Jetty continuations +on the consumer-side and by using Jetty's asynchronous HTTP client +on the producer side. The following high-level sequence diagram illustrates that. +

+ + + +
+
+ +

Custom Camel route example

+ +

+This section also demonstrates the combined usage of a +RouteProducer and a +RouteConsumer +actor as well as the inclusion of a +custom Camel route. +The following figure gives an overview. +

+ + + +
    +
  • A consumer actor receives a message from an HTTP client
  • + +
  • It forwards the message to another actor that transforms the message (encloses + the original message into hyphens)
  • + +
  • The transformer actor forwards the transformed message to a producer actor
  • + +
  • The producer actor sends the message to a custom Camel route beginning at the + direct:welcome endpoint
  • + +
  • A processor (transformer) in the custom Camel route prepends "Welcome" to the + original message and creates a result message
  • + +
  • The producer actor sends the result back to the consumer actor which returns + it to the HTTP client
  • +
+ +

+The producer actor knows where to send the reply because the consumer and +transformer actors have forwarded the original sender reference as well. The +application configuration and the route starting from direct:welcome are done in the code above. +

+ +

+To run this example, go to the Run +tab, and start the application main class sample.camel.route.CustomRouteSample +

+ +

+POST a message to http://localhost:8877/camel/welcome. +

+ +

+   curl -H "Content-Type: text/plain" -d "Anke" http://localhost:8877/camel/welcome
+
+ +

+The response should be: +

+ +

+   Welcome - Anke -
+
+ +
+
+ +

Quartz Scheduler Example

+ +

+Here is an example showing how simple it is to implement a cron-style scheduler by +using the Camel Quartz component in Akka. +

+

+Open MyQuartzActor.java. +

+

+The example creates a "timer" actor which fires a message every 2 +seconds. +

+ +

+For more information about the Camel Quartz component, see here: +http://camel.apache.org/quartz.html +

+ +
+ + + diff --git a/akka-samples/akka-sample-camel-scala/.gitignore b/akka-samples/akka-sample-camel-scala/.gitignore new file mode 100644 index 0000000000..660c959e44 --- /dev/null +++ b/akka-samples/akka-sample-camel-scala/.gitignore @@ -0,0 +1,17 @@ +*# +*.iml +*.ipr +*.iws +*.pyc +*.tm.epoch +*.vim +*-shim.sbt +.idea/ +/project/plugins/project +project/boot +target/ +/logs +.cache +.classpath +.project +.settings \ No newline at end of file diff --git a/akka-samples/akka-sample-camel-scala/LICENSE b/akka-samples/akka-sample-camel-scala/LICENSE new file mode 100644 index 0000000000..a02154466b --- /dev/null +++ b/akka-samples/akka-sample-camel-scala/LICENSE @@ -0,0 +1,13 @@ +Copyright 2013 Typesafe, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
diff --git a/akka-samples/akka-sample-camel-scala/activator.properties b/akka-samples/akka-sample-camel-scala/activator.properties new file mode 100644 index 0000000000..be391d3dce --- /dev/null +++ b/akka-samples/akka-sample-camel-scala/activator.properties @@ -0,0 +1,4 @@ +name=akka-sample-camel-scala +title=Akka Camel Samples with Scala +description=Akka Camel Samples with Scala +tags=akka,camel,scala,sample diff --git a/akka-samples/akka-sample-camel-scala/build.sbt b/akka-samples/akka-sample-camel-scala/build.sbt new file mode 100644 index 0000000000..161a8588ee --- /dev/null +++ b/akka-samples/akka-sample-camel-scala/build.sbt @@ -0,0 +1,14 @@ +name := "akka-sample-camel-scala" + +version := "1.0" + +scalaVersion := "2.10.3" + +libraryDependencies ++= Seq( + "com.typesafe.akka" %% "akka-camel" % "2.3-SNAPSHOT", + "org.apache.camel" % "camel-jetty" % "2.10.3", + "org.apache.camel" % "camel-quartz" % "2.10.3", + "org.slf4j" % "slf4j-api" % "1.7.2", + "ch.qos.logback" % "logback-classic" % "1.0.7" +) + diff --git a/akka-samples/akka-sample-camel-scala/project/build.properties b/akka-samples/akka-sample-camel-scala/project/build.properties new file mode 100644 index 0000000000..0974fce44d --- /dev/null +++ b/akka-samples/akka-sample-camel-scala/project/build.properties @@ -0,0 +1 @@ +sbt.version=0.13.0 diff --git a/akka-samples/akka-sample-camel-scala/src/main/scala/sample/camel/CustomRouteExample.scala b/akka-samples/akka-sample-camel-scala/src/main/scala/sample/camel/CustomRouteExample.scala new file mode 100644 index 0000000000..c606f7832f --- /dev/null +++ b/akka-samples/akka-sample-camel-scala/src/main/scala/sample/camel/CustomRouteExample.scala @@ -0,0 +1,58 @@ +package sample.camel + +import org.apache.camel.Exchange +import org.apache.camel.Processor +import org.apache.camel.builder.RouteBuilder +import akka.actor.Actor +import akka.actor.ActorRef +import akka.actor.ActorSystem +import akka.actor.Props +import akka.camel.CamelExtension +import 
akka.camel.CamelMessage +import akka.camel.Consumer +import akka.camel.Producer + +object CustomRouteExample { + + def main(args: Array[String]): Unit = { + val system = ActorSystem("some-system") + val producer = system.actorOf(Props[RouteProducer]) + val mediator = system.actorOf(Props(classOf[RouteTransformer], producer)) + val consumer = system.actorOf(Props(classOf[RouteConsumer], mediator)) + CamelExtension(system).context.addRoutes(new CustomRouteBuilder) + } + + class RouteConsumer(transformer: ActorRef) extends Actor with Consumer { + def endpointUri = "jetty:http://0.0.0.0:8877/camel/welcome" + + def receive = { + // Forward a string representation of the message body to transformer + case msg: CamelMessage => transformer.forward(msg.withBodyAs[String]) + } + } + + class RouteTransformer(producer: ActorRef) extends Actor { + def receive = { + // example: transform message body "foo" to "- foo -" and forward result + // to producer + case msg: CamelMessage => + producer.forward(msg.mapBody((body: String) => "- %s -" format body)) + } + } + + class RouteProducer extends Actor with Producer { + def endpointUri = "direct:welcome" + } + + class CustomRouteBuilder extends RouteBuilder { + def configure { + from("direct:welcome").process(new Processor() { + def process(exchange: Exchange) { + // Create a 'welcome' message from the input message + exchange.getOut.setBody("Welcome %s" format exchange.getIn.getBody) + } + }) + } + } + +} diff --git a/akka-samples/akka-sample-camel-scala/src/main/scala/sample/camel/HttpExample.scala b/akka-samples/akka-sample-camel-scala/src/main/scala/sample/camel/HttpExample.scala new file mode 100644 index 0000000000..f2ced742b5 --- /dev/null +++ b/akka-samples/akka-sample-camel-scala/src/main/scala/sample/camel/HttpExample.scala @@ -0,0 +1,58 @@ +package sample.camel + +import org.apache.camel.Exchange +import akka.actor.Actor +import akka.actor.ActorRef +import akka.actor.ActorSystem +import akka.actor.Props +import 
akka.actor.Status.Failure +import akka.actor.actorRef2Scala +import akka.camel.CamelMessage +import akka.camel.Consumer +import akka.camel.Producer + +object HttpExample { + + def main(args: Array[String]): Unit = { + val system = ActorSystem("some-system") + val httpTransformer = system.actorOf(Props[HttpTransformer]) + val httpProducer = system.actorOf(Props(classOf[HttpProducer], httpTransformer)) + val httpConsumer = system.actorOf(Props(classOf[HttpConsumer], httpProducer)) + } + + class HttpConsumer(producer: ActorRef) extends Consumer { + def endpointUri = "jetty:http://0.0.0.0:8875/" + + def receive = { + case msg => producer forward msg + } + } + + class HttpProducer(transformer: ActorRef) extends Actor with Producer { + // bridgeEndpoint=true makes the producer ignore the Exchange.HTTP_URI header, + // and use the endpoint's URI for request + def endpointUri = "jetty://http://akka.io/?bridgeEndpoint=true" + + // before producing messages to endpoints, producer actors can pre-process + // them by overriding the transformOutgoingMessage method + override def transformOutgoingMessage(msg: Any) = msg match { + case camelMsg: CamelMessage => camelMsg.copy(headers = + camelMsg.headers(Set(Exchange.HTTP_PATH))) + } + + // instead of replying to the initial sender, producer actors can implement custom + // response processing by overriding the routeResponse method + override def routeResponse(msg: Any) { transformer forward msg } + } + + class HttpTransformer extends Actor { + def receive = { + case msg: CamelMessage => + sender ! (msg.mapBody { body: Array[Byte] => + new String(body).replaceAll("Akka ", "AKKA ") + }) + case msg: Failure => sender ! 
msg + } + } + +} diff --git a/akka-samples/akka-sample-camel-scala/src/main/scala/sample/camel/QuartzExample.scala b/akka-samples/akka-sample-camel-scala/src/main/scala/sample/camel/QuartzExample.scala new file mode 100644 index 0000000000..3a02a5be08 --- /dev/null +++ b/akka-samples/akka-sample-camel-scala/src/main/scala/sample/camel/QuartzExample.scala @@ -0,0 +1,26 @@ +package sample.camel + +import akka.actor.ActorSystem +import akka.actor.Props +import akka.camel.Consumer + +object QuartzExample { + + def main(args: Array[String]): Unit = { + val system = ActorSystem("my-quartz-system") + system.actorOf(Props[MyQuartzActor]) + } + + class MyQuartzActor extends Consumer { + + def endpointUri = "quartz://example?cron=0/2+*+*+*+*+?" + + def receive = { + + case msg => println("==============> received %s " format msg) + + } + + } + +} diff --git a/akka-samples/akka-sample-camel-scala/tutorial/camel-async-interact.png b/akka-samples/akka-sample-camel-scala/tutorial/camel-async-interact.png new file mode 100644 index 0000000000..55a2a4505b Binary files /dev/null and b/akka-samples/akka-sample-camel-scala/tutorial/camel-async-interact.png differ diff --git a/akka-samples/akka-sample-camel-scala/tutorial/camel-async-sequence.png b/akka-samples/akka-sample-camel-scala/tutorial/camel-async-sequence.png new file mode 100644 index 0000000000..416c5a181b Binary files /dev/null and b/akka-samples/akka-sample-camel-scala/tutorial/camel-async-sequence.png differ diff --git a/akka-samples/akka-sample-camel-scala/tutorial/camel-custom-route.png b/akka-samples/akka-sample-camel-scala/tutorial/camel-custom-route.png new file mode 100644 index 0000000000..efacdb8f82 Binary files /dev/null and b/akka-samples/akka-sample-camel-scala/tutorial/camel-custom-route.png differ diff --git a/akka-samples/akka-sample-camel-scala/tutorial/index.html b/akka-samples/akka-sample-camel-scala/tutorial/index.html new file mode 100644 index 0000000000..c396ffa4ad --- /dev/null +++ 
b/akka-samples/akka-sample-camel-scala/tutorial/index.html @@ -0,0 +1,161 @@ + + + Akka Camel Samples with Scala + + + + +
+

+This tutorial contains 3 samples of +Akka Camel. +

+ +
    +
  • Asynchronous routing and transformation
  • +
  • Custom Camel route
  • +
  • Quartz scheduler
  • +
+ +
+ +
+ +

Asynchronous routing and transformation

+ +

+This example demonstrates how to implement consumer and producer actors that +support + +Asynchronous routing with their Camel endpoints. The sample +application transforms the content of the Akka homepage, http://akka.io, +by replacing every occurrence of *Akka* with *AKKA*. +

+ +

+To run this example, go to the Run +tab, and start the application main class sample.camel.HttpExample if it's not already started. +Then direct the browser to http://localhost:8875 and the +transformed Akka homepage should be displayed. Please note that this example will probably not work if you're +behind an HTTP proxy. +

+ +

+The following figure gives an overview of how the example actors interact with +external systems and with each other. A browser sends a GET request to +http://localhost:8875 which is the published endpoint of the +HttpConsumer +actor. The HttpConsumer actor forwards the requests to the +HttpProducer +actor which retrieves the Akka homepage from http://akka.io. The retrieved HTML +is then forwarded to the +HttpTransformer +actor which replaces all occurrences of *Akka* with *AKKA*. The transformation result is sent back to the HttpConsumer +which finally returns it to the browser. +

+ + + +

+Implementing the example actor classes and wiring them together is rather easy +as shown in HttpExample.scala. +

+ + +

+The jetty endpoints of HttpConsumer and +HttpProducer support asynchronous in-out message exchanges and do not allocate threads for the full duration of +the exchange. This is achieved by using Jetty continuations +on the consumer-side and by using Jetty's asynchronous HTTP client +on the producer side. The following high-level sequence diagram illustrates that. +

+ + + +
+
+ +

Custom Camel route example

+ +

+This section also demonstrates the combined usage of a +RouteProducer +and a RouteConsumer +actor as well as the inclusion of a +custom Camel route. +The following figure gives an overview. +

+ + + +
    +
  • A consumer actor receives a message from an HTTP client
  • + +
  • It forwards the message to another actor that transforms the message (encloses + the original message into hyphens)
  • + +
  • The transformer actor forwards the transformed message to a producer actor
  • + +
  • The producer actor sends the message to a custom Camel route beginning at the + direct:welcome endpoint
  • + +
  • A processor (transformer) in the custom Camel route prepends "Welcome" to the + original message and creates a result message
  • + +
  • The producer actor sends the result back to the consumer actor which returns + it to the HTTP client
  • +
+ +

+The producer actor knows where to send the reply because the consumer and +transformer actors have forwarded the original sender reference as well. The +application configuration and the route starting from direct:welcome are done in the code above. +

+ +

+To run this example, go to the Run +tab, and start the application main class sample.camel.CustomRouteExample +

+ +

+POST a message to http://localhost:8877/camel/welcome. +

+ +

+   curl -H "Content-Type: text/plain" -d "Anke" http://localhost:8877/camel/welcome
+
+ +

+The response should be: +

+ +

+   Welcome - Anke -
+
+ +
+
+ +

Quartz Scheduler Example

+ +

+Here is an example showing how simple it is to implement a cron-style scheduler by +using the Camel Quartz component in Akka. +

+

+Open QuartzExample.scala. +

+

+The example creates a "timer" actor which fires a message every 2 +seconds. +

+ +

+For more information about the Camel Quartz component, see here: +http://camel.apache.org/quartz.html +

+ +
+ + + diff --git a/akka-samples/akka-sample-camel/README.md b/akka-samples/akka-sample-camel/README.md deleted file mode 100644 index d71806b0f9..0000000000 --- a/akka-samples/akka-sample-camel/README.md +++ /dev/null @@ -1,15 +0,0 @@ -Camel Sample -============ - -This sample is meant to be used by studying the code; it does not perform any -astounding functions when running it. If you want to run it, check out the akka -sources on your local hard drive, follow the [instructions for setting up Akka -with SBT](http://doc.akka.io/docs/akka/current/intro/getting-started.html). -When you start SBT within the checked-out akka source directory, you can run -this sample by typing - - akka-sample-camel/run - -and then choose which of the demonstrations you would like to run. - -You can read more in the [Akka docs](http://akka.io/docs). diff --git a/akka-samples/akka-sample-camel/src/main/scala/AsyncRouteAndTransform.scala b/akka-samples/akka-sample-camel/src/main/scala/AsyncRouteAndTransform.scala deleted file mode 100644 index 85dff9f064..0000000000 --- a/akka-samples/akka-sample-camel/src/main/scala/AsyncRouteAndTransform.scala +++ /dev/null @@ -1,49 +0,0 @@ -import akka.actor.Status.Failure -import akka.actor.{ Actor, ActorRef, Props, ActorSystem } -import akka.camel.{ Producer, CamelMessage, Consumer } -import org.apache.camel.{ Exchange } - -/** - * Asynchronous routing and transformation example - */ -object AsyncRouteAndTransform extends App { - val system = ActorSystem("rewriteAkkaToAKKA") - val httpTransformer = system.actorOf(Props[HttpTransformer], "transformer") - val httpProducer = system.actorOf(Props(classOf[HttpProducer], httpTransformer), "producer") - val httpConsumer = system.actorOf(Props(classOf[HttpConsumer], httpProducer), "consumer") -} - -class HttpConsumer(producer: ActorRef) extends Consumer { - def endpointUri = "jetty:http://0.0.0.0:8875/" - def receive = { - case msg ⇒ producer forward msg - } -} - -class HttpProducer(transformer: ActorRef) 
extends Actor with Producer { - def endpointUri = "jetty://http://akka.io/?bridgeEndpoint=true" - - override def transformOutgoingMessage(msg: Any) = msg match { - case msg: CamelMessage ⇒ msg.copy(headers = msg.headers(Set(Exchange.HTTP_PATH))) - } - - override def routeResponse(msg: Any) { - transformer forward msg - } -} - -class HttpTransformer extends Actor { - def receive = { - case msg: CamelMessage ⇒ - val transformedMsg = msg.mapBody { - (body: Array[Byte]) ⇒ - new String(body).replaceAll("Akka", "AKKA") - // just to make the result look a bit better. - .replaceAll("href=\"/resources", "href=\"http://akka.io/resources") - .replaceAll("src=\"/resources", "src=\"http://akka.io/resources") - } - sender ! transformedMsg - case msg: Failure ⇒ sender ! msg - } -} - diff --git a/akka-samples/akka-sample-camel/src/main/scala/SimpleFileConsumer.scala b/akka-samples/akka-sample-camel/src/main/scala/SimpleFileConsumer.scala deleted file mode 100644 index 5b96afed2c..0000000000 --- a/akka-samples/akka-sample-camel/src/main/scala/SimpleFileConsumer.scala +++ /dev/null @@ -1,24 +0,0 @@ -import akka.actor.{ Props, ActorSystem } -import akka.camel.{ CamelMessage, Consumer } -import java.io.File -import org.apache.camel.Exchange - -object SimpleFileConsumer extends App { - val subDir = "consume-files" - val tmpDir = System.getProperty("java.io.tmpdir") - val consumeDir = new File(tmpDir, subDir) - consumeDir.mkdirs() - val tmpDirUri = "file://%s/%s" format (tmpDir, subDir) - - val system = ActorSystem("consume-files") - val fileConsumer = system.actorOf(Props(classOf[FileConsumer], tmpDirUri), "fileConsumer") - println(String.format("Put a text file in '%s', the consumer will pick it up!", consumeDir)) -} - -class FileConsumer(uri: String) extends Consumer { - def endpointUri = uri - def receive = { - case msg: CamelMessage ⇒ - println("Received file %s with content:\n%s".format(msg.headers(Exchange.FILE_NAME), msg.bodyAs[String])) - } -} diff --git 
a/akka-samples/akka-sample-cluster-java/.gitignore b/akka-samples/akka-sample-cluster-java/.gitignore new file mode 100644 index 0000000000..660c959e44 --- /dev/null +++ b/akka-samples/akka-sample-cluster-java/.gitignore @@ -0,0 +1,17 @@ +*# +*.iml +*.ipr +*.iws +*.pyc +*.tm.epoch +*.vim +*-shim.sbt +.idea/ +/project/plugins/project +project/boot +target/ +/logs +.cache +.classpath +.project +.settings \ No newline at end of file diff --git a/akka-samples/akka-sample-cluster-java/LICENSE b/akka-samples/akka-sample-cluster-java/LICENSE new file mode 100644 index 0000000000..a02154466b --- /dev/null +++ b/akka-samples/akka-sample-cluster-java/LICENSE @@ -0,0 +1,13 @@ +Copyright 2013 Typesafe, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
diff --git a/akka-samples/akka-sample-cluster-java/activator.properties b/akka-samples/akka-sample-cluster-java/activator.properties new file mode 100644 index 0000000000..54232a778b --- /dev/null +++ b/akka-samples/akka-sample-cluster-java/activator.properties @@ -0,0 +1,4 @@ +name=akka-sample-cluster-java +title=Akka Cluster Samples with Java +description=Akka Cluster Samples with Java +tags=akka,cluster,java,sample diff --git a/akka-samples/akka-sample-cluster-java/project/Build.scala b/akka-samples/akka-sample-cluster-java/project/Build.scala new file mode 100644 index 0000000000..2508e85041 --- /dev/null +++ b/akka-samples/akka-sample-cluster-java/project/Build.scala @@ -0,0 +1,32 @@ +import sbt._ +import sbt.Keys._ +import com.typesafe.sbt.SbtMultiJvm +import com.typesafe.sbt.SbtMultiJvm.MultiJvmKeys.MultiJvm + +object AkkaSampleClusterBuild extends Build { + + val akkaVersion = "2.3-SNAPSHOT" + + lazy val akkaSampleCluster = Project( + id = "akka-sample-cluster-java", + base = file("."), + settings = Project.defaultSettings ++ SbtMultiJvm.multiJvmSettings ++ Seq( + name := "akka-sample-cluster-java", + version := "1.0", + scalaVersion := "2.10.3", + scalacOptions in Compile ++= Seq("-encoding", "UTF-8", "-target:jvm-1.6", "-deprecation", "-feature", "-unchecked", "-Xlog-reflective-calls", "-Xlint"), + javacOptions in Compile ++= Seq("-source", "1.6", "-target", "1.6", "-Xlint:unchecked", "-Xlint:deprecation"), + libraryDependencies ++= Seq( + "com.typesafe.akka" %% "akka-cluster" % akkaVersion, + "com.typesafe.akka" %% "akka-contrib" % akkaVersion, + "com.typesafe.akka" %% "akka-multi-node-testkit" % akkaVersion, + "org.scalatest" %% "scalatest" % "2.0" % "test", + "org.fusesource" % "sigar" % "1.6.4"), + javaOptions in run ++= Seq( + "-Djava.library.path=./sigar", + "-Xms128m", "-Xmx1024m"), + Keys.fork in run := true, + mainClass in (Compile, run) := Some("sample.cluster.simple.SimpleClusterApp") + ) + ) configs (MultiJvm) +} diff --git 
a/akka-samples/akka-sample-cluster-java/project/build.properties b/akka-samples/akka-sample-cluster-java/project/build.properties new file mode 100644 index 0000000000..0974fce44d --- /dev/null +++ b/akka-samples/akka-sample-cluster-java/project/build.properties @@ -0,0 +1 @@ +sbt.version=0.13.0 diff --git a/akka-samples/akka-sample-cluster-java/project/plugins.sbt b/akka-samples/akka-sample-cluster-java/project/plugins.sbt new file mode 100644 index 0000000000..c3e7d797de --- /dev/null +++ b/akka-samples/akka-sample-cluster-java/project/plugins.sbt @@ -0,0 +1,4 @@ + +resolvers += Classpaths.typesafeResolver + +addSbtPlugin("com.typesafe.sbt" % "sbt-multi-jvm" % "0.3.8") diff --git a/akka-samples/akka-sample-cluster/sigar/libsigar-amd64-freebsd-6.so b/akka-samples/akka-sample-cluster-java/sigar/libsigar-amd64-freebsd-6.so similarity index 100% rename from akka-samples/akka-sample-cluster/sigar/libsigar-amd64-freebsd-6.so rename to akka-samples/akka-sample-cluster-java/sigar/libsigar-amd64-freebsd-6.so diff --git a/akka-samples/akka-sample-cluster/sigar/libsigar-amd64-linux.so b/akka-samples/akka-sample-cluster-java/sigar/libsigar-amd64-linux.so similarity index 100% rename from akka-samples/akka-sample-cluster/sigar/libsigar-amd64-linux.so rename to akka-samples/akka-sample-cluster-java/sigar/libsigar-amd64-linux.so diff --git a/akka-samples/akka-sample-cluster/sigar/libsigar-amd64-solaris.so b/akka-samples/akka-sample-cluster-java/sigar/libsigar-amd64-solaris.so similarity index 100% rename from akka-samples/akka-sample-cluster/sigar/libsigar-amd64-solaris.so rename to akka-samples/akka-sample-cluster-java/sigar/libsigar-amd64-solaris.so diff --git a/akka-samples/akka-sample-cluster/sigar/libsigar-ia64-hpux-11.sl b/akka-samples/akka-sample-cluster-java/sigar/libsigar-ia64-hpux-11.sl similarity index 100% rename from akka-samples/akka-sample-cluster/sigar/libsigar-ia64-hpux-11.sl rename to akka-samples/akka-sample-cluster-java/sigar/libsigar-ia64-hpux-11.sl diff 
--git a/akka-samples/akka-sample-cluster/sigar/libsigar-ia64-linux.so b/akka-samples/akka-sample-cluster-java/sigar/libsigar-ia64-linux.so similarity index 100% rename from akka-samples/akka-sample-cluster/sigar/libsigar-ia64-linux.so rename to akka-samples/akka-sample-cluster-java/sigar/libsigar-ia64-linux.so diff --git a/akka-samples/akka-sample-cluster/sigar/libsigar-pa-hpux-11.sl b/akka-samples/akka-sample-cluster-java/sigar/libsigar-pa-hpux-11.sl similarity index 100% rename from akka-samples/akka-sample-cluster/sigar/libsigar-pa-hpux-11.sl rename to akka-samples/akka-sample-cluster-java/sigar/libsigar-pa-hpux-11.sl diff --git a/akka-samples/akka-sample-cluster/sigar/libsigar-ppc-aix-5.so b/akka-samples/akka-sample-cluster-java/sigar/libsigar-ppc-aix-5.so similarity index 100% rename from akka-samples/akka-sample-cluster/sigar/libsigar-ppc-aix-5.so rename to akka-samples/akka-sample-cluster-java/sigar/libsigar-ppc-aix-5.so diff --git a/akka-samples/akka-sample-cluster/sigar/libsigar-ppc-linux.so b/akka-samples/akka-sample-cluster-java/sigar/libsigar-ppc-linux.so similarity index 100% rename from akka-samples/akka-sample-cluster/sigar/libsigar-ppc-linux.so rename to akka-samples/akka-sample-cluster-java/sigar/libsigar-ppc-linux.so diff --git a/akka-samples/akka-sample-cluster/sigar/libsigar-ppc64-aix-5.so b/akka-samples/akka-sample-cluster-java/sigar/libsigar-ppc64-aix-5.so similarity index 100% rename from akka-samples/akka-sample-cluster/sigar/libsigar-ppc64-aix-5.so rename to akka-samples/akka-sample-cluster-java/sigar/libsigar-ppc64-aix-5.so diff --git a/akka-samples/akka-sample-cluster/sigar/libsigar-ppc64-linux.so b/akka-samples/akka-sample-cluster-java/sigar/libsigar-ppc64-linux.so similarity index 100% rename from akka-samples/akka-sample-cluster/sigar/libsigar-ppc64-linux.so rename to akka-samples/akka-sample-cluster-java/sigar/libsigar-ppc64-linux.so diff --git a/akka-samples/akka-sample-cluster/sigar/libsigar-s390x-linux.so 
b/akka-samples/akka-sample-cluster-java/sigar/libsigar-s390x-linux.so similarity index 100% rename from akka-samples/akka-sample-cluster/sigar/libsigar-s390x-linux.so rename to akka-samples/akka-sample-cluster-java/sigar/libsigar-s390x-linux.so diff --git a/akka-samples/akka-sample-cluster/sigar/libsigar-sparc-solaris.so b/akka-samples/akka-sample-cluster-java/sigar/libsigar-sparc-solaris.so similarity index 100% rename from akka-samples/akka-sample-cluster/sigar/libsigar-sparc-solaris.so rename to akka-samples/akka-sample-cluster-java/sigar/libsigar-sparc-solaris.so diff --git a/akka-samples/akka-sample-cluster/sigar/libsigar-sparc64-solaris.so b/akka-samples/akka-sample-cluster-java/sigar/libsigar-sparc64-solaris.so similarity index 100% rename from akka-samples/akka-sample-cluster/sigar/libsigar-sparc64-solaris.so rename to akka-samples/akka-sample-cluster-java/sigar/libsigar-sparc64-solaris.so diff --git a/akka-samples/akka-sample-cluster/sigar/libsigar-universal-macosx.dylib b/akka-samples/akka-sample-cluster-java/sigar/libsigar-universal-macosx.dylib similarity index 100% rename from akka-samples/akka-sample-cluster/sigar/libsigar-universal-macosx.dylib rename to akka-samples/akka-sample-cluster-java/sigar/libsigar-universal-macosx.dylib diff --git a/akka-samples/akka-sample-cluster/sigar/libsigar-universal64-macosx.dylib b/akka-samples/akka-sample-cluster-java/sigar/libsigar-universal64-macosx.dylib similarity index 100% rename from akka-samples/akka-sample-cluster/sigar/libsigar-universal64-macosx.dylib rename to akka-samples/akka-sample-cluster-java/sigar/libsigar-universal64-macosx.dylib diff --git a/akka-samples/akka-sample-cluster/sigar/libsigar-x86-freebsd-5.so b/akka-samples/akka-sample-cluster-java/sigar/libsigar-x86-freebsd-5.so similarity index 100% rename from akka-samples/akka-sample-cluster/sigar/libsigar-x86-freebsd-5.so rename to akka-samples/akka-sample-cluster-java/sigar/libsigar-x86-freebsd-5.so diff --git 
a/akka-samples/akka-sample-cluster/sigar/libsigar-x86-freebsd-6.so b/akka-samples/akka-sample-cluster-java/sigar/libsigar-x86-freebsd-6.so similarity index 100% rename from akka-samples/akka-sample-cluster/sigar/libsigar-x86-freebsd-6.so rename to akka-samples/akka-sample-cluster-java/sigar/libsigar-x86-freebsd-6.so diff --git a/akka-samples/akka-sample-cluster/sigar/libsigar-x86-linux.so b/akka-samples/akka-sample-cluster-java/sigar/libsigar-x86-linux.so similarity index 100% rename from akka-samples/akka-sample-cluster/sigar/libsigar-x86-linux.so rename to akka-samples/akka-sample-cluster-java/sigar/libsigar-x86-linux.so diff --git a/akka-samples/akka-sample-cluster/sigar/libsigar-x86-solaris.so b/akka-samples/akka-sample-cluster-java/sigar/libsigar-x86-solaris.so similarity index 100% rename from akka-samples/akka-sample-cluster/sigar/libsigar-x86-solaris.so rename to akka-samples/akka-sample-cluster-java/sigar/libsigar-x86-solaris.so diff --git a/akka-samples/akka-sample-cluster/sigar/sigar-amd64-winnt.dll b/akka-samples/akka-sample-cluster-java/sigar/sigar-amd64-winnt.dll similarity index 100% rename from akka-samples/akka-sample-cluster/sigar/sigar-amd64-winnt.dll rename to akka-samples/akka-sample-cluster-java/sigar/sigar-amd64-winnt.dll diff --git a/akka-samples/akka-sample-cluster/sigar/sigar-x86-winnt.dll b/akka-samples/akka-sample-cluster-java/sigar/sigar-x86-winnt.dll similarity index 100% rename from akka-samples/akka-sample-cluster/sigar/sigar-x86-winnt.dll rename to akka-samples/akka-sample-cluster-java/sigar/sigar-x86-winnt.dll diff --git a/akka-samples/akka-sample-cluster/sigar/sigar-x86-winnt.lib b/akka-samples/akka-sample-cluster-java/sigar/sigar-x86-winnt.lib similarity index 100% rename from akka-samples/akka-sample-cluster/sigar/sigar-x86-winnt.lib rename to akka-samples/akka-sample-cluster-java/sigar/sigar-x86-winnt.lib diff --git a/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/factorial/Extra.java 
b/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/factorial/Extra.java new file mode 100644 index 0000000000..1a1bfc1640 --- /dev/null +++ b/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/factorial/Extra.java @@ -0,0 +1,50 @@ +/** + * Copyright (C) 2011 Typesafe + */ +package sample.cluster.factorial; + +import java.util.Arrays; +import java.util.Collections; + +import akka.actor.ActorRef; +import akka.actor.Props; +import akka.actor.UntypedActor; +import akka.cluster.routing.AdaptiveLoadBalancingGroup; +import akka.cluster.routing.AdaptiveLoadBalancingPool; +import akka.cluster.routing.ClusterRouterGroup; +import akka.cluster.routing.ClusterRouterGroupSettings; +import akka.cluster.routing.ClusterRouterPool; +import akka.cluster.routing.ClusterRouterPoolSettings; +import akka.cluster.routing.HeapMetricsSelector; +import akka.cluster.routing.SystemLoadAverageMetricsSelector; + +//not used, only for documentation +abstract class FactorialFrontend2 extends UntypedActor { + //#router-lookup-in-code + int totalInstances = 100; + Iterable routeesPaths = Arrays.asList("/user/factorialBackend", ""); + boolean allowLocalRoutees = true; + String useRole = "backend"; + ActorRef backend = getContext().actorOf( + new ClusterRouterGroup(new AdaptiveLoadBalancingGroup( + HeapMetricsSelector.getInstance(), Collections. 
emptyList()), + new ClusterRouterGroupSettings(totalInstances, routeesPaths, + allowLocalRoutees, useRole)).props(), "factorialBackendRouter2"); + //#router-lookup-in-code +} + +//not used, only for documentation +abstract class FactorialFrontend3 extends UntypedActor { + //#router-deploy-in-code + int totalInstances = 100; + int maxInstancesPerNode = 3; + boolean allowLocalRoutees = false; + String useRole = "backend"; + ActorRef backend = getContext().actorOf( + new ClusterRouterPool(new AdaptiveLoadBalancingPool( + SystemLoadAverageMetricsSelector.getInstance(), 0), + new ClusterRouterPoolSettings(totalInstances, maxInstancesPerNode, + allowLocalRoutees, useRole)).props(Props + .create(FactorialBackend.class)), "factorialBackendRouter3"); + //#router-deploy-in-code +} diff --git a/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/factorial/FactorialApp.java b/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/factorial/FactorialApp.java new file mode 100644 index 0000000000..9c8c6ddeef --- /dev/null +++ b/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/factorial/FactorialApp.java @@ -0,0 +1,12 @@ +package sample.cluster.factorial; + +public class FactorialApp { + + public static void main(String[] args) { + // starting 3 backend nodes and 1 frontend node + FactorialBackendMain.main(new String[] { "2551" }); + FactorialBackendMain.main(new String[] { "2552" }); + FactorialBackendMain.main(new String[0]); + FactorialFrontendMain.main(new String[0]); + } +} diff --git a/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/factorial/japi/FactorialBackend.java b/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/factorial/FactorialBackend.java similarity index 77% rename from akka-samples/akka-sample-cluster/src/main/java/sample/cluster/factorial/japi/FactorialBackend.java rename to akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/factorial/FactorialBackend.java index 
b1f813f684..028de5879e 100644 --- a/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/factorial/japi/FactorialBackend.java +++ b/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/factorial/FactorialBackend.java @@ -1,6 +1,5 @@ -package sample.cluster.factorial.japi; +package sample.cluster.factorial; -//#imports import java.math.BigInteger; import java.util.concurrent.Callable; import scala.concurrent.Future; @@ -8,7 +7,6 @@ import akka.actor.UntypedActor; import akka.dispatch.Mapper; import static akka.dispatch.Futures.future; import static akka.pattern.Patterns.pipe; -//#imports //#backend public class FactorialBackend extends UntypedActor { @@ -24,11 +22,11 @@ public class FactorialBackend extends UntypedActor { }, getContext().dispatcher()); Future result = f.map( - new Mapper() { - public FactorialResult apply(BigInteger factorial) { - return new FactorialResult(n, factorial); - } - }, getContext().dispatcher()); + new Mapper() { + public FactorialResult apply(BigInteger factorial) { + return new FactorialResult(n, factorial); + } + }, getContext().dispatcher()); pipe(result, getContext().dispatcher()).to(getSender()); diff --git a/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/factorial/japi/FactorialBackendMain.java b/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/factorial/FactorialBackendMain.java similarity index 53% rename from akka-samples/akka-sample-cluster/src/main/java/sample/cluster/factorial/japi/FactorialBackendMain.java rename to akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/factorial/FactorialBackendMain.java index 73351f2317..6c322a6aa1 100644 --- a/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/factorial/japi/FactorialBackendMain.java +++ b/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/factorial/FactorialBackendMain.java @@ -1,4 +1,4 @@ -package sample.cluster.factorial.japi; +package sample.cluster.factorial; import 
com.typesafe.config.Config; import com.typesafe.config.ConfigFactory; @@ -7,14 +7,12 @@ import akka.actor.Props; public class FactorialBackendMain { - public static void main(String[] args) throws Exception { + public static void main(String[] args) { // Override the configuration of the port when specified as program argument - final Config config = - (args.length > 0 ? - ConfigFactory.parseString(String.format("akka.remote.netty.tcp.port=%s", args[0])) : - ConfigFactory.empty()). - withFallback(ConfigFactory.parseString("akka.cluster.roles = [backend]")). - withFallback(ConfigFactory.load("factorial")); + final String port = args.length > 0 ? args[0] : "0"; + final Config config = ConfigFactory.parseString("akka.remote.netty.tcp.port=" + port). + withFallback(ConfigFactory.parseString("akka.cluster.roles = [backend]")). + withFallback(ConfigFactory.load("factorial")); ActorSystem system = ActorSystem.create("ClusterSystem", config); diff --git a/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/factorial/FactorialFrontend.java b/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/factorial/FactorialFrontend.java new file mode 100644 index 0000000000..b1848a964f --- /dev/null +++ b/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/factorial/FactorialFrontend.java @@ -0,0 +1,64 @@ +package sample.cluster.factorial; + +import java.util.concurrent.TimeUnit; +import scala.concurrent.duration.Duration; +import akka.actor.ActorRef; +import akka.actor.ReceiveTimeout; +import akka.actor.UntypedActor; +import akka.event.Logging; +import akka.event.LoggingAdapter; +import akka.routing.FromConfig; + +//#frontend +public class FactorialFrontend extends UntypedActor { + final int upToN; + final boolean repeat; + + LoggingAdapter log = Logging.getLogger(getContext().system(), this); + + ActorRef backend = getContext().actorOf(FromConfig.getInstance().props(), + "factorialBackendRouter"); + + public FactorialFrontend(int upToN, 
boolean repeat) { + this.upToN = upToN; + this.repeat = repeat; + } + + @Override + public void preStart() { + sendJobs(); + getContext().setReceiveTimeout(Duration.create(10, TimeUnit.SECONDS)); + } + + @Override + public void onReceive(Object message) { + if (message instanceof FactorialResult) { + FactorialResult result = (FactorialResult) message; + if (result.n == upToN) { + log.debug("{}! = {}", result.n, result.factorial); + if (repeat) + sendJobs(); + else + getContext().stop(getSelf()); + } + + } else if (message instanceof ReceiveTimeout) { + log.info("Timeout"); + sendJobs(); + + } else { + unhandled(message); + } + } + + void sendJobs() { + log.info("Starting batch of factorials up to [{}]", upToN); + for (int n = 1; n <= upToN; n++) { + backend.tell(n, getSelf()); + } + } + +} + +//#frontend + diff --git a/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/factorial/japi/FactorialFrontendMain.java b/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/factorial/FactorialFrontendMain.java similarity index 53% rename from akka-samples/akka-sample-cluster/src/main/java/sample/cluster/factorial/japi/FactorialFrontendMain.java rename to akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/factorial/FactorialFrontendMain.java index 216c58bf10..492dbc5c95 100644 --- a/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/factorial/japi/FactorialFrontendMain.java +++ b/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/factorial/FactorialFrontendMain.java @@ -1,4 +1,4 @@ -package sample.cluster.factorial.japi; +package sample.cluster.factorial; import com.typesafe.config.Config; import com.typesafe.config.ConfigFactory; @@ -8,20 +8,22 @@ import akka.cluster.Cluster; public class FactorialFrontendMain { - public static void main(String[] args) throws Exception { - final int upToN = (args.length == 0 ? 
200 : Integer.valueOf(args[0])); + public static void main(String[] args) { + final int upToN = 200; - final Config config = ConfigFactory.parseString("akka.cluster.roles = [frontend]"). - withFallback(ConfigFactory.load("factorial")); + final Config config = ConfigFactory.parseString( + "akka.cluster.roles = [frontend]").withFallback( + ConfigFactory.load("factorial")); final ActorSystem system = ActorSystem.create("ClusterSystem", config); - system.log().info("Factorials will start when 2 backend members in the cluster."); + system.log().info( + "Factorials will start when 2 backend members in the cluster."); //#registerOnUp Cluster.get(system).registerOnMemberUp(new Runnable() { @Override public void run() { - system.actorOf(Props.create(FactorialFrontend.class, upToN, true), - "factorialFrontend"); + system.actorOf(Props.create(FactorialFrontend.class, upToN, true), + "factorialFrontend"); } }); //#registerOnUp diff --git a/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/factorial/japi/FactorialResult.java b/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/factorial/FactorialResult.java similarity index 87% rename from akka-samples/akka-sample-cluster/src/main/java/sample/cluster/factorial/japi/FactorialResult.java rename to akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/factorial/FactorialResult.java index 0cb74b6b54..8a80071a2e 100644 --- a/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/factorial/japi/FactorialResult.java +++ b/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/factorial/FactorialResult.java @@ -1,4 +1,4 @@ -package sample.cluster.factorial.japi; +package sample.cluster.factorial; import java.math.BigInteger; import java.io.Serializable; diff --git a/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/factorial/japi/MetricsListener.java b/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/factorial/MetricsListener.java similarity index 
97% rename from akka-samples/akka-sample-cluster/src/main/java/sample/cluster/factorial/japi/MetricsListener.java rename to akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/factorial/MetricsListener.java index 3acbf3e4c0..08ac1eb120 100644 --- a/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/factorial/japi/MetricsListener.java +++ b/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/factorial/MetricsListener.java @@ -1,4 +1,4 @@ -package sample.cluster.factorial.japi; +package sample.cluster.factorial; //#metrics-listener import akka.actor.UntypedActor; diff --git a/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/simple/SimpleClusterApp.java b/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/simple/SimpleClusterApp.java new file mode 100644 index 0000000000..c13041a513 --- /dev/null +++ b/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/simple/SimpleClusterApp.java @@ -0,0 +1,34 @@ +package sample.cluster.simple; + +import akka.actor.ActorSystem; +import akka.actor.Props; + +import com.typesafe.config.Config; +import com.typesafe.config.ConfigFactory; + +public class SimpleClusterApp { + + public static void main(String[] args) { + if (args.length == 0) + startup(new String[] { "2551", "2552", "0" }); + else + startup(args); + } + + public static void startup(String[] ports) { + for (String port : ports) { + // Override the configuration of the port + Config config = ConfigFactory.parseString( + "akka.remote.netty.tcp.port=" + port).withFallback( + ConfigFactory.load()); + + // Create an Akka system + ActorSystem system = ActorSystem.create("ClusterSystem", config); + + // Create an actor that handles cluster domain events + system.actorOf(Props.create(SimpleClusterListener.class), + "clusterListener"); + + } + } +} diff --git a/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/simple/japi/SimpleClusterListener.java 
b/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/simple/SimpleClusterListener.java similarity index 78% rename from akka-samples/akka-sample-cluster/src/main/java/sample/cluster/simple/japi/SimpleClusterListener.java rename to akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/simple/SimpleClusterListener.java index c5dc955303..755a73d749 100644 --- a/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/simple/japi/SimpleClusterListener.java +++ b/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/simple/SimpleClusterListener.java @@ -1,6 +1,7 @@ -package sample.cluster.simple.japi; +package sample.cluster.simple; import akka.actor.UntypedActor; +import akka.cluster.Cluster; import akka.cluster.ClusterEvent.ClusterDomainEvent; import akka.cluster.ClusterEvent.CurrentClusterState; import akka.cluster.ClusterEvent.MemberUp; @@ -11,6 +12,19 @@ import akka.event.LoggingAdapter; public class SimpleClusterListener extends UntypedActor { LoggingAdapter log = Logging.getLogger(getContext().system(), this); + Cluster cluster = Cluster.get(getContext().system()); + + //subscribe to cluster changes, MemberUp + @Override + public void preStart() { + cluster.subscribe(getSelf(), ClusterDomainEvent.class); + } + + //re-subscribe when restart + @Override + public void postStop() { + cluster.unsubscribe(getSelf()); + } @Override public void onReceive(Object message) { diff --git a/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/stats/Extra.java b/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/stats/Extra.java new file mode 100644 index 0000000000..a7de074c2d --- /dev/null +++ b/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/stats/Extra.java @@ -0,0 +1,43 @@ +package sample.cluster.stats; + +import java.util.Collections; + +import akka.actor.ActorRef; +import akka.actor.Props; +import akka.actor.UntypedActor; +import akka.cluster.routing.ClusterRouterGroup; 
+import akka.cluster.routing.ClusterRouterGroupSettings; +import akka.cluster.routing.ClusterRouterPool; +import akka.cluster.routing.ClusterRouterPoolSettings; +import akka.routing.ConsistentHashingGroup; +import akka.routing.ConsistentHashingPool; + +//not used, only for documentation +abstract class StatsService2 extends UntypedActor { + //#router-lookup-in-code + int totalInstances = 100; + Iterable routeesPaths = Collections + .singletonList("/user/statsWorker"); + boolean allowLocalRoutees = true; + String useRole = "compute"; + ActorRef workerRouter = getContext().actorOf( + new ClusterRouterGroup(new ConsistentHashingGroup(routeesPaths), + new ClusterRouterGroupSettings(totalInstances, routeesPaths, + allowLocalRoutees, useRole)).props(), "workerRouter2"); + //#router-lookup-in-code +} + +//not used, only for documentation +abstract class StatsService3 extends UntypedActor { + //#router-deploy-in-code + int totalInstances = 100; + int maxInstancesPerNode = 3; + boolean allowLocalRoutees = false; + String useRole = "compute"; + ActorRef workerRouter = getContext().actorOf( + new ClusterRouterPool(new ConsistentHashingPool(0), + new ClusterRouterPoolSettings(totalInstances, maxInstancesPerNode, + allowLocalRoutees, useRole)).props(Props + .create(StatsWorker.class)), "workerRouter3"); + //#router-deploy-in-code +} diff --git a/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/stats/japi/StatsAggregator.java b/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/stats/StatsAggregator.java similarity index 90% rename from akka-samples/akka-sample-cluster/src/main/java/sample/cluster/stats/japi/StatsAggregator.java rename to akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/stats/StatsAggregator.java index 0716cc38ec..f4fb49703b 100644 --- a/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/stats/japi/StatsAggregator.java +++ 
b/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/stats/StatsAggregator.java @@ -1,11 +1,11 @@ -package sample.cluster.stats.japi; +package sample.cluster.stats; import java.util.ArrayList; import java.util.List; import java.util.concurrent.TimeUnit; -import sample.cluster.stats.japi.StatsMessages.JobFailed; -import sample.cluster.stats.japi.StatsMessages.StatsResult; +import sample.cluster.stats.StatsMessages.JobFailed; +import sample.cluster.stats.StatsMessages.StatsResult; import scala.concurrent.duration.Duration; import akka.actor.ActorRef; import akka.actor.ReceiveTimeout; diff --git a/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/stats/japi/StatsFacade.java b/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/stats/StatsFacade.java similarity index 77% rename from akka-samples/akka-sample-cluster/src/main/java/sample/cluster/stats/japi/StatsFacade.java rename to akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/stats/StatsFacade.java index c62e48d8e6..24a8403640 100644 --- a/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/stats/japi/StatsFacade.java +++ b/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/stats/StatsFacade.java @@ -1,4 +1,4 @@ -package sample.cluster.stats.japi; +package sample.cluster.stats; import java.util.ArrayList; import java.util.Comparator; @@ -6,8 +6,8 @@ import java.util.List; import java.util.SortedSet; import java.util.TreeSet; -import sample.cluster.stats.japi.StatsMessages.JobFailed; -import sample.cluster.stats.japi.StatsMessages.StatsJob; +import sample.cluster.stats.StatsMessages.JobFailed; +import sample.cluster.stats.StatsMessages.StatsJob; import akka.actor.ActorSelection; import akka.actor.UntypedActor; import akka.cluster.Cluster; @@ -19,7 +19,6 @@ import akka.cluster.Member; import akka.event.Logging; import akka.event.LoggingAdapter; - //#facade public class StatsFacade extends UntypedActor { @@ -28,14 +27,16 @@ public 
class StatsFacade extends UntypedActor { final Comparator ageComparator = new Comparator() { public int compare(Member a, Member b) { - if (a.isOlderThan(b)) return -1; - else if (b.isOlderThan(a)) return 1; - else return 0; + if (a.isOlderThan(b)) + return -1; + else if (b.isOlderThan(a)) + return 1; + else + return 0; } }; final SortedSet membersByAge = new TreeSet(ageComparator); - //subscribe to cluster changes @Override public void preStart() { @@ -61,18 +62,21 @@ public class StatsFacade extends UntypedActor { CurrentClusterState state = (CurrentClusterState) message; List members = new ArrayList(); for (Member m : state.getMembers()) { - if (m.hasRole("compute")) members.add(m); + if (m.hasRole("compute")) + members.add(m); } membersByAge.clear(); membersByAge.addAll(members); } else if (message instanceof MemberUp) { Member m = ((MemberUp) message).member(); - if (m.hasRole("compute")) membersByAge.add(m); + if (m.hasRole("compute")) + membersByAge.add(m); } else if (message instanceof MemberRemoved) { - Member m = ((MemberUp) message).member(); - if (m.hasRole("compute")) membersByAge.remove(m); + Member m = ((MemberRemoved) message).member(); + if (m.hasRole("compute")) + membersByAge.remove(m); } else if (message instanceof MemberEvent) { // not interesting @@ -83,8 +87,8 @@ public class StatsFacade extends UntypedActor { } ActorSelection currentMaster() { - return getContext().actorSelection(membersByAge.first().address() + - "/user/singleton/statsService"); + return getContext().actorSelection( + membersByAge.first().address() + "/user/singleton/statsService"); } } diff --git a/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/stats/japi/StatsMessages.java b/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/stats/StatsMessages.java similarity index 96% rename from akka-samples/akka-sample-cluster/src/main/java/sample/cluster/stats/japi/StatsMessages.java rename to 
akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/stats/StatsMessages.java index 538024efa6..52d8c61ae7 100644 --- a/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/stats/japi/StatsMessages.java +++ b/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/stats/StatsMessages.java @@ -1,4 +1,4 @@ -package sample.cluster.stats.japi; +package sample.cluster.stats; import java.io.Serializable; diff --git a/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/stats/japi/StatsSampleClient.java b/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/stats/StatsSampleClient.java similarity index 93% rename from akka-samples/akka-sample-cluster/src/main/java/sample/cluster/stats/japi/StatsSampleClient.java rename to akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/stats/StatsSampleClient.java index 079c9f2988..acfaebee71 100644 --- a/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/stats/japi/StatsSampleClient.java +++ b/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/stats/StatsSampleClient.java @@ -1,4 +1,4 @@ -package sample.cluster.stats.japi; +package sample.cluster.stats; import java.util.ArrayList; import java.util.HashSet; @@ -6,9 +6,9 @@ import java.util.List; import java.util.Set; import java.util.concurrent.TimeUnit; -import sample.cluster.stats.japi.StatsMessages.JobFailed; -import sample.cluster.stats.japi.StatsMessages.StatsJob; -import sample.cluster.stats.japi.StatsMessages.StatsResult; +import sample.cluster.stats.StatsMessages.JobFailed; +import sample.cluster.stats.StatsMessages.StatsJob; +import sample.cluster.stats.StatsMessages.StatsResult; import scala.concurrent.forkjoin.ThreadLocalRandom; import scala.concurrent.duration.Duration; import scala.concurrent.duration.FiniteDuration; diff --git a/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/stats/StatsSampleClientMain.java 
b/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/stats/StatsSampleClientMain.java new file mode 100644 index 0000000000..ecc255aa77 --- /dev/null +++ b/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/stats/StatsSampleClientMain.java @@ -0,0 +1,17 @@ +package sample.cluster.stats; + +import akka.actor.ActorSystem; +import akka.actor.Props; + +import com.typesafe.config.ConfigFactory; + +public class StatsSampleClientMain { + + public static void main(String[] args) { + // note that client is not a compute node, role not defined + ActorSystem system = ActorSystem.create("ClusterSystem", + ConfigFactory.load("stats1")); + system.actorOf(Props.create(StatsSampleClient.class, "/user/statsService"), + "client"); + } +} diff --git a/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/stats/StatsSampleMain.java b/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/stats/StatsSampleMain.java new file mode 100644 index 0000000000..5cb5ffbdc0 --- /dev/null +++ b/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/stats/StatsSampleMain.java @@ -0,0 +1,36 @@ +package sample.cluster.stats; + +import com.typesafe.config.Config; +import com.typesafe.config.ConfigFactory; + +import akka.actor.ActorSystem; +import akka.actor.Props; + +public class StatsSampleMain { + + public static void main(String[] args) { + if (args.length == 0) { + startup(new String[] { "2551", "2552", "0" }); + StatsSampleClientMain.main(new String[0]); + } else { + startup(args); + } + } + + public static void startup(String[] ports) { + for (String port : ports) { + // Override the configuration of the port + Config config = ConfigFactory + .parseString("akka.remote.netty.tcp.port=" + port) + .withFallback( + ConfigFactory.parseString("akka.cluster.roles = [compute]")) + .withFallback(ConfigFactory.load("stats1")); + + ActorSystem system = ActorSystem.create("ClusterSystem", config); + + 
system.actorOf(Props.create(StatsWorker.class), "statsWorker"); + system.actorOf(Props.create(StatsService.class), "statsService"); + } + + } +} diff --git a/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/stats/japi/StatsSampleOneMasterClientMain.java b/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/stats/StatsSampleOneMasterClientMain.java similarity index 52% rename from akka-samples/akka-sample-cluster/src/main/java/sample/cluster/stats/japi/StatsSampleOneMasterClientMain.java rename to akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/stats/StatsSampleOneMasterClientMain.java index 96d8924ca9..a68238ce0b 100644 --- a/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/stats/japi/StatsSampleOneMasterClientMain.java +++ b/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/stats/StatsSampleOneMasterClientMain.java @@ -1,15 +1,18 @@ -package sample.cluster.stats.japi; +package sample.cluster.stats; + +import com.typesafe.config.ConfigFactory; import akka.actor.ActorSystem; import akka.actor.Props; public class StatsSampleOneMasterClientMain { - public static void main(String[] args) throws Exception { + public static void main(String[] args) { // note that client is not a compute node, role not defined - ActorSystem system = ActorSystem.create("ClusterSystem"); + ActorSystem system = ActorSystem.create("ClusterSystem", + ConfigFactory.load("stats2")); system.actorOf(Props.create(StatsSampleClient.class, "/user/statsFacade"), - "client"); + "client"); } diff --git a/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/stats/StatsSampleOneMasterMain.java b/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/stats/StatsSampleOneMasterMain.java new file mode 100644 index 0000000000..e549d47704 --- /dev/null +++ b/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/stats/StatsSampleOneMasterMain.java @@ -0,0 +1,43 @@ +package sample.cluster.stats; + 
+import com.typesafe.config.Config; +import com.typesafe.config.ConfigFactory; + +import akka.actor.ActorSystem; +import akka.actor.PoisonPill; +import akka.actor.Props; +import akka.contrib.pattern.ClusterSingletonManager; + +public class StatsSampleOneMasterMain { + + public static void main(String[] args) { + if (args.length == 0) { + startup(new String[] { "2551", "2552", "0" }); + StatsSampleOneMasterClientMain.main(new String[0]); + } else { + startup(args); + } + } + + public static void startup(String[] ports) { + for (String port : ports) { + // Override the configuration of the port + Config config = ConfigFactory + .parseString("akka.remote.netty.tcp.port=" + port) + .withFallback( + ConfigFactory.parseString("akka.cluster.roles = [compute]")) + .withFallback(ConfigFactory.load("stats2")); + + ActorSystem system = ActorSystem.create("ClusterSystem", config); + + //#create-singleton-manager + system.actorOf(ClusterSingletonManager.defaultProps( + Props.create(StatsService.class), "statsService", + PoisonPill.getInstance(), "compute"), "singleton"); + //#create-singleton-manager + + system.actorOf(Props.create(StatsFacade.class), "statsFacade"); + } + + } +} diff --git a/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/stats/StatsService.java b/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/stats/StatsService.java new file mode 100644 index 0000000000..a20be6ade3 --- /dev/null +++ b/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/stats/StatsService.java @@ -0,0 +1,48 @@ +package sample.cluster.stats; + +import sample.cluster.stats.StatsMessages.StatsJob; +import akka.actor.ActorRef; +import akka.actor.Props; +import akka.actor.UntypedActor; +import akka.routing.ConsistentHashingRouter.ConsistentHashableEnvelope; +import akka.routing.FromConfig; + +//#service +public class StatsService extends UntypedActor { + + // This router is used both with lookup and deploy of routees. 
If you + // have a router with only lookup of routees you can use Props.empty() + // instead of Props.create(StatsWorker.class). + ActorRef workerRouter = getContext().actorOf( + FromConfig.getInstance().props(Props.create(StatsWorker.class)), + "workerRouter"); + + @Override + public void onReceive(Object message) { + if (message instanceof StatsJob) { + StatsJob job = (StatsJob) message; + if (job.getText().equals("")) { + unhandled(message); + } else { + final String[] words = job.getText().split(" "); + final ActorRef replyTo = getSender(); + + // create actor that collects replies from workers + ActorRef aggregator = getContext().actorOf( + Props.create(StatsAggregator.class, words.length, replyTo)); + + // send each word to a worker + for (String word : words) { + workerRouter.tell(new ConsistentHashableEnvelope(word, word), + aggregator); + } + } + + } else { + unhandled(message); + } + } +} + +//#service + diff --git a/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/stats/japi/StatsWorker.java b/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/stats/StatsWorker.java similarity index 94% rename from akka-samples/akka-sample-cluster/src/main/java/sample/cluster/stats/japi/StatsWorker.java rename to akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/stats/StatsWorker.java index ca0af86af6..2d0c97cd19 100644 --- a/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/stats/japi/StatsWorker.java +++ b/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/stats/StatsWorker.java @@ -1,4 +1,4 @@ -package sample.cluster.stats.japi; +package sample.cluster.stats; import java.util.HashMap; import java.util.Map; diff --git a/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/transformation/TransformationApp.java b/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/transformation/TransformationApp.java new file mode 100644 index 0000000000..123c39f309 --- /dev/null +++ 
b/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/transformation/TransformationApp.java @@ -0,0 +1,12 @@ +package sample.cluster.transformation; + +public class TransformationApp { + + public static void main(String[] args) { + // starting 2 frontend nodes and 3 backend nodes + TransformationBackendMain.main(new String[] { "2551" }); + TransformationBackendMain.main(new String[] { "2552" }); + TransformationBackendMain.main(new String[0]); + TransformationFrontendMain.main(new String[0]); + } +} diff --git a/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/transformation/japi/TransformationBackend.java b/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/transformation/TransformationBackend.java similarity index 73% rename from akka-samples/akka-sample-cluster/src/main/java/sample/cluster/transformation/japi/TransformationBackend.java rename to akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/transformation/TransformationBackend.java index c76b6fed57..2bca33d71a 100644 --- a/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/transformation/japi/TransformationBackend.java +++ b/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/transformation/TransformationBackend.java @@ -1,16 +1,14 @@ -package sample.cluster.transformation.japi; +package sample.cluster.transformation; -import static sample.cluster.transformation.japi.TransformationMessages.BACKEND_REGISTRATION; -import sample.cluster.transformation.japi.TransformationMessages.TransformationJob; -import sample.cluster.transformation.japi.TransformationMessages.TransformationResult; -//#imports +import static sample.cluster.transformation.TransformationMessages.BACKEND_REGISTRATION; +import sample.cluster.transformation.TransformationMessages.TransformationJob; +import sample.cluster.transformation.TransformationMessages.TransformationResult; import akka.actor.UntypedActor; import akka.cluster.Cluster; import 
akka.cluster.ClusterEvent.CurrentClusterState; import akka.cluster.ClusterEvent.MemberUp; import akka.cluster.Member; import akka.cluster.MemberStatus; -//#imports //#backend public class TransformationBackend extends UntypedActor { @@ -33,9 +31,8 @@ public class TransformationBackend extends UntypedActor { public void onReceive(Object message) { if (message instanceof TransformationJob) { TransformationJob job = (TransformationJob) message; - getSender() - .tell(new TransformationResult(job.getText().toUpperCase()), - getSelf()); + getSender().tell(new TransformationResult(job.getText().toUpperCase()), + getSelf()); } else if (message instanceof CurrentClusterState) { CurrentClusterState state = (CurrentClusterState) message; @@ -57,7 +54,7 @@ public class TransformationBackend extends UntypedActor { void register(Member member) { if (member.hasRole("frontend")) getContext().actorSelection(member.address() + "/user/frontend").tell( - BACKEND_REGISTRATION, getSelf()); + BACKEND_REGISTRATION, getSelf()); } } //#backend diff --git a/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/transformation/TransformationBackendMain.java b/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/transformation/TransformationBackendMain.java new file mode 100644 index 0000000000..dd042b8d5b --- /dev/null +++ b/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/transformation/TransformationBackendMain.java @@ -0,0 +1,24 @@ +package sample.cluster.transformation; + +import com.typesafe.config.Config; +import com.typesafe.config.ConfigFactory; + +import akka.actor.ActorSystem; +import akka.actor.Props; + +public class TransformationBackendMain { + + public static void main(String[] args) { + // Override the configuration of the port when specified as program argument + final String port = args.length > 0 ? args[0] : "0"; + final Config config = ConfigFactory.parseString("akka.remote.netty.tcp.port=" + port). 
+ withFallback(ConfigFactory.parseString("akka.cluster.roles = [backend]")). + withFallback(ConfigFactory.load()); + + ActorSystem system = ActorSystem.create("ClusterSystem", config); + + system.actorOf(Props.create(TransformationBackend.class), "backend"); + + } + +} diff --git a/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/transformation/japi/TransformationFrontend.java b/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/transformation/TransformationFrontend.java similarity index 79% rename from akka-samples/akka-sample-cluster/src/main/java/sample/cluster/transformation/japi/TransformationFrontend.java rename to akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/transformation/TransformationFrontend.java index c1fe0f9038..4fab1cd559 100644 --- a/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/transformation/japi/TransformationFrontend.java +++ b/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/transformation/TransformationFrontend.java @@ -1,12 +1,12 @@ -package sample.cluster.transformation.japi; +package sample.cluster.transformation; -import static sample.cluster.transformation.japi.TransformationMessages.BACKEND_REGISTRATION; +import static sample.cluster.transformation.TransformationMessages.BACKEND_REGISTRATION; import java.util.ArrayList; import java.util.List; -import sample.cluster.transformation.japi.TransformationMessages.JobFailed; -import sample.cluster.transformation.japi.TransformationMessages.TransformationJob; +import sample.cluster.transformation.TransformationMessages.JobFailed; +import sample.cluster.transformation.TransformationMessages.TransformationJob; import akka.actor.ActorRef; import akka.actor.Terminated; import akka.actor.UntypedActor; diff --git a/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/transformation/TransformationFrontendMain.java 
b/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/transformation/TransformationFrontendMain.java new file mode 100644 index 0000000000..19ac9da177 --- /dev/null +++ b/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/transformation/TransformationFrontendMain.java @@ -0,0 +1,51 @@ +package sample.cluster.transformation; + +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; + +import com.typesafe.config.Config; +import com.typesafe.config.ConfigFactory; + +import sample.cluster.transformation.TransformationMessages.TransformationJob; +import scala.concurrent.ExecutionContext; +import scala.concurrent.duration.Duration; +import scala.concurrent.duration.FiniteDuration; +import akka.actor.ActorRef; +import akka.actor.ActorSystem; +import akka.actor.Props; +import akka.dispatch.OnSuccess; +import akka.util.Timeout; +import static akka.pattern.Patterns.ask; + +public class TransformationFrontendMain { + + public static void main(String[] args) { + // Override the configuration of the port when specified as program argument + final String port = args.length > 0 ? args[0] : "0"; + final Config config = ConfigFactory.parseString("akka.remote.netty.tcp.port=" + port). + withFallback(ConfigFactory.parseString("akka.cluster.roles = [frontend]")). 
+ withFallback(ConfigFactory.load()); + + ActorSystem system = ActorSystem.create("ClusterSystem", config); + + final ActorRef frontend = system.actorOf( + Props.create(TransformationFrontend.class), "frontend"); + final FiniteDuration interval = Duration.create(2, TimeUnit.SECONDS); + final Timeout timeout = new Timeout(Duration.create(5, TimeUnit.SECONDS)); + final ExecutionContext ec = system.dispatcher(); + final AtomicInteger counter = new AtomicInteger(); + system.scheduler().schedule(interval, interval, new Runnable() { + public void run() { + ask(frontend, + new TransformationJob("hello-" + counter.incrementAndGet()), + timeout).onSuccess(new OnSuccess() { + public void onSuccess(Object result) { + System.out.println(result); + } + }, ec); + } + + }, ec); + + } +} diff --git a/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/transformation/japi/TransformationMessages.java b/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/transformation/TransformationMessages.java similarity index 96% rename from akka-samples/akka-sample-cluster/src/main/java/sample/cluster/transformation/japi/TransformationMessages.java rename to akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/transformation/TransformationMessages.java index 133ebb09d2..1942122002 100644 --- a/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/transformation/japi/TransformationMessages.java +++ b/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/transformation/TransformationMessages.java @@ -1,4 +1,4 @@ -package sample.cluster.transformation.japi; +package sample.cluster.transformation; import java.io.Serializable; diff --git a/akka-samples/akka-sample-cluster-java/src/main/resources/application.conf b/akka-samples/akka-sample-cluster-java/src/main/resources/application.conf new file mode 100644 index 0000000000..41281e5485 --- /dev/null +++ b/akka-samples/akka-sample-cluster-java/src/main/resources/application.conf @@ -0,0 +1,20 
@@ +akka { + actor { + provider = "akka.cluster.ClusterActorRefProvider" + } + remote { + log-remote-lifecycle-events = off + netty.tcp { + hostname = "127.0.0.1" + port = 0 + } + } + + cluster { + seed-nodes = [ + "akka.tcp://ClusterSystem@127.0.0.1:2551", + "akka.tcp://ClusterSystem@127.0.0.1:2552"] + + auto-down-unreachable-after = 10s + } +} diff --git a/akka-samples/akka-sample-cluster-java/src/main/resources/factorial.conf b/akka-samples/akka-sample-cluster-java/src/main/resources/factorial.conf new file mode 100644 index 0000000000..def18c1e51 --- /dev/null +++ b/akka-samples/akka-sample-cluster-java/src/main/resources/factorial.conf @@ -0,0 +1,31 @@ +include "application" + +# //#min-nr-of-members +akka.cluster.min-nr-of-members = 3 +# //#min-nr-of-members + +# //#role-min-nr-of-members +akka.cluster.role { + frontend.min-nr-of-members = 1 + backend.min-nr-of-members = 2 +} +# //#role-min-nr-of-members + +# //#adaptive-router +akka.actor.deployment { + /factorialFrontend/factorialBackendRouter = { + router = adaptive-group + # metrics-selector = heap + # metrics-selector = load + # metrics-selector = cpu + metrics-selector = mix + nr-of-instances = 100 + routees.paths = ["/user/factorialBackend"] + cluster { + enabled = on + use-role = backend + allow-local-routees = off + } + } +} +# //#adaptive-router diff --git a/akka-samples/akka-sample-cluster-java/src/main/resources/stats1.conf b/akka-samples/akka-sample-cluster-java/src/main/resources/stats1.conf new file mode 100644 index 0000000000..9f376acb04 --- /dev/null +++ b/akka-samples/akka-sample-cluster-java/src/main/resources/stats1.conf @@ -0,0 +1,16 @@ +include "application" + +# //#config-router-lookup +akka.actor.deployment { + /statsService/workerRouter { + router = consistent-hashing-group + nr-of-instances = 100 + routees.paths = ["/user/statsWorker"] + cluster { + enabled = on + allow-local-routees = on + use-role = compute + } + } +} +# //#config-router-lookup diff --git 
a/akka-samples/akka-sample-cluster-java/src/main/resources/stats2.conf b/akka-samples/akka-sample-cluster-java/src/main/resources/stats2.conf new file mode 100644 index 0000000000..1eee48fd52 --- /dev/null +++ b/akka-samples/akka-sample-cluster-java/src/main/resources/stats2.conf @@ -0,0 +1,17 @@ +include "application" + +# //#config-router-deploy +akka.actor.deployment { + /singleton/statsService/workerRouter { + router = consistent-hashing-pool + nr-of-instances = 100 + cluster { + enabled = on + max-nr-of-instances-per-node = 3 + allow-local-routees = on + use-role = compute + } + } +} +# //#config-router-deploy + diff --git a/akka-samples/akka-sample-cluster/src/multi-jvm/scala/sample/cluster/stats/japi/StatsSampleSingleMasterJapiSpec.scala b/akka-samples/akka-sample-cluster-java/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSingleMasterSpec.scala similarity index 81% rename from akka-samples/akka-sample-cluster/src/multi-jvm/scala/sample/cluster/stats/japi/StatsSampleSingleMasterJapiSpec.scala rename to akka-samples/akka-sample-cluster-java/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSingleMasterSpec.scala index e24c35172d..547a644db1 100644 --- a/akka-samples/akka-sample-cluster/src/multi-jvm/scala/sample/cluster/stats/japi/StatsSampleSingleMasterJapiSpec.scala +++ b/akka-samples/akka-sample-cluster-java/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSingleMasterSpec.scala @@ -1,4 +1,4 @@ -package sample.cluster.stats.japi +package sample.cluster.stats import language.postfixOps import scala.concurrent.duration._ @@ -18,9 +18,9 @@ import akka.contrib.pattern.ClusterSingletonManager import akka.remote.testkit.MultiNodeConfig import akka.remote.testkit.MultiNodeSpec import akka.testkit.ImplicitSender -import sample.cluster.stats.japi.StatsMessages._ +import sample.cluster.stats.StatsMessages._ -object StatsSampleSingleMasterJapiSpecConfig extends MultiNodeConfig { +object StatsSampleSingleMasterSpecConfig extends MultiNodeConfig { // register 
the named roles (nodes) of the test val first = role("first") val second = role("second") @@ -35,6 +35,7 @@ object StatsSampleSingleMasterJapiSpecConfig extends MultiNodeConfig { akka.cluster.roles = [compute] # don't use sigar for tests, native lib not in path akka.cluster.metrics.collector-class = akka.cluster.JmxMetricsCollector + #//#router-deploy-config akka.actor.deployment { /singleton/statsService/workerRouter { router = consistent-hashing-pool @@ -42,24 +43,25 @@ object StatsSampleSingleMasterJapiSpecConfig extends MultiNodeConfig { cluster { enabled = on max-nr-of-instances-per-node = 3 - allow-local-routees = off + allow-local-routees = on use-role = compute } } } + #//#router-deploy-config """)) } // need one concrete test class per node -class StatsSampleSingleMasterJapiSpecMultiJvmNode1 extends StatsSampleSingleMasterJapiSpec -class StatsSampleSingleMasterJapiSpecMultiJvmNode2 extends StatsSampleSingleMasterJapiSpec -class StatsSampleSingleMasterJapiSpecMultiJvmNode3 extends StatsSampleSingleMasterJapiSpec +class StatsSampleSingleMasterSpecMultiJvmNode1 extends StatsSampleSingleMasterSpec +class StatsSampleSingleMasterSpecMultiJvmNode2 extends StatsSampleSingleMasterSpec +class StatsSampleSingleMasterSpecMultiJvmNode3 extends StatsSampleSingleMasterSpec -abstract class StatsSampleSingleMasterJapiSpec extends MultiNodeSpec(StatsSampleSingleMasterJapiSpecConfig) +abstract class StatsSampleSingleMasterSpec extends MultiNodeSpec(StatsSampleSingleMasterSpecConfig) with WordSpecLike with MustMatchers with BeforeAndAfterAll with ImplicitSender { - import StatsSampleSingleMasterJapiSpecConfig._ + import StatsSampleSingleMasterSpecConfig._ override def initialParticipants = roles.size @@ -78,7 +80,7 @@ abstract class StatsSampleSingleMasterJapiSpec extends MultiNodeSpec(StatsSample Cluster(system) join firstAddress - receiveN(3).collect { case MemberUp(m) ⇒ m.address }.toSet must be( + receiveN(3).collect { case MemberUp(m) => m.address }.toSet must be( 
Set(firstAddress, secondAddress, thirdAddress)) Cluster(system).unsubscribe(testActor) diff --git a/akka-samples/akka-sample-cluster/src/multi-jvm/scala/sample/cluster/stats/japi/StatsSampleJapiSpec.scala b/akka-samples/akka-sample-cluster-java/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSpec.scala similarity index 85% rename from akka-samples/akka-sample-cluster/src/multi-jvm/scala/sample/cluster/stats/japi/StatsSampleJapiSpec.scala rename to akka-samples/akka-sample-cluster-java/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSpec.scala index 97905b9344..a6953b77a5 100644 --- a/akka-samples/akka-sample-cluster/src/multi-jvm/scala/sample/cluster/stats/japi/StatsSampleJapiSpec.scala +++ b/akka-samples/akka-sample-cluster-java/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSpec.scala @@ -1,4 +1,4 @@ -package sample.cluster.stats.japi +package sample.cluster.stats import language.postfixOps import scala.concurrent.duration._ @@ -10,7 +10,7 @@ import akka.cluster.Member import akka.cluster.MemberStatus import akka.cluster.ClusterEvent.CurrentClusterState import akka.cluster.ClusterEvent.MemberUp -import sample.cluster.stats.japi.StatsMessages._ +import sample.cluster.stats.StatsMessages._ import akka.remote.testkit.MultiNodeConfig import com.typesafe.config.ConfigFactory import org.scalatest.BeforeAndAfterAll @@ -19,7 +19,7 @@ import org.scalatest.matchers.MustMatchers import akka.remote.testkit.MultiNodeSpec import akka.testkit.ImplicitSender -object StatsSampleJapiSpecConfig extends MultiNodeConfig { +object StatsSampleSpecConfig extends MultiNodeConfig { // register the named roles (nodes) of the test val first = role("first") val second = role("second") @@ -33,6 +33,7 @@ object StatsSampleJapiSpecConfig extends MultiNodeConfig { akka.cluster.roles = [compute] # don't use sigar for tests, native lib not in path akka.cluster.metrics.collector-class = akka.cluster.JmxMetricsCollector + #//#router-lookup-config akka.actor.deployment { 
/statsService/workerRouter { router = consistent-hashing-group @@ -45,20 +46,21 @@ object StatsSampleJapiSpecConfig extends MultiNodeConfig { } } } + #//#router-lookup-config """)) } // need one concrete test class per node -class StatsSampleJapiSpecMultiJvmNode1 extends StatsSampleJapiSpec -class StatsSampleJapiSpecMultiJvmNode2 extends StatsSampleJapiSpec -class StatsSampleJapiSpecMultiJvmNode3 extends StatsSampleJapiSpec +class StatsSampleSpecMultiJvmNode1 extends StatsSampleSpec +class StatsSampleSpecMultiJvmNode2 extends StatsSampleSpec +class StatsSampleSpecMultiJvmNode3 extends StatsSampleSpec -abstract class StatsSampleJapiSpec extends MultiNodeSpec(StatsSampleJapiSpecConfig) +abstract class StatsSampleSpec extends MultiNodeSpec(StatsSampleSpecConfig) with WordSpecLike with MustMatchers with BeforeAndAfterAll with ImplicitSender { - import StatsSampleJapiSpecConfig._ + import StatsSampleSpecConfig._ override def initialParticipants = roles.size @@ -81,7 +83,7 @@ abstract class StatsSampleJapiSpec extends MultiNodeSpec(StatsSampleJapiSpecConf system.actorOf(Props[StatsWorker], "statsWorker") system.actorOf(Props[StatsService], "statsService") - receiveN(3).collect { case MemberUp(m) ⇒ m.address }.toSet must be( + receiveN(3).collect { case MemberUp(m) => m.address }.toSet must be( Set(firstAddress, secondAddress, thirdAddress)) Cluster(system).unsubscribe(testActor) diff --git a/akka-samples/akka-sample-cluster/src/multi-jvm/scala/sample/cluster/transformation/japi/TransformationSampleJapiSpec.scala b/akka-samples/akka-sample-cluster-java/src/multi-jvm/scala/sample/cluster/transformation/TransformationSampleSpec.scala similarity index 81% rename from akka-samples/akka-sample-cluster/src/multi-jvm/scala/sample/cluster/transformation/japi/TransformationSampleJapiSpec.scala rename to akka-samples/akka-sample-cluster-java/src/multi-jvm/scala/sample/cluster/transformation/TransformationSampleSpec.scala index fafde81f18..40cbed5c66 100644 --- 
a/akka-samples/akka-sample-cluster/src/multi-jvm/scala/sample/cluster/transformation/japi/TransformationSampleJapiSpec.scala +++ b/akka-samples/akka-sample-cluster-java/src/multi-jvm/scala/sample/cluster/transformation/TransformationSampleSpec.scala @@ -1,4 +1,4 @@ -package sample.cluster.transformation.japi +package sample.cluster.transformation import language.postfixOps import scala.concurrent.duration._ @@ -14,9 +14,9 @@ import akka.cluster.Cluster import akka.remote.testkit.MultiNodeConfig import akka.remote.testkit.MultiNodeSpec import akka.testkit.ImplicitSender -import sample.cluster.transformation.japi.TransformationMessages._ +import sample.cluster.transformation.TransformationMessages._ -object TransformationSampleJapiSpecConfig extends MultiNodeConfig { +object TransformationSampleSpecConfig extends MultiNodeConfig { // register the named roles (nodes) of the test val frontend1 = role("frontend1") val frontend2 = role("frontend2") @@ -42,16 +42,16 @@ object TransformationSampleJapiSpecConfig extends MultiNodeConfig { } // need one concrete test class per node -class TransformationSampleJapiSpecMultiJvmNode1 extends TransformationSampleJapiSpec -class TransformationSampleJapiSpecMultiJvmNode2 extends TransformationSampleJapiSpec -class TransformationSampleJapiSpecMultiJvmNode3 extends TransformationSampleJapiSpec -class TransformationSampleJapiSpecMultiJvmNode4 extends TransformationSampleJapiSpec -class TransformationSampleJapiSpecMultiJvmNode5 extends TransformationSampleJapiSpec +class TransformationSampleSpecMultiJvmNode1 extends TransformationSampleSpec +class TransformationSampleSpecMultiJvmNode2 extends TransformationSampleSpec +class TransformationSampleSpecMultiJvmNode3 extends TransformationSampleSpec +class TransformationSampleSpecMultiJvmNode4 extends TransformationSampleSpec +class TransformationSampleSpecMultiJvmNode5 extends TransformationSampleSpec -abstract class TransformationSampleJapiSpec extends 
MultiNodeSpec(TransformationSampleJapiSpecConfig) +abstract class TransformationSampleSpec extends MultiNodeSpec(TransformationSampleSpecConfig) with WordSpecLike with MustMatchers with BeforeAndAfterAll with ImplicitSender { - import TransformationSampleJapiSpecConfig._ + import TransformationSampleSpecConfig._ override def initialParticipants = roles.size @@ -68,7 +68,7 @@ abstract class TransformationSampleJapiSpec extends MultiNodeSpec(Transformation transformationFrontend ! new TransformationJob("hello") expectMsgPF() { // no backends yet, service unavailble - case f: JobFailed ⇒ + case f: JobFailed => } } diff --git a/akka-samples/akka-sample-cluster-java/tutorial/index.html b/akka-samples/akka-sample-cluster-java/tutorial/index.html new file mode 100644 index 0000000000..2f6650b23e --- /dev/null +++ b/akka-samples/akka-sample-cluster-java/tutorial/index.html @@ -0,0 +1,482 @@ + + +Akka Cluster Samples with Java + + + + +
+

+This tutorial contains 4 samples illustrating different +Akka cluster features. +

+
    +
  • Subscribe to cluster membership events
  • +
  • Sending messages to actors running on nodes in the cluster
  • +
  • Cluster aware routers
  • +
  • Cluster metrics
  • +
+
+ +
+

A Simple Cluster Example

+ +

+Open application.conf +

+ +

+To enable cluster capabilities in your Akka project you should, at a minimum, add the remote settings, +and use akka.cluster.ClusterActorRefProvider. The akka.cluster.seed-nodes should +normally also be added to your application.conf file. +

+ +

+The seed nodes are configured contact points which newly started nodes will try to connect with in order to join the cluster. +

+ +

+Note that if you are going to start the nodes on different machines you need to specify the +ip-addresses or host names of the machines in application.conf instead of 127.0.0.1. +

+ +

+Open SimpleClusterApp.java. +

+ +

+The small program together with its configuration starts an ActorSystem with the Cluster enabled. +It joins the cluster and starts an actor that logs some membership events. +Take a look at the +SimpleClusterListener.java +actor. +

+ +

+You can read more about the cluster concepts in the +documentation. +

+ +

+To run this sample, go to the Run +tab, and start the application main class sample.cluster.simple.SimpleClusterApp +if it is not already started. +

+ +

+SimpleClusterApp starts three actor systems (cluster members) in the same JVM process. It can be more +interesting to run them in separate processes. Stop the application in the +Run tab and then open three terminal windows. +

+ +

+In the first terminal window, start the first seed node with the following command (on one line): +

+ +

+<path to activator dir>/activator 
+  "runMain sample.cluster.simple.SimpleClusterApp 2551"		
+
+ +

+2551 corresponds to the port of the first seed-nodes element in the configuration. In the log +output you see that the cluster node has been started and changed status to 'Up'. +

+ +

+In the second terminal window, start the second seed node with the following command (on one line): +

+ +

+<path to activator dir>/activator 
+  "runMain sample.cluster.simple.SimpleClusterApp 2552"		
+
+ +

+2552 corresponds to the port of the second seed-nodes element in the configuration. In the +log output you see that the cluster node has been started and joins the other seed node and +becomes a member of the cluster. Its status changed to 'Up'. +

+ +

+Switch over to the first terminal window and see in the log output that the member joined. +

+ +

+Start another node in the third terminal window with the following command (on one line): +

+ +

+<path to activator dir>/activator 
+  "runMain sample.cluster.simple.SimpleClusterApp 0"		
+
+ +

+Now you don't need to specify the port number, 0 means that it will use a random available port. +It joins one of the configured seed nodes. Look at the log output in the different terminal +windows. +

+ +

+Start even more nodes in the same way, if you like. +

+ +

+Shut down one of the nodes by pressing 'ctrl-c' in one of the terminal windows. +The other nodes will detect the failure after a while, which you can see in the log +output in the other terminals. +

+ +

+Look at the +source code +of the actor again. It registers itself as subscriber of certain cluster events. It gets notified with an snapshot event, +CurrentClusterState that holds full state information of the cluster. After that it receives events for changes +that happen in the cluster. +

+ +
+ +
+

Worker Dial-in Example

+ +

+In the previous sample we saw how to subscribe to cluster membership events. +You can read more about it in the +documentation. +How can cluster membership events be used? +

+ +

+Let's take a look at an example that illustrates how workers, here named backend, +can detect and register to new master nodes, here named frontend. +

+ +

+The example application provides a service to transform text. When some text +is sent to one of the frontend services, it will be delegated to one of the +backend workers, which performs the transformation job, and sends the result back to +the original client. New backend nodes, as well as new frontend nodes, can be +added or removed to the cluster dynamically. +

+ +

+Open TransformationMessages.java. +It defines the messages that are sent between the actors. +

+ +

+The backend worker that performs the transformation job is defined in +TransformationBackend.java +

+ +

+Note that the TransformationBackend actor subscribes to cluster events to detect new, +potential, frontend nodes, and send them a registration message so that they know +that they can use the backend worker. +

+ +

+The frontend that receives user jobs and delegates to one of the registered backend workers is defined in +TransformationFrontend.java +

+ +

+Note that the TransformationFrontend actor watch the registered backend +to be able to remove it from its list of available backend workers. +Death watch uses the cluster failure detector for nodes in the cluster, i.e. it detects +network failures and JVM crashes, in addition to graceful termination of watched +actor. +

+ +

+To run this sample, go to the Run +tab, and start the application main class sample.cluster.transformation.TransformationApp +if it is not already started. +

+ +

+TransformationApp starts +5 actor systems (cluster members) in the same JVM process. It can be more +interesting to run them in separate processes. Stop the application in the +Run tab and run the following commands in separate terminal windows. +

+ +

+<path to activator dir>/activator 
+  "runMain sample.cluster.transformation.TransformationFrontendMain 2551"		
+
+ +

+<path to activator dir>/activator 
+  "runMain sample.cluster.transformation.TransformationBackendMain 2552"		
+
+ +

+<path to activator dir>/activator 
+  "runMain sample.cluster.transformation.TransformationBackendMain 0"		
+
+ +

+<path to activator dir>/activator 
+  "runMain sample.cluster.transformation.TransformationBackendMain 0"		
+
+ +

+<path to activator dir>/activator 
+  "runMain sample.cluster.transformation.TransformationFrontendMain 0"		
+
+ +
+ +
+

Cluster Aware Routers

+ +

+All routers +can be made aware of member nodes in the cluster, i.e. deploying new routees or looking up routees +on nodes in the cluster. +When a node becomes unreachable or leaves the cluster the routees of that node are +automatically unregistered from the router. When new nodes join the cluster additional +routees are added to the router, according to the configuration. Routees are also added +when a node becomes reachable again, after having been unreachable. +

+ +

+You can read more about cluster aware routers in the +documentation. +

+ +

+Let's take a look at a few samples that make use of cluster aware routers. +

+ +
+ +
+

Router Example with Group of Routees

+ +

+Let's take a look at how to use a cluster aware router with a group of routees, +i.e. a router which does not create its routees but instead forwards incoming messages to a given +set of actors created elsewhere. +

+ +

+The example application provides a service to calculate statistics for a text. +When some text is sent to the service it splits it into words, and delegates the task +to count number of characters in each word to a separate worker, a routee of a router. +The character count for each word is sent back to an aggregator that calculates +the average number of characters per word when all results have been collected. +

+ +

+Open StatsMessages.java. +It defines the messages that are sent between the actors. +

+ +

+The worker that counts number of characters in each word is defined in +StatsWorker.java. +

+ +

+The service that receives text from users and splits it up into words, delegates to workers and aggregates +is defined in StatsService.java +and StatsAggregator.java. +

+ +

+Note, nothing cluster specific so far, just plain actors. +

+ +

+All nodes start StatsService and StatsWorker actors. Remember, routees are the workers in this case. +

+ +

+Open stats1.conf +The router is configured with routees.paths. +This means that user requests can be sent to StatsService on any node and it will use +StatsWorker on all nodes. +

+ +

+To run this sample, go to the Run +tab, and start the application main class sample.cluster.stats.StatsSampleMain +if it is not already started. +

+ +

+StatsSampleMain starts +4 actor systems (cluster members) in the same JVM process. It can be more +interesting to run them in separate processes. Stop the application in the +Run tab and run the following commands in separate terminal windows. +

+ +

+<path to activator dir>/activator 
+  "runMain sample.cluster.stats.StatsSampleMain 2551"		
+
+ +

+<path to activator dir>/activator 
+  "runMain sample.cluster.stats.StatsSampleMain 2552"		
+
+ +

+<path to activator dir>/activator 
+  "runMain sample.cluster.stats.StatsSampleClientMain"		
+
+ +

+<path to activator dir>/activator 
+  "runMain sample.cluster.stats.StatsSampleMain 0"		
+
+ +
+ +
+

Router Example with Pool of Remote Deployed Routees

+ +

+Let's take a look at how to use a cluster aware router on single master node that creates +and deploys workers instead of looking them up. +

+ +

+Open StatsSampleOneMasterMain.java. +To keep track of a single master we use the Cluster Singleton +in the contrib module. The ClusterSingletonManager is started on each node. +

+ +

+We also need an actor on each node that keeps track of where current single master exists and +delegates jobs to the StatsService. That is handled by the +StatsFacade.java +

+ +

+The StatsFacade receives text from users and delegates to the current StatsService, the single +master. It listens to cluster events to lookup the StatsService on the oldest node. +

+ +

+All nodes start StatsFacade and the ClusterSingletonManager. The router is now configured in +stats2.conf +

+ +

+To run this sample, go to the Run +tab, and start the application main class sample.cluster.stats.StatsSampleOneMasterMain +if it is not already started. +

+ +

+StatsSampleOneMasterMain starts +4 actor systems (cluster members) in the same JVM process. It can be more +interesting to run them in separate processes. Stop the application in the +Run tab and run the following commands in separate terminal windows. +

+ +

+<path to activator dir>/activator 
+  "runMain sample.cluster.stats.StatsSampleOneMasterMain 2551"		
+
+ +

+<path to activator dir>/activator 
+  "runMain sample.cluster.stats.StatsSampleOneMasterMain 2552"		
+
+ +

+<path to activator dir>/activator 
+  "runMain sample.cluster.stats.StatsSampleOneMasterClientMain"		
+
+ +

+<path to activator dir>/activator 
+  "runMain sample.cluster.stats.StatsSampleOneMasterMain 0"		
+
+ +
+ +
+

Adaptive Load Balancing

+ +

+The member nodes of the cluster collects system health metrics and publishes that to other nodes and to +registered subscribers. This information is primarily used for load-balancing routers, such as +the AdaptiveLoadBalancingPool and AdaptiveLoadBalancingGroup routers. +

+ +

+You can read more about cluster metrics in the +documentation. +

+ +

+Let's take a look at this router in action. What can be more demanding than calculating factorials? +

+ +

+The backend worker that performs the factorial calculation: +FactorialBackend +

+ +

+The frontend that receives user jobs and delegates to the backends via the router: +FactorialFrontend +

+ +

+As you can see, the router is defined in the same way as other routers, and in this case it is configured in: +factorial.conf +

+ +

+It is only router type adaptive and the metrics-selector that is specific to this router, +other things work in the same way as other routers. +

+ +

+To run this sample, go to the Run +tab, and start the application main class sample.cluster.factorial.FactorialApp +if it is not already started. +

+ +

+FactorialApp starts +4 actor systems (cluster members) in the same JVM process. It can be more +interesting to run them in separate processes. Stop the application in the +Run tab and run the following commands in separate terminal windows. +

+ +

+<path to activator dir>/activator 
+  "runMain sample.cluster.factorial.FactorialBackendMain 2551"		
+
+ +

+<path to activator dir>/activator 
+  "runMain sample.cluster.factorial.FactorialBackendMain 2552"		
+
+ +

+<path to activator dir>/activator 
+  "runMain sample.cluster.factorial.FactorialBackendMain 0"		
+
+ +

+<path to activator dir>/activator 
+  "runMain sample.cluster.factorial.FactorialFrontendMain 0"		
+
+ +

+Press ctrl-c in the terminal window of the frontend to stop the factorial calculations. +

+ +
+ + + diff --git a/akka-samples/akka-sample-cluster-scala/.gitignore b/akka-samples/akka-sample-cluster-scala/.gitignore new file mode 100644 index 0000000000..660c959e44 --- /dev/null +++ b/akka-samples/akka-sample-cluster-scala/.gitignore @@ -0,0 +1,17 @@ +*# +*.iml +*.ipr +*.iws +*.pyc +*.tm.epoch +*.vim +*-shim.sbt +.idea/ +/project/plugins/project +project/boot +target/ +/logs +.cache +.classpath +.project +.settings \ No newline at end of file diff --git a/akka-samples/akka-sample-cluster-scala/LICENSE b/akka-samples/akka-sample-cluster-scala/LICENSE new file mode 100644 index 0000000000..a02154466b --- /dev/null +++ b/akka-samples/akka-sample-cluster-scala/LICENSE @@ -0,0 +1,13 @@ +Copyright 2013 Typesafe, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
diff --git a/akka-samples/akka-sample-cluster-scala/activator.properties b/akka-samples/akka-sample-cluster-scala/activator.properties new file mode 100644 index 0000000000..5a68144e0d --- /dev/null +++ b/akka-samples/akka-sample-cluster-scala/activator.properties @@ -0,0 +1,4 @@ +name=akka-sample-cluster-scala +title=Akka Cluster Samples with Scala +description=Akka Cluster Samples with Scala +tags=akka,cluster,scala,sample diff --git a/akka-samples/akka-sample-cluster-scala/project/Build.scala b/akka-samples/akka-sample-cluster-scala/project/Build.scala new file mode 100644 index 0000000000..9a5a3d8043 --- /dev/null +++ b/akka-samples/akka-sample-cluster-scala/project/Build.scala @@ -0,0 +1,32 @@ +import sbt._ +import sbt.Keys._ +import com.typesafe.sbt.SbtMultiJvm +import com.typesafe.sbt.SbtMultiJvm.MultiJvmKeys.MultiJvm + +object AkkaSampleClusterBuild extends Build { + + val akkaVersion = "2.3-SNAPSHOT" + + lazy val akkaSampleCluster = Project( + id = "akka-sample-cluster-scala", + base = file("."), + settings = Project.defaultSettings ++ SbtMultiJvm.multiJvmSettings ++ Seq( + name := "akka-sample-cluster-scala", + version := "1.0", + scalaVersion := "2.10.3", + scalacOptions in Compile ++= Seq("-encoding", "UTF-8", "-target:jvm-1.6", "-deprecation", "-feature", "-unchecked", "-Xlog-reflective-calls", "-Xlint"), + javacOptions in Compile ++= Seq("-source", "1.6", "-target", "1.6", "-Xlint:unchecked", "-Xlint:deprecation"), + libraryDependencies ++= Seq( + "com.typesafe.akka" %% "akka-cluster" % akkaVersion, + "com.typesafe.akka" %% "akka-contrib" % akkaVersion, + "com.typesafe.akka" %% "akka-multi-node-testkit" % akkaVersion, + "org.scalatest" %% "scalatest" % "2.0" % "test", + "org.fusesource" % "sigar" % "1.6.4"), + javaOptions in run ++= Seq( + "-Djava.library.path=./sigar", + "-Xms128m", "-Xmx1024m"), + Keys.fork in run := true, + mainClass in (Compile, run) := Some("sample.cluster.simple.SimpleClusterApp") + ) + ) configs (MultiJvm) +} diff --git 
a/akka-samples/akka-sample-cluster-scala/project/build.properties b/akka-samples/akka-sample-cluster-scala/project/build.properties new file mode 100644 index 0000000000..0974fce44d --- /dev/null +++ b/akka-samples/akka-sample-cluster-scala/project/build.properties @@ -0,0 +1 @@ +sbt.version=0.13.0 diff --git a/akka-samples/akka-sample-cluster-scala/project/plugins.sbt b/akka-samples/akka-sample-cluster-scala/project/plugins.sbt new file mode 100644 index 0000000000..c3e7d797de --- /dev/null +++ b/akka-samples/akka-sample-cluster-scala/project/plugins.sbt @@ -0,0 +1,4 @@ + +resolvers += Classpaths.typesafeResolver + +addSbtPlugin("com.typesafe.sbt" % "sbt-multi-jvm" % "0.3.8") diff --git a/akka-samples/akka-sample-cluster-scala/sigar/libsigar-amd64-freebsd-6.so b/akka-samples/akka-sample-cluster-scala/sigar/libsigar-amd64-freebsd-6.so new file mode 100644 index 0000000000..3e94f0d2bf Binary files /dev/null and b/akka-samples/akka-sample-cluster-scala/sigar/libsigar-amd64-freebsd-6.so differ diff --git a/akka-samples/akka-sample-cluster-scala/sigar/libsigar-amd64-linux.so b/akka-samples/akka-sample-cluster-scala/sigar/libsigar-amd64-linux.so new file mode 100644 index 0000000000..5a2e4c24fe Binary files /dev/null and b/akka-samples/akka-sample-cluster-scala/sigar/libsigar-amd64-linux.so differ diff --git a/akka-samples/akka-sample-cluster-scala/sigar/libsigar-amd64-solaris.so b/akka-samples/akka-sample-cluster-scala/sigar/libsigar-amd64-solaris.so new file mode 100644 index 0000000000..6396482a43 Binary files /dev/null and b/akka-samples/akka-sample-cluster-scala/sigar/libsigar-amd64-solaris.so differ diff --git a/akka-samples/akka-sample-cluster-scala/sigar/libsigar-ia64-hpux-11.sl b/akka-samples/akka-sample-cluster-scala/sigar/libsigar-ia64-hpux-11.sl new file mode 100644 index 0000000000..d92ea4a96a Binary files /dev/null and b/akka-samples/akka-sample-cluster-scala/sigar/libsigar-ia64-hpux-11.sl differ diff --git 
a/akka-samples/akka-sample-cluster-scala/sigar/libsigar-ia64-linux.so b/akka-samples/akka-sample-cluster-scala/sigar/libsigar-ia64-linux.so new file mode 100644 index 0000000000..2bd2fc8e32 Binary files /dev/null and b/akka-samples/akka-sample-cluster-scala/sigar/libsigar-ia64-linux.so differ diff --git a/akka-samples/akka-sample-cluster-scala/sigar/libsigar-pa-hpux-11.sl b/akka-samples/akka-sample-cluster-scala/sigar/libsigar-pa-hpux-11.sl new file mode 100644 index 0000000000..0dfd8a1122 Binary files /dev/null and b/akka-samples/akka-sample-cluster-scala/sigar/libsigar-pa-hpux-11.sl differ diff --git a/akka-samples/akka-sample-cluster-scala/sigar/libsigar-ppc-aix-5.so b/akka-samples/akka-sample-cluster-scala/sigar/libsigar-ppc-aix-5.so new file mode 100644 index 0000000000..7d4b519921 Binary files /dev/null and b/akka-samples/akka-sample-cluster-scala/sigar/libsigar-ppc-aix-5.so differ diff --git a/akka-samples/akka-sample-cluster-scala/sigar/libsigar-ppc-linux.so b/akka-samples/akka-sample-cluster-scala/sigar/libsigar-ppc-linux.so new file mode 100644 index 0000000000..4394b1b00f Binary files /dev/null and b/akka-samples/akka-sample-cluster-scala/sigar/libsigar-ppc-linux.so differ diff --git a/akka-samples/akka-sample-cluster-scala/sigar/libsigar-ppc64-aix-5.so b/akka-samples/akka-sample-cluster-scala/sigar/libsigar-ppc64-aix-5.so new file mode 100644 index 0000000000..35fd828808 Binary files /dev/null and b/akka-samples/akka-sample-cluster-scala/sigar/libsigar-ppc64-aix-5.so differ diff --git a/akka-samples/akka-sample-cluster-scala/sigar/libsigar-ppc64-linux.so b/akka-samples/akka-sample-cluster-scala/sigar/libsigar-ppc64-linux.so new file mode 100644 index 0000000000..a1ba2529c9 Binary files /dev/null and b/akka-samples/akka-sample-cluster-scala/sigar/libsigar-ppc64-linux.so differ diff --git a/akka-samples/akka-sample-cluster-scala/sigar/libsigar-s390x-linux.so b/akka-samples/akka-sample-cluster-scala/sigar/libsigar-s390x-linux.so new file mode 100644 index 
0000000000..c275f4ac69 Binary files /dev/null and b/akka-samples/akka-sample-cluster-scala/sigar/libsigar-s390x-linux.so differ diff --git a/akka-samples/akka-sample-cluster-scala/sigar/libsigar-sparc-solaris.so b/akka-samples/akka-sample-cluster-scala/sigar/libsigar-sparc-solaris.so new file mode 100644 index 0000000000..aa847d2b54 Binary files /dev/null and b/akka-samples/akka-sample-cluster-scala/sigar/libsigar-sparc-solaris.so differ diff --git a/akka-samples/akka-sample-cluster-scala/sigar/libsigar-sparc64-solaris.so b/akka-samples/akka-sample-cluster-scala/sigar/libsigar-sparc64-solaris.so new file mode 100644 index 0000000000..6c4fe809c5 Binary files /dev/null and b/akka-samples/akka-sample-cluster-scala/sigar/libsigar-sparc64-solaris.so differ diff --git a/akka-samples/akka-sample-cluster-scala/sigar/libsigar-universal-macosx.dylib b/akka-samples/akka-sample-cluster-scala/sigar/libsigar-universal-macosx.dylib new file mode 100644 index 0000000000..27ab107111 Binary files /dev/null and b/akka-samples/akka-sample-cluster-scala/sigar/libsigar-universal-macosx.dylib differ diff --git a/akka-samples/akka-sample-cluster-scala/sigar/libsigar-universal64-macosx.dylib b/akka-samples/akka-sample-cluster-scala/sigar/libsigar-universal64-macosx.dylib new file mode 100644 index 0000000000..0c721fecf3 Binary files /dev/null and b/akka-samples/akka-sample-cluster-scala/sigar/libsigar-universal64-macosx.dylib differ diff --git a/akka-samples/akka-sample-cluster-scala/sigar/libsigar-x86-freebsd-5.so b/akka-samples/akka-sample-cluster-scala/sigar/libsigar-x86-freebsd-5.so new file mode 100644 index 0000000000..8c50c6117a Binary files /dev/null and b/akka-samples/akka-sample-cluster-scala/sigar/libsigar-x86-freebsd-5.so differ diff --git a/akka-samples/akka-sample-cluster-scala/sigar/libsigar-x86-freebsd-6.so b/akka-samples/akka-sample-cluster-scala/sigar/libsigar-x86-freebsd-6.so new file mode 100644 index 0000000000..f0800274a6 Binary files /dev/null and 
b/akka-samples/akka-sample-cluster-scala/sigar/libsigar-x86-freebsd-6.so differ diff --git a/akka-samples/akka-sample-cluster-scala/sigar/libsigar-x86-linux.so b/akka-samples/akka-sample-cluster-scala/sigar/libsigar-x86-linux.so new file mode 100644 index 0000000000..a0b64eddb0 Binary files /dev/null and b/akka-samples/akka-sample-cluster-scala/sigar/libsigar-x86-linux.so differ diff --git a/akka-samples/akka-sample-cluster-scala/sigar/libsigar-x86-solaris.so b/akka-samples/akka-sample-cluster-scala/sigar/libsigar-x86-solaris.so new file mode 100644 index 0000000000..c6452e5655 Binary files /dev/null and b/akka-samples/akka-sample-cluster-scala/sigar/libsigar-x86-solaris.so differ diff --git a/akka-samples/akka-sample-cluster-scala/sigar/sigar-amd64-winnt.dll b/akka-samples/akka-sample-cluster-scala/sigar/sigar-amd64-winnt.dll new file mode 100644 index 0000000000..1ec8a0353e Binary files /dev/null and b/akka-samples/akka-sample-cluster-scala/sigar/sigar-amd64-winnt.dll differ diff --git a/akka-samples/akka-sample-cluster-scala/sigar/sigar-x86-winnt.dll b/akka-samples/akka-sample-cluster-scala/sigar/sigar-x86-winnt.dll new file mode 100644 index 0000000000..6afdc0166c Binary files /dev/null and b/akka-samples/akka-sample-cluster-scala/sigar/sigar-x86-winnt.dll differ diff --git a/akka-samples/akka-sample-cluster-scala/sigar/sigar-x86-winnt.lib b/akka-samples/akka-sample-cluster-scala/sigar/sigar-x86-winnt.lib new file mode 100644 index 0000000000..04924a1fc1 Binary files /dev/null and b/akka-samples/akka-sample-cluster-scala/sigar/sigar-x86-winnt.lib differ diff --git a/akka-samples/akka-sample-cluster-scala/src/main/resources/application.conf b/akka-samples/akka-sample-cluster-scala/src/main/resources/application.conf new file mode 100644 index 0000000000..41281e5485 --- /dev/null +++ b/akka-samples/akka-sample-cluster-scala/src/main/resources/application.conf @@ -0,0 +1,20 @@ +akka { + actor { + provider = "akka.cluster.ClusterActorRefProvider" + } + remote { + 
log-remote-lifecycle-events = off + netty.tcp { + hostname = "127.0.0.1" + port = 0 + } + } + + cluster { + seed-nodes = [ + "akka.tcp://ClusterSystem@127.0.0.1:2551", + "akka.tcp://ClusterSystem@127.0.0.1:2552"] + + auto-down-unreachable-after = 10s + } +} diff --git a/akka-samples/akka-sample-cluster-scala/src/main/resources/factorial.conf b/akka-samples/akka-sample-cluster-scala/src/main/resources/factorial.conf new file mode 100644 index 0000000000..def18c1e51 --- /dev/null +++ b/akka-samples/akka-sample-cluster-scala/src/main/resources/factorial.conf @@ -0,0 +1,31 @@ +include "application" + +# //#min-nr-of-members +akka.cluster.min-nr-of-members = 3 +# //#min-nr-of-members + +# //#role-min-nr-of-members +akka.cluster.role { + frontend.min-nr-of-members = 1 + backend.min-nr-of-members = 2 +} +# //#role-min-nr-of-members + +# //#adaptive-router +akka.actor.deployment { + /factorialFrontend/factorialBackendRouter = { + router = adaptive-group + # metrics-selector = heap + # metrics-selector = load + # metrics-selector = cpu + metrics-selector = mix + nr-of-instances = 100 + routees.paths = ["/user/factorialBackend"] + cluster { + enabled = on + use-role = backend + allow-local-routees = off + } + } +} +# //#adaptive-router diff --git a/akka-samples/akka-sample-cluster-scala/src/main/resources/stats1.conf b/akka-samples/akka-sample-cluster-scala/src/main/resources/stats1.conf new file mode 100644 index 0000000000..9f376acb04 --- /dev/null +++ b/akka-samples/akka-sample-cluster-scala/src/main/resources/stats1.conf @@ -0,0 +1,16 @@ +include "application" + +# //#config-router-lookup +akka.actor.deployment { + /statsService/workerRouter { + router = consistent-hashing-group + nr-of-instances = 100 + routees.paths = ["/user/statsWorker"] + cluster { + enabled = on + allow-local-routees = on + use-role = compute + } + } +} +# //#config-router-lookup diff --git a/akka-samples/akka-sample-cluster-scala/src/main/resources/stats2.conf 
b/akka-samples/akka-sample-cluster-scala/src/main/resources/stats2.conf new file mode 100644 index 0000000000..1eee48fd52 --- /dev/null +++ b/akka-samples/akka-sample-cluster-scala/src/main/resources/stats2.conf @@ -0,0 +1,17 @@ +include "application" + +# //#config-router-deploy +akka.actor.deployment { + /singleton/statsService/workerRouter { + router = consistent-hashing-pool + nr-of-instances = 100 + cluster { + enabled = on + max-nr-of-instances-per-node = 3 + allow-local-routees = on + use-role = compute + } + } +} +# //#config-router-deploy + diff --git a/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/factorial/Extra.scala b/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/factorial/Extra.scala new file mode 100644 index 0000000000..24149b0b85 --- /dev/null +++ b/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/factorial/Extra.scala @@ -0,0 +1,38 @@ +package sample.cluster.factorial + +import akka.actor.Props +import akka.actor.Actor + +// not used, only for documentation +abstract class FactorialFrontend2 extends Actor { + //#router-lookup-in-code + import akka.cluster.routing.ClusterRouterGroup + import akka.cluster.routing.ClusterRouterGroupSettings + import akka.cluster.routing.AdaptiveLoadBalancingGroup + import akka.cluster.routing.HeapMetricsSelector + + val backend = context.actorOf( + ClusterRouterGroup(AdaptiveLoadBalancingGroup(HeapMetricsSelector), + ClusterRouterGroupSettings( + totalInstances = 100, routeesPaths = List("/user/factorialBackend"), + allowLocalRoutees = true, useRole = Some("backend"))).props(), + name = "factorialBackendRouter2") + //#router-lookup-in-code +} + +// not used, only for documentation +abstract class FactorialFrontend3 extends Actor { + //#router-deploy-in-code + import akka.cluster.routing.ClusterRouterPool + import akka.cluster.routing.ClusterRouterPoolSettings + import akka.cluster.routing.AdaptiveLoadBalancingPool + import 
akka.cluster.routing.SystemLoadAverageMetricsSelector + + val backend = context.actorOf( + ClusterRouterPool(AdaptiveLoadBalancingPool( + SystemLoadAverageMetricsSelector), ClusterRouterPoolSettings( + totalInstances = 100, maxInstancesPerNode = 3, + allowLocalRoutees = false, useRole = Some("backend"))).props(Props[FactorialBackend]), + name = "factorialBackendRouter3") + //#router-deploy-in-code +} \ No newline at end of file diff --git a/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/factorial/FactorialApp.scala b/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/factorial/FactorialApp.scala new file mode 100644 index 0000000000..69fb39c4ad --- /dev/null +++ b/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/factorial/FactorialApp.scala @@ -0,0 +1,11 @@ +package sample.cluster.factorial + +object FactorialApp { + def main(args: Array[String]): Unit = { + // starting 3 backend nodes and 1 frontend node + FactorialBackend.main(Seq("2551").toArray) + FactorialBackend.main(Seq("2552").toArray) + FactorialBackend.main(Array.empty) + FactorialFrontend.main(Array.empty) + } +} \ No newline at end of file diff --git a/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/factorial/FactorialBackend.scala b/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/factorial/FactorialBackend.scala new file mode 100644 index 0000000000..d1e41c0b9e --- /dev/null +++ b/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/factorial/FactorialBackend.scala @@ -0,0 +1,46 @@ +package sample.cluster.factorial + +import scala.annotation.tailrec +import scala.concurrent.Future +import com.typesafe.config.ConfigFactory +import akka.actor.Actor +import akka.actor.ActorLogging +import akka.actor.ActorSystem +import akka.actor.Props +import akka.pattern.pipe + +//#backend +class FactorialBackend extends Actor with ActorLogging { + + import context.dispatcher + + def receive = { + case 
(n: Int) => + Future(factorial(n)) map { result => (n, result) } pipeTo sender + } + + def factorial(n: Int): BigInt = { + @tailrec def factorialAcc(acc: BigInt, n: Int): BigInt = { + if (n <= 1) acc + else factorialAcc(acc * n, n - 1) + } + factorialAcc(BigInt(1), n) + } + +} +//#backend + +object FactorialBackend { + def main(args: Array[String]): Unit = { + // Override the configuration of the port when specified as program argument + val port = if (args.isEmpty) "0" else args(0) + val config = ConfigFactory.parseString(s"akka.remote.netty.tcp.port=$port"). + withFallback(ConfigFactory.parseString("akka.cluster.roles = [backend]")). + withFallback(ConfigFactory.load("factorial")) + + val system = ActorSystem("ClusterSystem", config) + system.actorOf(Props[FactorialBackend], name = "factorialBackend") + + system.actorOf(Props[MetricsListener], name = "metricsListener") + } +} \ No newline at end of file diff --git a/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/factorial/FactorialFrontend.scala b/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/factorial/FactorialFrontend.scala new file mode 100644 index 0000000000..b9606d1456 --- /dev/null +++ b/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/factorial/FactorialFrontend.scala @@ -0,0 +1,61 @@ +package sample.cluster.factorial + +import scala.concurrent.duration._ +import com.typesafe.config.ConfigFactory +import akka.actor.Actor +import akka.actor.ActorLogging +import akka.actor.ActorSystem +import akka.actor.Props +import akka.cluster.Cluster +import akka.routing.FromConfig +import akka.actor.ReceiveTimeout + +//#frontend +class FactorialFrontend(upToN: Int, repeat: Boolean) extends Actor with ActorLogging { + + val backend = context.actorOf(FromConfig.props(), + name = "factorialBackendRouter") + + override def preStart(): Unit = { + sendJobs() + if (repeat) { + context.setReceiveTimeout(10.seconds) + } + } + + def receive = { + case (n: Int, 
factorial: BigInt) => + if (n == upToN) { + log.debug("{}! = {}", n, factorial) + if (repeat) sendJobs() + else context.stop(self) + } + case ReceiveTimeout => + log.info("Timeout") + sendJobs() + } + + def sendJobs(): Unit = { + log.info("Starting batch of factorials up to [{}]", upToN) + 1 to upToN foreach { backend ! _ } + } +} +//#frontend + +object FactorialFrontend { + def main(args: Array[String]): Unit = { + val upToN = 200 + + val config = ConfigFactory.parseString("akka.cluster.roles = [frontend]"). + withFallback(ConfigFactory.load("factorial")) + + val system = ActorSystem("ClusterSystem", config) + system.log.info("Factorials will start when 2 backend members in the cluster.") + //#registerOnUp + Cluster(system) registerOnMemberUp { + system.actorOf(Props(classOf[FactorialFrontend], upToN, true), + name = "factorialFrontend") + } + //#registerOnUp + } +} \ No newline at end of file diff --git a/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/factorial/MetricsListener.scala b/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/factorial/MetricsListener.scala new file mode 100644 index 0000000000..3d6a2a890e --- /dev/null +++ b/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/factorial/MetricsListener.scala @@ -0,0 +1,47 @@ +package sample.cluster.factorial + +import akka.actor.ActorLogging +import akka.actor.Actor + +//#metrics-listener +import akka.cluster.Cluster +import akka.cluster.ClusterEvent.ClusterMetricsChanged +import akka.cluster.ClusterEvent.CurrentClusterState +import akka.cluster.NodeMetrics +import akka.cluster.StandardMetrics.HeapMemory +import akka.cluster.StandardMetrics.Cpu + +class MetricsListener extends Actor with ActorLogging { + val selfAddress = Cluster(context.system).selfAddress + + // subscribe to ClusterMetricsChanged + // re-subscribe when restart + override def preStart(): Unit = + Cluster(context.system).subscribe(self, classOf[ClusterMetricsChanged]) + override 
def postStop(): Unit = + Cluster(context.system).unsubscribe(self) + + def receive = { + case ClusterMetricsChanged(clusterMetrics) => + clusterMetrics.filter(_.address == selfAddress) foreach { nodeMetrics => + logHeap(nodeMetrics) + logCpu(nodeMetrics) + } + case state: CurrentClusterState => // ignore + } + + def logHeap(nodeMetrics: NodeMetrics): Unit = nodeMetrics match { + case HeapMemory(address, timestamp, used, committed, max) => + log.info("Used heap: {} MB", used.doubleValue / 1024 / 1024) + case _ => // no heap info + } + + def logCpu(nodeMetrics: NodeMetrics): Unit = nodeMetrics match { + case Cpu(address, timestamp, Some(systemLoadAverage), cpuCombined, processors) => + log.info("Load: {} ({} processors)", systemLoadAverage, processors) + case _ => // no cpu info + } +} + +//#metrics-listener + diff --git a/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/simple/SimpleClusterApp.scala b/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/simple/SimpleClusterApp.scala new file mode 100644 index 0000000000..1e87f49d25 --- /dev/null +++ b/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/simple/SimpleClusterApp.scala @@ -0,0 +1,29 @@ +package sample.cluster.simple + +import com.typesafe.config.ConfigFactory +import akka.actor.ActorSystem +import akka.actor.Props + +object SimpleClusterApp { + def main(args: Array[String]): Unit = { + if (args.isEmpty) + startup(Seq("2551", "2552", "0")) + else + startup(args) + } + + def startup(ports: Seq[String]): Unit = { + ports foreach { port => + // Override the configuration of the port + val config = ConfigFactory.parseString("akka.remote.netty.tcp.port=" + port). 
+ withFallback(ConfigFactory.load()) + + // Create an Akka system + val system = ActorSystem("ClusterSystem", config) + // Create an actor that handles cluster domain events + system.actorOf(Props[SimpleClusterListener], name = "clusterListener") + } + } + +} + diff --git a/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/simple/SimpleClusterListener.scala b/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/simple/SimpleClusterListener.scala new file mode 100644 index 0000000000..7f06f38ba2 --- /dev/null +++ b/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/simple/SimpleClusterListener.scala @@ -0,0 +1,28 @@ +package sample.cluster.simple + +import akka.cluster.Cluster +import akka.cluster.ClusterEvent._ +import akka.actor.ActorLogging +import akka.actor.Actor + +class SimpleClusterListener extends Actor with ActorLogging { + + val cluster = Cluster(context.system) + + // subscribe to cluster changes, re-subscribe when restart + override def preStart(): Unit = cluster.subscribe(self, classOf[ClusterDomainEvent]) + override def postStop(): Unit = cluster.unsubscribe(self) + + def receive = { + case state: CurrentClusterState => + log.info("Current members: {}", state.members.mkString(", ")) + case MemberUp(member) => + log.info("Member is Up: {}", member.address) + case UnreachableMember(member) => + log.info("Member detected as unreachable: {}", member) + case MemberRemoved(member, previousStatus) => + log.info("Member is Removed: {} after {}", + member.address, previousStatus) + case _: ClusterDomainEvent => // ignore + } +} \ No newline at end of file diff --git a/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/stats/Extra.scala b/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/stats/Extra.scala new file mode 100644 index 0000000000..f5163c84db --- /dev/null +++ b/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/stats/Extra.scala @@ -0,0 +1,34 
@@ +package sample.cluster.stats + +import akka.actor.Actor +import akka.actor.Props + +// not used, only for documentation +abstract class StatsService2 extends Actor { + //#router-lookup-in-code + import akka.cluster.routing.ClusterRouterGroup + import akka.cluster.routing.ClusterRouterGroupSettings + import akka.routing.ConsistentHashingGroup + + val workerRouter = context.actorOf( + ClusterRouterGroup(ConsistentHashingGroup(Nil), ClusterRouterGroupSettings( + totalInstances = 100, routeesPaths = List("/user/statsWorker"), + allowLocalRoutees = true, useRole = Some("compute"))).props(), + name = "workerRouter2") + //#router-lookup-in-code +} + +// not used, only for documentation +abstract class StatsService3 extends Actor { + //#router-deploy-in-code + import akka.cluster.routing.ClusterRouterPool + import akka.cluster.routing.ClusterRouterPoolSettings + import akka.routing.ConsistentHashingPool + + val workerRouter = context.actorOf( + ClusterRouterPool(ConsistentHashingPool(0), ClusterRouterPoolSettings( + totalInstances = 100, maxInstancesPerNode = 3, + allowLocalRoutees = false, useRole = None)).props(Props[StatsWorker]), + name = "workerRouter3") + //#router-deploy-in-code +} diff --git a/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/stats/StatsFacade.scala b/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/stats/StatsFacade.scala new file mode 100644 index 0000000000..a0cf0fe4d2 --- /dev/null +++ b/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/stats/StatsFacade.scala @@ -0,0 +1,48 @@ +package sample.cluster.stats + +import scala.collection.immutable +import akka.actor.Actor +import akka.actor.ActorLogging +import akka.actor.ActorSelection +import akka.actor.RootActorPath +import akka.cluster.Cluster +import akka.cluster.ClusterEvent.CurrentClusterState +import akka.cluster.ClusterEvent.MemberEvent +import akka.cluster.ClusterEvent.MemberRemoved +import akka.cluster.ClusterEvent.MemberUp 
+import akka.cluster.Member + +//#facade +class StatsFacade extends Actor with ActorLogging { + import context.dispatcher + val cluster = Cluster(context.system) + + // sort by age, oldest first + val ageOrdering = Ordering.fromLessThan[Member] { (a, b) => a.isOlderThan(b) } + var membersByAge: immutable.SortedSet[Member] = immutable.SortedSet.empty(ageOrdering) + + // subscribe to cluster changes + // re-subscribe when restart + override def preStart(): Unit = cluster.subscribe(self, classOf[MemberEvent]) + override def postStop(): Unit = cluster.unsubscribe(self) + + def receive = { + case job: StatsJob if membersByAge.isEmpty => + sender ! JobFailed("Service unavailable, try again later") + case job: StatsJob => + currentMaster.tell(job, sender) + case state: CurrentClusterState => + membersByAge = immutable.SortedSet.empty(ageOrdering) ++ state.members.collect { + case m if m.hasRole("compute") => m + } + case MemberUp(m) => if (m.hasRole("compute")) membersByAge += m + case MemberRemoved(m, _) => if (m.hasRole("compute")) membersByAge -= m + case _: MemberEvent => // not interesting + } + + def currentMaster: ActorSelection = + context.actorSelection(RootActorPath(membersByAge.head.address) / + "user" / "singleton" / "statsService") + +} +//#facade \ No newline at end of file diff --git a/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/stats/StatsMessages.scala b/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/stats/StatsMessages.scala new file mode 100644 index 0000000000..916dfae294 --- /dev/null +++ b/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/stats/StatsMessages.scala @@ -0,0 +1,7 @@ +package sample.cluster.stats + +//#messages +case class StatsJob(text: String) +case class StatsResult(meanWordLength: Double) +case class JobFailed(reason: String) +//#messages diff --git a/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/stats/StatsSample.scala 
b/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/stats/StatsSample.scala new file mode 100644 index 0000000000..ef286bd497 --- /dev/null +++ b/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/stats/StatsSample.scala @@ -0,0 +1,91 @@ +package sample.cluster.stats + +import scala.concurrent.duration._ +import scala.concurrent.forkjoin.ThreadLocalRandom +import com.typesafe.config.ConfigFactory +import akka.actor.Actor +import akka.actor.ActorSystem +import akka.actor.Address +import akka.actor.PoisonPill +import akka.actor.Props +import akka.actor.RelativeActorPath +import akka.actor.RootActorPath +import akka.cluster.Cluster +import akka.cluster.ClusterEvent._ +import akka.cluster.MemberStatus + +object StatsSample { + def main(args: Array[String]): Unit = { + if (args.isEmpty) { + startup(Seq("2551", "2552", "0")) + StatsSampleClient.main(Array.empty) + } else { + startup(args) + } + } + + def startup(ports: Seq[String]): Unit = { + ports foreach { port => + // Override the configuration of the port when specified as program argument + val config = + ConfigFactory.parseString(s"akka.remote.netty.tcp.port=" + port).withFallback( + ConfigFactory.parseString("akka.cluster.roles = [compute]")). 
+ withFallback(ConfigFactory.load("stats1")) + + val system = ActorSystem("ClusterSystem", config) + + system.actorOf(Props[StatsWorker], name = "statsWorker") + system.actorOf(Props[StatsService], name = "statsService") + } + } +} + +object StatsSampleClient { + def main(args: Array[String]): Unit = { + // note that client is not a compute node, role not defined + val system = ActorSystem("ClusterSystem") + system.actorOf(Props(classOf[StatsSampleClient], "/user/statsService"), "client") + } +} + +class StatsSampleClient(servicePath: String) extends Actor { + val cluster = Cluster(context.system) + val servicePathElements = servicePath match { + case RelativeActorPath(elements) => elements + case _ => throw new IllegalArgumentException( + "servicePath [%s] is not a valid relative actor path" format servicePath) + } + import context.dispatcher + val tickTask = context.system.scheduler.schedule(2.seconds, 2.seconds, self, "tick") + + var nodes = Set.empty[Address] + + override def preStart(): Unit = { + cluster.subscribe(self, classOf[MemberEvent]) + cluster.subscribe(self, classOf[UnreachableMember]) + } + override def postStop(): Unit = { + cluster.unsubscribe(self) + tickTask.cancel() + } + + def receive = { + case "tick" if nodes.nonEmpty => + // just pick any one + val address = nodes.toIndexedSeq(ThreadLocalRandom.current.nextInt(nodes.size)) + val service = context.actorSelection(RootActorPath(address) / servicePathElements) + service ! 
StatsJob("this is the text that will be analyzed") + case result: StatsResult => + println(result) + case failed: JobFailed => + println(failed) + case state: CurrentClusterState => + nodes = state.members.collect { + case m if m.hasRole("compute") && m.status == MemberStatus.Up => m.address + } + case MemberUp(m) if m.hasRole("compute") => nodes += m.address + case other: MemberEvent => nodes -= other.member.address + case UnreachableMember(m) => nodes -= m.address + } + +} diff --git a/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/stats/StatsSampleOneMaster.scala b/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/stats/StatsSampleOneMaster.scala new file mode 100644 index 0000000000..3fc7f5c8f1 --- /dev/null +++ b/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/stats/StatsSampleOneMaster.scala @@ -0,0 +1,47 @@ +package sample.cluster.stats + +import com.typesafe.config.ConfigFactory +import akka.actor.ActorSystem +import akka.actor.PoisonPill +import akka.actor.Props +import akka.contrib.pattern.ClusterSingletonManager + +object StatsSampleOneMaster { + def main(args: Array[String]): Unit = { + if (args.isEmpty) { + startup(Seq("2551", "2552", "0")) + StatsSampleOneMasterClient.main(Array.empty) + } else { + startup(args) + } + } + + def startup(ports: Seq[String]): Unit = { + ports foreach { port => + // Override the configuration of the port when specified as program argument + val config = + ConfigFactory.parseString(s"akka.remote.netty.tcp.port=" + port).withFallback( + ConfigFactory.parseString("akka.cluster.roles = [compute]")). 
+ withFallback(ConfigFactory.load("stats2")) + + val system = ActorSystem("ClusterSystem", config) + + //#create-singleton-manager + system.actorOf(ClusterSingletonManager.props( + singletonProps = Props[StatsService], singletonName = "statsService", + terminationMessage = PoisonPill, role = Some("compute")), + name = "singleton") + //#create-singleton-manager + system.actorOf(Props[StatsFacade], name = "statsFacade") + } + } +} + +object StatsSampleOneMasterClient { + def main(args: Array[String]): Unit = { + // note that client is not a compute node, role not defined + val system = ActorSystem("ClusterSystem") + system.actorOf(Props(classOf[StatsSampleClient], "/user/statsFacade"), "client") + } +} + diff --git a/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/stats/StatsService.scala b/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/stats/StatsService.scala new file mode 100644 index 0000000000..f7d21d13bb --- /dev/null +++ b/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/stats/StatsService.scala @@ -0,0 +1,50 @@ +package sample.cluster.stats + +import scala.concurrent.duration._ +import akka.actor.Actor +import akka.actor.ActorRef +import akka.actor.Props +import akka.actor.ReceiveTimeout +import akka.routing.ConsistentHashingRouter.ConsistentHashableEnvelope +import akka.routing.FromConfig + +//#service +class StatsService extends Actor { + // This router is used both with lookup and deploy of routees. If you + // have a router with only lookup of routees you can use Props.empty + // instead of Props[StatsWorker.class]. 
+ val workerRouter = context.actorOf(FromConfig.props(Props[StatsWorker]), + name = "workerRouter") + + def receive = { + case StatsJob(text) if text != "" => + val words = text.split(" ") + val replyTo = sender // important to not close over sender + // create actor that collects replies from workers + val aggregator = context.actorOf(Props( + classOf[StatsAggregator], words.size, replyTo)) + words foreach { word => + workerRouter.tell( + ConsistentHashableEnvelope(word, word), aggregator) + } + } +} + +class StatsAggregator(expectedResults: Int, replyTo: ActorRef) extends Actor { + var results = IndexedSeq.empty[Int] + context.setReceiveTimeout(3.seconds) + + def receive = { + case wordCount: Int => + results = results :+ wordCount + if (results.size == expectedResults) { + val meanWordLength = results.sum.toDouble / results.size + replyTo ! StatsResult(meanWordLength) + context.stop(self) + } + case ReceiveTimeout => + replyTo ! JobFailed("Service unavailable, try again later") + context.stop(self) + } +} +//#service diff --git a/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/stats/StatsWorker.scala b/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/stats/StatsWorker.scala new file mode 100644 index 0000000000..dd2ebd4bfc --- /dev/null +++ b/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/stats/StatsWorker.scala @@ -0,0 +1,21 @@ +package sample.cluster.stats + +import akka.actor.Actor + +//#worker +class StatsWorker extends Actor { + var cache = Map.empty[String, Int] + def receive = { + case word: String => + val length = cache.get(word) match { + case Some(x) => x + case None => + val x = word.length + cache += (word -> x) + x + } + + sender ! 
length + } +} +//#worker \ No newline at end of file diff --git a/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/transformation/TransformationApp.scala b/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/transformation/TransformationApp.scala new file mode 100644 index 0000000000..e1ea6f1518 --- /dev/null +++ b/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/transformation/TransformationApp.scala @@ -0,0 +1,14 @@ +package sample.cluster.transformation + +object TransformationApp { + + def main(args: Array[String]): Unit = { + // starting 2 frontend nodes and 3 backend nodes + TransformationFrontend.main(Seq("2551").toArray) + TransformationBackend.main(Seq("2552").toArray) + TransformationBackend.main(Array.empty) + TransformationBackend.main(Array.empty) + TransformationFrontend.main(Array.empty) + } + +} \ No newline at end of file diff --git a/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/transformation/TransformationBackend.scala b/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/transformation/TransformationBackend.scala new file mode 100644 index 0000000000..6148da8850 --- /dev/null +++ b/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/transformation/TransformationBackend.scala @@ -0,0 +1,52 @@ +package sample.cluster.transformation + +import language.postfixOps +import scala.concurrent.duration._ +import akka.actor.Actor +import akka.actor.ActorRef +import akka.actor.ActorSystem +import akka.actor.Props +import akka.actor.RootActorPath +import akka.cluster.Cluster +import akka.cluster.ClusterEvent.CurrentClusterState +import akka.cluster.ClusterEvent.MemberUp +import akka.cluster.Member +import akka.cluster.MemberStatus +import com.typesafe.config.ConfigFactory + +//#backend +class TransformationBackend extends Actor { + + val cluster = Cluster(context.system) + + // subscribe to cluster changes, MemberUp + // re-subscribe when 
restart + override def preStart(): Unit = cluster.subscribe(self, classOf[MemberUp]) + override def postStop(): Unit = cluster.unsubscribe(self) + + def receive = { + case TransformationJob(text) => sender ! TransformationResult(text.toUpperCase) + case state: CurrentClusterState => + state.members.filter(_.status == MemberStatus.Up) foreach register + case MemberUp(m) => register(m) + } + + def register(member: Member): Unit = + if (member.hasRole("frontend")) + context.actorSelection(RootActorPath(member.address) / "user" / "frontend") ! + BackendRegistration +} +//#backend + +object TransformationBackend { + def main(args: Array[String]): Unit = { + // Override the configuration of the port when specified as program argument + val port = if (args.isEmpty) "0" else args(0) + val config = ConfigFactory.parseString(s"akka.remote.netty.tcp.port=$port"). + withFallback(ConfigFactory.parseString("akka.cluster.roles = [backend]")). + withFallback(ConfigFactory.load()) + + val system = ActorSystem("ClusterSystem", config) + system.actorOf(Props[TransformationBackend], name = "backend") + } +} \ No newline at end of file diff --git a/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/transformation/TransformationFrontend.scala b/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/transformation/TransformationFrontend.scala new file mode 100644 index 0000000000..18d5a25e86 --- /dev/null +++ b/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/transformation/TransformationFrontend.scala @@ -0,0 +1,60 @@ +package sample.cluster.transformation + +import language.postfixOps +import scala.concurrent.duration._ +import akka.actor.Actor +import akka.actor.ActorRef +import akka.actor.ActorSystem +import akka.actor.Props +import akka.actor.Terminated +import akka.pattern.ask +import akka.util.Timeout +import com.typesafe.config.ConfigFactory +import java.util.concurrent.atomic.AtomicInteger + +//#frontend +class 
TransformationFrontend extends Actor { + + var backends = IndexedSeq.empty[ActorRef] + var jobCounter = 0 + + def receive = { + case job: TransformationJob if backends.isEmpty => + sender ! JobFailed("Service unavailable, try again later", job) + + case job: TransformationJob => + jobCounter += 1 + backends(jobCounter % backends.size) forward job + + case BackendRegistration if !backends.contains(sender) => + context watch sender + backends = backends :+ sender + + case Terminated(a) => + backends = backends.filterNot(_ == a) + } +} +//#frontend + +object TransformationFrontend { + def main(args: Array[String]): Unit = { + // Override the configuration of the port when specified as program argument + val port = if (args.isEmpty) "0" else args(0) + val config = ConfigFactory.parseString(s"akka.remote.netty.tcp.port=$port"). + withFallback(ConfigFactory.parseString("akka.cluster.roles = [frontend]")). + withFallback(ConfigFactory.load()) + + val system = ActorSystem("ClusterSystem", config) + val frontend = system.actorOf(Props[TransformationFrontend], name = "frontend") + + val counter = new AtomicInteger + import system.dispatcher + system.scheduler.schedule(2.seconds, 2.seconds) { + implicit val timeout = Timeout(5 seconds) + (frontend ? 
TransformationJob("hello-" + counter.incrementAndGet())) onSuccess { + case result => println(result) + } + } + + } +} \ No newline at end of file diff --git a/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/transformation/TransformationMessages.scala b/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/transformation/TransformationMessages.scala new file mode 100644 index 0000000000..0d4ac7c02a --- /dev/null +++ b/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/transformation/TransformationMessages.scala @@ -0,0 +1,8 @@ +package sample.cluster.transformation + +//#messages +case class TransformationJob(text: String) +case class TransformationResult(text: String) +case class JobFailed(reason: String, job: TransformationJob) +case object BackendRegistration +//#messages diff --git a/akka-samples/akka-sample-cluster/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSingleMasterSpec.scala b/akka-samples/akka-sample-cluster-scala/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSingleMasterSpec.scala similarity index 97% rename from akka-samples/akka-sample-cluster/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSingleMasterSpec.scala rename to akka-samples/akka-sample-cluster-scala/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSingleMasterSpec.scala index 9388a39c40..e9db531ae7 100644 --- a/akka-samples/akka-sample-cluster/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSingleMasterSpec.scala +++ b/akka-samples/akka-sample-cluster-scala/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSingleMasterSpec.scala @@ -42,7 +42,7 @@ object StatsSampleSingleMasterSpecConfig extends MultiNodeConfig { cluster { enabled = on max-nr-of-instances-per-node = 3 - allow-local-routees = off + allow-local-routees = on use-role = compute } } @@ -79,7 +79,7 @@ abstract class StatsSampleSingleMasterSpec extends MultiNodeSpec(StatsSampleSing Cluster(system) join firstAddress - receiveN(3).collect { case 
MemberUp(m) ⇒ m.address }.toSet must be( + receiveN(3).collect { case MemberUp(m) => m.address }.toSet must be( Set(firstAddress, secondAddress, thirdAddress)) Cluster(system).unsubscribe(testActor) diff --git a/akka-samples/akka-sample-cluster/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSpec.scala b/akka-samples/akka-sample-cluster-scala/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSpec.scala similarity index 98% rename from akka-samples/akka-sample-cluster/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSpec.scala rename to akka-samples/akka-sample-cluster-scala/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSpec.scala index 39b639c289..fae44ea1fa 100644 --- a/akka-samples/akka-sample-cluster/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSpec.scala +++ b/akka-samples/akka-sample-cluster-scala/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSpec.scala @@ -96,7 +96,7 @@ abstract class StatsSampleSpec extends MultiNodeSpec(StatsSampleSpecConfig) system.actorOf(Props[StatsWorker], "statsWorker") system.actorOf(Props[StatsService], "statsService") - receiveN(3).collect { case MemberUp(m) ⇒ m.address }.toSet must be( + receiveN(3).collect { case MemberUp(m) => m.address }.toSet must be( Set(firstAddress, secondAddress, thirdAddress)) Cluster(system).unsubscribe(testActor) diff --git a/akka-samples/akka-sample-cluster/src/multi-jvm/scala/sample/cluster/transformation/TransformationSampleSpec.scala b/akka-samples/akka-sample-cluster-scala/src/multi-jvm/scala/sample/cluster/transformation/TransformationSampleSpec.scala similarity index 98% rename from akka-samples/akka-sample-cluster/src/multi-jvm/scala/sample/cluster/transformation/TransformationSampleSpec.scala rename to akka-samples/akka-sample-cluster-scala/src/multi-jvm/scala/sample/cluster/transformation/TransformationSampleSpec.scala index f68148b3b6..908ee67a1d 100644 --- 
a/akka-samples/akka-sample-cluster/src/multi-jvm/scala/sample/cluster/transformation/TransformationSampleSpec.scala +++ b/akka-samples/akka-sample-cluster-scala/src/multi-jvm/scala/sample/cluster/transformation/TransformationSampleSpec.scala @@ -66,7 +66,7 @@ abstract class TransformationSampleSpec extends MultiNodeSpec(TransformationSamp transformationFrontend ! TransformationJob("hello") expectMsgPF() { // no backends yet, service unavailble - case JobFailed(_, TransformationJob("hello")) ⇒ + case JobFailed(_, TransformationJob("hello")) => } } diff --git a/akka-samples/akka-sample-cluster-scala/tutorial/index.html b/akka-samples/akka-sample-cluster-scala/tutorial/index.html new file mode 100644 index 0000000000..88e60d1356 --- /dev/null +++ b/akka-samples/akka-sample-cluster-scala/tutorial/index.html @@ -0,0 +1,481 @@ + + +Akka Cluster Samples with Scala + + + + +
+

+This tutorial contains 4 samples illustrating different +Akka cluster features. +

+
    +
  • Subscribe to cluster membership events
  • +
  • Sending messages to actors running on nodes in the cluster
  • +
  • Cluster aware routers
  • +
  • Cluster metrics
  • +
+
+ +
+

A Simple Cluster Example

+ +

+Open application.conf +

+ +

+To enable cluster capabilities in your Akka project you should, at a minimum, add the remote settings, +and use akka.cluster.ClusterActorRefProvider. The akka.cluster.seed-nodes should +normally also be added to your application.conf file. +

+ +

+The seed nodes are configured contact points which newly started nodes will try to connect with in order to join the cluster. +

+ +

+Note that if you are going to start the nodes on different machines you need to specify the +ip-addresses or host names of the machines in application.conf instead of 127.0.0.1. +

+ +

+Open SimpleClusterApp.scala. +

+ +

+The small program together with its configuration starts an ActorSystem with the Cluster enabled. +It joins the cluster and starts an actor that logs some membership events. +Take a look at the +SimpleClusterListener.scala +actor.

+ +

+You can read more about the cluster concepts in the +documentation. +

+ +

+To run this sample, go to the Run +tab, and start the application main class sample.cluster.simple.SimpleClusterApp +if it is not already started. +

+ +

+SimpleClusterApp starts three actor systems (cluster members) in the same JVM process. It can be more +interesting to run them in separate processes. Stop the application in the +Run tab and then open three terminal windows. +

+ +

+In the first terminal window, start the first seed node with the following command (on one line): +

+ +

+<path to activator dir>/activator 
+  "runMain sample.cluster.simple.SimpleClusterApp 2551"		
+
+ +

+2551 corresponds to the port of the first seed-nodes element in the configuration. In the log +output you see that the cluster node has been started and changed status to 'Up'. +

+ +

+In the second terminal window, start the second seed node with the following command (on one line): +

+ +

+<path to activator dir>/activator 
+  "runMain sample.cluster.simple.SimpleClusterApp 2552"		
+
+ +

+2552 corresponds to the port of the second seed-nodes element in the configuration. In the +log output you see that the cluster node has been started and joins the other seed node and +becomes a member of the cluster. Its status changed to 'Up'. +

+ +

+Switch over to the first terminal window and see in the log output that the member joined. +

+ +

+Start another node in the third terminal window with the following command (on one line): +

+ +

+<path to activator dir>/activator 
+  "runMain sample.cluster.simple.SimpleClusterApp 0"		
+
+ +

+Now you don't need to specify the port number, 0 means that it will use a random available port. +It joins one of the configured seed nodes. Look at the log output in the different terminal +windows. +

+ +

+Start even more nodes in the same way, if you like. +

+ +

+Shut down one of the nodes by pressing 'ctrl-c' in one of the terminal windows. +The other nodes will detect the failure after a while, which you can see in the log +output in the other terminals. +

+ +

+Look at the +source code +of the actor again. It registers itself as a subscriber of certain cluster events. It gets notified with a snapshot event, +CurrentClusterState that holds full state information of the cluster. After that it receives events for changes +that happen in the cluster.

+ +
+ +
+

Worker Dial-in Example

+ +

+In the previous sample we saw how to subscribe to cluster membership events. +You can read more about it in the +documentation. +How can cluster membership events be used? +

+ +

+Let's take a look at an example that illustrates how workers, here named backend, +can detect and register to new master nodes, here named frontend. +

+ +

+The example application provides a service to transform text. When some text +is sent to one of the frontend services, it will be delegated to one of the +backend workers, which performs the transformation job, and sends the result back to +the original client. New backend nodes, as well as new frontend nodes, can be +added to or removed from the cluster dynamically.

+ +

+Open TransformationMessages.scala. +It defines the messages that are sent between the actors. +

+ +

+The backend worker that performs the transformation job is defined in +TransformationBackend.scala +

+ +

+Note that the TransformationBackend actor subscribes to cluster events to detect new, +potential, frontend nodes, and send them a registration message so that they know +that they can use the backend worker. +

+ +

+The frontend that receives user jobs and delegates to one of the registered backend workers is defined in +TransformationFrontend.scala +

+ +

+Note that the TransformationFrontend actor watches the registered backend +to be able to remove it from its list of available backend workers. +Death watch uses the cluster failure detector for nodes in the cluster, i.e. it detects +network failures and JVM crashes, in addition to graceful termination of the watched +actor.

+ +

+To run this sample, go to the Run +tab, and start the application main class sample.cluster.transformation.TransformationApp +if it is not already started. +

+ +

+TransformationApp starts +5 actor systems (cluster members) in the same JVM process. It can be more +interesting to run them in separate processes. Stop the application in the +Run tab and run the following commands in separate terminal windows. +

+ +

+<path to activator dir>/activator 
+  "runMain sample.cluster.transformation.TransformationFrontend 2551"		
+
+ +

+<path to activator dir>/activator 
+  "runMain sample.cluster.transformation.TransformationBackend 2552"		
+
+ +

+<path to activator dir>/activator 
+  "runMain sample.cluster.transformation.TransformationBackend 0"		
+
+ +

+<path to activator dir>/activator 
+  "runMain sample.cluster.transformation.TransformationBackend 0"		
+
+ +

+<path to activator dir>/activator 
+  "runMain sample.cluster.transformation.TransformationFrontend 0"		
+
+ +
+ +
+

Cluster Aware Routers

+ +

+All routers +can be made aware of member nodes in the cluster, i.e. deploying new routees or looking up routees +on nodes in the cluster. +When a node becomes unreachable or leaves the cluster the routees of that node are +automatically unregistered from the router. When new nodes join the cluster additional +routees are added to the router, according to the configuration. Routees are also added +when a node becomes reachable again, after having been unreachable. +

+ +

+You can read more about cluster aware routers in the +documentation. +

+ +

+Let's take a look at a few samples that make use of cluster aware routers. +

+ +
+ +
+

Router Example with Group of Routees

+ +

+Let's take a look at how to use a cluster aware router with a group of routees, +i.e. a router which does not create its routees but instead forwards incoming messages to a given +set of actors created elsewhere. +

+ +

+The example application provides a service to calculate statistics for a text. +When some text is sent to the service it splits it into words, and delegates the task +to count number of characters in each word to a separate worker, a routee of a router. +The character count for each word is sent back to an aggregator that calculates +the average number of characters per word when all results have been collected. +

+ +

+Open StatsMessages.scala. +It defines the messages that are sent between the actors. +

+ +

+The worker that counts number of characters in each word is defined in +StatsWorker.scala. +

+ +

+The service that receives text from users and splits it up into words, delegates to workers and aggregates +is defined in StatsService.scala. +

+ +

+Note, nothing cluster specific so far, just plain actors. +

+ +

+All nodes start StatsService and StatsWorker actors. Remember, routees are the workers in this case. +

+ +

+Open stats1.conf. +The router is configured with routees.paths. +This means that user requests can be sent to StatsService on any node and it will use +StatsWorker on all nodes.

+ +

+To run this sample, go to the Run +tab, and start the application main class sample.cluster.stats.StatsSample +if it is not already started. +

+ +

+StatsSample starts +4 actor systems (cluster members) in the same JVM process. It can be more +interesting to run them in separate processes. Stop the application in the +Run tab and run the following commands in separate terminal windows. +

+ +

+<path to activator dir>/activator 
+  "runMain sample.cluster.stats.StatsSample 2551"		
+
+ +

+<path to activator dir>/activator 
+  "runMain sample.cluster.stats.StatsSample 2552"		
+
+ +

+<path to activator dir>/activator 
+  "runMain sample.cluster.stats.StatsSampleClient"		
+
+ +

+<path to activator dir>/activator 
+  "runMain sample.cluster.stats.StatsSample 0"		
+
+ +
+ +
+

Router Example with Pool of Remote Deployed Routees

+ +

+Let's take a look at how to use a cluster aware router on single master node that creates +and deploys workers instead of looking them up. +

+ +

+Open StatsSampleOneMaster.scala. +To keep track of a single master we use the Cluster Singleton +in the contrib module. The ClusterSingletonManager is started on each node. +

+ +

+We also need an actor on each node that keeps track of where current single master exists and +delegates jobs to the StatsService. That is handled by the +StatsFacade.scala +

+ +

+The StatsFacade receives text from users and delegates to the current StatsService, the single +master. It listens to cluster events to lookup the StatsService on the oldest node. +

+ +

+All nodes start StatsFacade and the ClusterSingletonManager. The router is now configured in +stats2.conf +

+ +

+To run this sample, go to the Run +tab, and start the application main class sample.cluster.stats.StatsSampleOneMaster +if it is not already started. +

+ +

+StatsSampleOneMaster starts +4 actor systems (cluster members) in the same JVM process. It can be more +interesting to run them in separate processes. Stop the application in the +Run tab and run the following commands in separate terminal windows. +

+ +

+<path to activator dir>/activator 
+  "runMain sample.cluster.stats.StatsSampleOneMaster 2551"		
+
+ +

+<path to activator dir>/activator 
+  "runMain sample.cluster.stats.StatsSampleOneMaster 2552"		
+
+ +

+<path to activator dir>/activator 
+  "runMain sample.cluster.stats.StatsSampleOneMasterClient"		
+
+ +

+<path to activator dir>/activator 
+  "runMain sample.cluster.stats.StatsSampleOneMaster 0"		
+
+ +
+ +
+

Adaptive Load Balancing

+ +

+The member nodes of the cluster collect system health metrics and publish that to other nodes and to +registered subscribers. This information is primarily used for load-balancing routers, such as +the AdaptiveLoadBalancingPool and AdaptiveLoadBalancingGroup routers. +

+ +

+You can read more about cluster metrics in the +documentation. +

+ +

+Let's take a look at this router in action. What can be more demanding than calculating factorials? +

+ +

+The backend worker that performs the factorial calculation: +FactorialBackend +

+ +

+The frontend that receives user jobs and delegates to the backends via the router: +FactorialFrontend +

+ +

+As you can see, the router is defined in the same way as other routers, and in this case it is configured in: +factorial.conf +

+ +

+Only the adaptive router type and the metrics-selector are specific to this router; +other things work in the same way as for other routers. +

+ +

+To run this sample, go to the Run +tab, and start the application main class sample.cluster.factorial.FactorialApp +if it is not already started. +

+ +

+FactorialApp starts +4 actor systems (cluster members) in the same JVM process. It can be more +interesting to run them in separate processes. Stop the application in the +Run tab and run the following commands in separate terminal windows. +

+ +

+<path to activator dir>/activator 
+  "runMain sample.cluster.factorial.FactorialBackend 2551"		
+
+ +

+<path to activator dir>/activator 
+  "runMain sample.cluster.factorial.FactorialBackend 2552"		
+
+ +

+<path to activator dir>/activator 
+  "runMain sample.cluster.factorial.FactorialBackend 0"		
+
+ +

+<path to activator dir>/activator 
+  "runMain sample.cluster.factorial.FactorialFrontend 0"		
+
+ +

+Press ctrl-c in the terminal window of the frontend to stop the factorial calculations. +

+ +
+ + + diff --git a/akka-samples/akka-sample-cluster/README.md b/akka-samples/akka-sample-cluster/README.md deleted file mode 100644 index 3a6daaca2a..0000000000 --- a/akka-samples/akka-sample-cluster/README.md +++ /dev/null @@ -1,20 +0,0 @@ -Cluster Sample -============== - -This sample is meant to be used by studying the code; it does not perform any -astounding functions when running it. If you want to run it, check out the akka -sources on your local hard drive, follow the [instructions for setting up Akka -with SBT](http://doc.akka.io/docs/akka/current/intro/getting-started.html). -When you start SBT within the checked-out akka source directory, you can run -this sample by typing - - akka-sample-cluster-experimental/run-main sample.cluster.simple.SimpleClusterApp 2551 - -and then from another terminal start more cluster nodes like this: - - akka-sample-cluster-experimental/run-main sample.cluster.simple.SimpleClusterApp - -Then you can start and stop cluster nodes and observe the messages printed by -the remaining ones, demonstrating cluster membership changes. - -You can read more in the [Akka docs](http://akka.io/docs). 
diff --git a/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/factorial/japi/FactorialFrontend.java b/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/factorial/japi/FactorialFrontend.java deleted file mode 100644 index 8b3a0333b9..0000000000 --- a/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/factorial/japi/FactorialFrontend.java +++ /dev/null @@ -1,98 +0,0 @@ -package sample.cluster.factorial.japi; - -import java.util.Collections; - -import akka.actor.UntypedActor; -import akka.actor.ActorRef; -import akka.actor.Props; -import akka.event.Logging; -import akka.event.LoggingAdapter; -import akka.routing.FromConfig; -import akka.cluster.routing.AdaptiveLoadBalancingPool; -import akka.cluster.routing.AdaptiveLoadBalancingGroup; -import akka.cluster.routing.ClusterRouterPool; -import akka.cluster.routing.ClusterRouterGroup; -import akka.cluster.routing.ClusterRouterGroupSettings; -import akka.cluster.routing.ClusterRouterPoolSettings; -import akka.cluster.routing.HeapMetricsSelector; -import akka.cluster.routing.SystemLoadAverageMetricsSelector; - -//#frontend -public class FactorialFrontend extends UntypedActor { - final int upToN; - final boolean repeat; - - LoggingAdapter log = Logging.getLogger(getContext().system(), this); - - ActorRef backend = getContext().actorOf( - FromConfig.getInstance().props(), - "factorialBackendRouter"); - - public FactorialFrontend(int upToN, boolean repeat) { - this.upToN = upToN; - this.repeat = repeat; - } - - @Override - public void preStart() { - sendJobs(); - } - - @Override - public void onReceive(Object message) { - if (message instanceof FactorialResult) { - FactorialResult result = (FactorialResult) message; - if (result.n == upToN) { - log.debug("{}! 
= {}", result.n, result.factorial); - if (repeat) sendJobs(); - } - - } else { - unhandled(message); - } - } - - void sendJobs() { - log.info("Starting batch of factorials up to [{}]", upToN); - for (int n = 1; n <= upToN; n++) { - backend.tell(n, getSelf()); - } - } - -} -//#frontend - - -//not used, only for documentation -abstract class FactorialFrontend2 extends UntypedActor { - //#router-lookup-in-code - int totalInstances = 100; - String routeesPath = "/user/factorialBackend"; - boolean allowLocalRoutees = true; - String useRole = "backend"; - ActorRef backend = getContext().actorOf( - new ClusterRouterGroup( - new AdaptiveLoadBalancingGroup(HeapMetricsSelector.getInstance(), Collections.emptyList()), - new ClusterRouterGroupSettings( - totalInstances, routeesPath, allowLocalRoutees, useRole)).props(), - "factorialBackendRouter2"); - //#router-lookup-in-code -} - -//not used, only for documentation -abstract class FactorialFrontend3 extends UntypedActor { - //#router-deploy-in-code - int totalInstances = 100; - int maxInstancesPerNode = 3; - boolean allowLocalRoutees = false; - String useRole = "backend"; - ActorRef backend = getContext().actorOf( - new ClusterRouterPool( - new AdaptiveLoadBalancingPool( - SystemLoadAverageMetricsSelector.getInstance(), 0), - new ClusterRouterPoolSettings( - totalInstances, maxInstancesPerNode, allowLocalRoutees, useRole)). 
- props(Props.create(FactorialBackend.class)), - "factorialBackendRouter3"); - //#router-deploy-in-code -} \ No newline at end of file diff --git a/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/simple/japi/SimpleClusterApp.java b/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/simple/japi/SimpleClusterApp.java deleted file mode 100644 index 7aebbdd510..0000000000 --- a/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/simple/japi/SimpleClusterApp.java +++ /dev/null @@ -1,28 +0,0 @@ -package sample.cluster.simple.japi; - -import akka.actor.ActorRef; -import akka.actor.ActorSystem; -import akka.actor.Props; -import akka.cluster.Cluster; -import akka.cluster.ClusterEvent.ClusterDomainEvent; - -public class SimpleClusterApp { - - public static void main(String[] args) { - // Override the configuration of the port - // when specified as program argument - if (args.length > 0) - System.setProperty("akka.remote.netty.tcp.port", args[0]); - - // Create an Akka system - ActorSystem system = ActorSystem.create("ClusterSystem"); - - // Create an actor that handles cluster domain events - ActorRef clusterListener = system.actorOf(Props.create( - SimpleClusterListener.class), "clusterListener"); - - // Add subscription of cluster events - Cluster.get(system).subscribe(clusterListener, - ClusterDomainEvent.class); - } -} diff --git a/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/stats/japi/StatsSampleClientMain.java b/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/stats/japi/StatsSampleClientMain.java deleted file mode 100644 index 55152d034f..0000000000 --- a/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/stats/japi/StatsSampleClientMain.java +++ /dev/null @@ -1,13 +0,0 @@ -package sample.cluster.stats.japi; - -import akka.actor.ActorSystem; -import akka.actor.Props; - -public class StatsSampleClientMain { - - public static void main(String[] args) throws Exception { - // note that client is 
not a compute node, role not defined - ActorSystem system = ActorSystem.create("ClusterSystem"); - system.actorOf(Props.create(StatsSampleClient.class, "/user/statsService"), "client"); - } -} diff --git a/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/stats/japi/StatsSampleMain.java b/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/stats/japi/StatsSampleMain.java deleted file mode 100644 index 7a9d6cca16..0000000000 --- a/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/stats/japi/StatsSampleMain.java +++ /dev/null @@ -1,25 +0,0 @@ -package sample.cluster.stats.japi; - -import com.typesafe.config.Config; -import com.typesafe.config.ConfigFactory; -import akka.actor.ActorSystem; -import akka.actor.Props; - -public class StatsSampleMain { - - public static void main(String[] args) throws Exception { - // Override the configuration of the port when specified as program argument - final Config config = - (args.length > 0 ? - ConfigFactory.parseString(String.format("akka.remote.netty.tcp.port=%s", args[0])) : - ConfigFactory.empty()). - withFallback(ConfigFactory.parseString("akka.cluster.roles = [compute]")). 
- withFallback(ConfigFactory.load()); - - ActorSystem system = ActorSystem.create("ClusterSystem", config); - - system.actorOf(Props.create(StatsWorker.class), "statsWorker"); - system.actorOf(Props.create(StatsService.class), "statsService"); - - } -} diff --git a/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/stats/japi/StatsSampleOneMasterMain.java b/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/stats/japi/StatsSampleOneMasterMain.java deleted file mode 100644 index 256032fe21..0000000000 --- a/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/stats/japi/StatsSampleOneMasterMain.java +++ /dev/null @@ -1,33 +0,0 @@ -package sample.cluster.stats.japi; - -import com.typesafe.config.Config; -import com.typesafe.config.ConfigFactory; -import akka.actor.ActorSystem; -import akka.actor.PoisonPill; -import akka.actor.Props; -import akka.contrib.pattern.ClusterSingletonManager; - -public class StatsSampleOneMasterMain { - - public static void main(String[] args) throws Exception { - // Override the configuration of the port when specified as program argument - final Config config = - (args.length > 0 ? - ConfigFactory.parseString(String.format("akka.remote.netty.tcp.port=%s", args[0])) : - ConfigFactory.empty()). - withFallback(ConfigFactory.parseString("akka.cluster.roles = [compute]")). 
- withFallback(ConfigFactory.load()); - - ActorSystem system = ActorSystem.create("ClusterSystem", config); - - //#create-singleton-manager - system.actorOf(ClusterSingletonManager.defaultProps( - Props.create(StatsService.class), - "statsService", PoisonPill.getInstance(), "compute"), - "singleton"); - //#create-singleton-manager - - system.actorOf(Props.create(StatsFacade.class), "statsFacade"); - - } -} diff --git a/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/stats/japi/StatsService.java b/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/stats/japi/StatsService.java deleted file mode 100644 index 4e490925ca..0000000000 --- a/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/stats/japi/StatsService.java +++ /dev/null @@ -1,89 +0,0 @@ -package sample.cluster.stats.japi; - -import java.util.Collections; - -import sample.cluster.stats.japi.StatsMessages.StatsJob; - -//#imports -import akka.actor.ActorRef; -import akka.actor.Props; -import akka.actor.UntypedActor; -import akka.cluster.routing.ClusterRouterGroup; -import akka.cluster.routing.ClusterRouterPool; -import akka.cluster.routing.ClusterRouterGroupSettings; -import akka.cluster.routing.ClusterRouterPoolSettings; -import akka.routing.ConsistentHashingGroup; -import akka.routing.ConsistentHashingPool; -import akka.routing.ConsistentHashingRouter.ConsistentHashableEnvelope; -import akka.routing.FromConfig; -//#imports - -//#service -public class StatsService extends UntypedActor { - - // This router is used both with lookup and deploy of routees. If you - // have a router with only lookup of routees you can use Props.empty() - // instead of Props.create(StatsWorker.class). 
- ActorRef workerRouter = getContext().actorOf( - FromConfig.getInstance().props(Props.create(StatsWorker.class)), - "workerRouter"); - - @Override - public void onReceive(Object message) { - if (message instanceof StatsJob) { - StatsJob job = (StatsJob) message; - if (job.getText().equals("")) { - unhandled(message); - } else { - final String[] words = job.getText().split(" "); - final ActorRef replyTo = getSender(); - - // create actor that collects replies from workers - ActorRef aggregator = getContext().actorOf( - Props.create(StatsAggregator.class, words.length, replyTo)); - - // send each word to a worker - for (String word : words) { - workerRouter.tell(new ConsistentHashableEnvelope(word, word), - aggregator); - } - } - - } else { - unhandled(message); - } - } -} - -//#service - -//not used, only for documentation -abstract class StatsService2 extends UntypedActor { - //#router-lookup-in-code - int totalInstances = 100; - Iterable routeesPaths = Collections.singletonList("/user/statsWorker"); - boolean allowLocalRoutees = true; - String useRole = "compute"; - ActorRef workerRouter = getContext().actorOf( - new ClusterRouterGroup( - new ConsistentHashingGroup(routeesPaths), new ClusterRouterGroupSettings( - totalInstances, routeesPaths, allowLocalRoutees, useRole)).props(), - "workerRouter2"); - //#router-lookup-in-code -} - -//not used, only for documentation -abstract class StatsService3 extends UntypedActor { - //#router-deploy-in-code - int totalInstances = 100; - int maxInstancesPerNode = 3; - boolean allowLocalRoutees = false; - String useRole = "compute"; - ActorRef workerRouter = getContext().actorOf( - new ClusterRouterPool( - new ConsistentHashingPool(0), new ClusterRouterPoolSettings( - totalInstances, maxInstancesPerNode, allowLocalRoutees, useRole)). 
- props(Props.create(StatsWorker.class)), - "workerRouter3"); - //#router-deploy-in-code -} diff --git a/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/transformation/japi/TransformationBackendMain.java b/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/transformation/japi/TransformationBackendMain.java deleted file mode 100644 index 804469e77b..0000000000 --- a/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/transformation/japi/TransformationBackendMain.java +++ /dev/null @@ -1,20 +0,0 @@ -package sample.cluster.transformation.japi; - -import akka.actor.ActorSystem; -import akka.actor.Props; - -public class TransformationBackendMain { - - public static void main(String[] args) throws Exception { - // Override the configuration of the port - // when specified as program argument - if (args.length > 0) - System.setProperty("akka.remote.netty.tcp.port", args[0]); - - ActorSystem system = ActorSystem.create("ClusterSystem"); - - system.actorOf(Props.create(TransformationBackend.class), "backend"); - - } - -} diff --git a/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/transformation/japi/TransformationFrontendMain.java b/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/transformation/japi/TransformationFrontendMain.java deleted file mode 100644 index 8a11f78600..0000000000 --- a/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/transformation/japi/TransformationFrontendMain.java +++ /dev/null @@ -1,45 +0,0 @@ -package sample.cluster.transformation.japi; - -import java.util.concurrent.TimeUnit; - -import sample.cluster.transformation.japi.TransformationMessages.TransformationJob; -import scala.concurrent.ExecutionContext; -import scala.concurrent.duration.Duration; -import akka.actor.ActorRef; -import akka.actor.ActorSystem; -import akka.actor.Props; -import akka.dispatch.OnSuccess; -import akka.util.Timeout; -import static akka.pattern.Patterns.ask; - -public class 
TransformationFrontendMain { - - public static void main(String[] args) throws Exception { - // Override the configuration of the port - // when specified as program argument - if (args.length > 0) - System.setProperty("akka.remote.netty.tcp.port", args[0]); - - ActorSystem system = ActorSystem.create("ClusterSystem"); - - ActorRef frontend = system.actorOf(Props.create( - TransformationFrontend.class), "frontend"); - Timeout timeout = new Timeout(Duration.create(5, TimeUnit.SECONDS)); - final ExecutionContext ec = system.dispatcher(); - for (int n = 1; n <= 120; n++) { - ask(frontend, new TransformationJob("hello-" + n), timeout) - .onSuccess(new OnSuccess() { - public void onSuccess(Object result) { - System.out.println(result); - } - }, ec); - - // wait a while until next request, - // to avoid flooding the console with output - Thread.sleep(2000); - } - system.shutdown(); - - } - -} diff --git a/akka-samples/akka-sample-cluster/src/main/resources/application.conf b/akka-samples/akka-sample-cluster/src/main/resources/application.conf deleted file mode 100644 index 2b9eb7ae19..0000000000 --- a/akka-samples/akka-sample-cluster/src/main/resources/application.conf +++ /dev/null @@ -1,71 +0,0 @@ -# //#cluster -akka { - actor { - provider = "akka.cluster.ClusterActorRefProvider" - } - remote { - log-remote-lifecycle-events = off - netty.tcp { - hostname = "127.0.0.1" - port = 0 - } - } - - cluster { - seed-nodes = [ - "akka.tcp://ClusterSystem@127.0.0.1:2551", - "akka.tcp://ClusterSystem@127.0.0.1:2552"] - - auto-down-unreachable-after = 10s - } -} -# //#cluster - -# //#config-router-lookup -akka.actor.deployment { - /statsService/workerRouter { - router = consistent-hashing-group - nr-of-instances = 100 - routees.paths = ["/user/statsWorker"] - cluster { - enabled = on - allow-local-routees = on - use-role = compute - } - } -} -# //#config-router-lookup - -# //#config-router-deploy -akka.actor.deployment { - /singleton/statsService/workerRouter { - router = 
consistent-hashing-pool - nr-of-instances = 100 - cluster { - enabled = on - max-nr-of-instances-per-node = 3 - allow-local-routees = off - use-role = compute - } - } -} -# //#config-router-deploy - -# //#adaptive-router -akka.actor.deployment { - /factorialFrontend/factorialBackendRouter = { - router = adaptive-group - # metrics-selector = heap - # metrics-selector = load - # metrics-selector = cpu - metrics-selector = mix - nr-of-instances = 100 - routees.paths = ["/user/factorialBackend"] - cluster { - enabled = on - use-role = backend - allow-local-routees = off - } - } -} -# //#adaptive-router \ No newline at end of file diff --git a/akka-samples/akka-sample-cluster/src/main/resources/factorial.conf b/akka-samples/akka-sample-cluster/src/main/resources/factorial.conf deleted file mode 100644 index e0c79671b6..0000000000 --- a/akka-samples/akka-sample-cluster/src/main/resources/factorial.conf +++ /dev/null @@ -1,12 +0,0 @@ -include "application" - -# //#min-nr-of-members -akka.cluster.min-nr-of-members = 3 -# //#min-nr-of-members - -# //#role-min-nr-of-members -akka.cluster.role { - frontend.min-nr-of-members = 1 - backend.min-nr-of-members = 2 -} -# //#role-min-nr-of-members \ No newline at end of file diff --git a/akka-samples/akka-sample-cluster/src/main/scala/sample/cluster/factorial/FactorialSample.scala b/akka-samples/akka-sample-cluster/src/main/scala/sample/cluster/factorial/FactorialSample.scala deleted file mode 100644 index f08d37ac6e..0000000000 --- a/akka-samples/akka-sample-cluster/src/main/scala/sample/cluster/factorial/FactorialSample.scala +++ /dev/null @@ -1,173 +0,0 @@ -package sample.cluster.factorial - -//#imports -import scala.annotation.tailrec -import scala.concurrent.Future -import com.typesafe.config.ConfigFactory -import akka.actor.Actor -import akka.actor.ActorLogging -import akka.actor.ActorRef -import akka.actor.ActorSystem -import akka.actor.Props -import akka.pattern.pipe -import akka.routing.FromConfig - -//#imports - -import 
akka.cluster.Cluster -import akka.cluster.ClusterEvent.CurrentClusterState -import akka.cluster.ClusterEvent.MemberUp - -object FactorialFrontend { - def main(args: Array[String]): Unit = { - val upToN = if (args.isEmpty) 200 else args(0).toInt - - val config = ConfigFactory.parseString("akka.cluster.roles = [frontend]"). - withFallback(ConfigFactory.load("factorial")) - - val system = ActorSystem("ClusterSystem", config) - system.log.info("Factorials will start when 2 backend members in the cluster.") - //#registerOnUp - Cluster(system) registerOnMemberUp { - system.actorOf(Props(classOf[FactorialFrontend], upToN, true), - name = "factorialFrontend") - } - //#registerOnUp - } -} - -//#frontend -class FactorialFrontend(upToN: Int, repeat: Boolean) extends Actor with ActorLogging { - - val backend = context.actorOf(FromConfig.props(), - name = "factorialBackendRouter") - - override def preStart(): Unit = sendJobs() - - def receive = { - case (n: Int, factorial: BigInt) ⇒ - if (n == upToN) { - log.debug("{}! = {}", n, factorial) - if (repeat) sendJobs() - } - } - - def sendJobs(): Unit = { - log.info("Starting batch of factorials up to [{}]", upToN) - 1 to upToN foreach { backend ! _ } - } -} -//#frontend - -object FactorialBackend { - def main(args: Array[String]): Unit = { - // Override the configuration of the port when specified as program argument - val config = - (if (args.nonEmpty) ConfigFactory.parseString(s"akka.remote.netty.tcp.port=${args(0)}") - else ConfigFactory.empty).withFallback( - ConfigFactory.parseString("akka.cluster.roles = [backend]")). 
- withFallback(ConfigFactory.load("factorial")) - - val system = ActorSystem("ClusterSystem", config) - system.actorOf(Props[FactorialBackend], name = "factorialBackend") - - system.actorOf(Props[MetricsListener], name = "metricsListener") - } -} - -//#backend -class FactorialBackend extends Actor with ActorLogging { - - import context.dispatcher - - def receive = { - case (n: Int) ⇒ - Future(factorial(n)) map { result ⇒ (n, result) } pipeTo sender - } - - def factorial(n: Int): BigInt = { - @tailrec def factorialAcc(acc: BigInt, n: Int): BigInt = { - if (n <= 1) acc - else factorialAcc(acc * n, n - 1) - } - factorialAcc(BigInt(1), n) - } - -} -//#backend - -//#metrics-listener -import akka.cluster.Cluster -import akka.cluster.ClusterEvent.ClusterMetricsChanged -import akka.cluster.ClusterEvent.CurrentClusterState -import akka.cluster.NodeMetrics -import akka.cluster.StandardMetrics.HeapMemory -import akka.cluster.StandardMetrics.Cpu - -class MetricsListener extends Actor with ActorLogging { - val selfAddress = Cluster(context.system).selfAddress - - // subscribe to ClusterMetricsChanged - // re-subscribe when restart - override def preStart(): Unit = - Cluster(context.system).subscribe(self, classOf[ClusterMetricsChanged]) - override def postStop(): Unit = - Cluster(context.system).unsubscribe(self) - - def receive = { - case ClusterMetricsChanged(clusterMetrics) ⇒ - clusterMetrics.filter(_.address == selfAddress) foreach { nodeMetrics ⇒ - logHeap(nodeMetrics) - logCpu(nodeMetrics) - } - case state: CurrentClusterState ⇒ // ignore - } - - def logHeap(nodeMetrics: NodeMetrics): Unit = nodeMetrics match { - case HeapMemory(address, timestamp, used, committed, max) ⇒ - log.info("Used heap: {} MB", used.doubleValue / 1024 / 1024) - case _ ⇒ // no heap info - } - - def logCpu(nodeMetrics: NodeMetrics): Unit = nodeMetrics match { - case Cpu(address, timestamp, Some(systemLoadAverage), cpuCombined, processors) ⇒ - log.info("Load: {} ({} processors)", systemLoadAverage, 
processors) - case _ ⇒ // no cpu info - } -} - -//#metrics-listener - -// not used, only for documentation -abstract class FactorialFrontend2 extends Actor { - //#router-lookup-in-code - import akka.cluster.routing.ClusterRouterGroup - import akka.cluster.routing.ClusterRouterGroupSettings - import akka.cluster.routing.AdaptiveLoadBalancingGroup - import akka.cluster.routing.HeapMetricsSelector - - val backend = context.actorOf( - ClusterRouterGroup(AdaptiveLoadBalancingGroup(HeapMetricsSelector), - ClusterRouterGroupSettings( - totalInstances = 100, routeesPaths = List("/user/factorialBackend"), - allowLocalRoutees = true, useRole = Some("backend"))).props(), - name = "factorialBackendRouter2") - //#router-lookup-in-code -} - -// not used, only for documentation -abstract class FactorialFrontend3 extends Actor { - //#router-deploy-in-code - import akka.cluster.routing.ClusterRouterPool - import akka.cluster.routing.ClusterRouterPoolSettings - import akka.cluster.routing.AdaptiveLoadBalancingPool - import akka.cluster.routing.SystemLoadAverageMetricsSelector - - val backend = context.actorOf( - ClusterRouterPool(AdaptiveLoadBalancingPool( - SystemLoadAverageMetricsSelector), ClusterRouterPoolSettings( - totalInstances = 100, maxInstancesPerNode = 3, - allowLocalRoutees = false, useRole = Some("backend"))).props(Props[FactorialBackend]), - name = "factorialBackendRouter3") - //#router-deploy-in-code -} \ No newline at end of file diff --git a/akka-samples/akka-sample-cluster/src/main/scala/sample/cluster/simple/SimpleClusterApp.scala b/akka-samples/akka-sample-cluster/src/main/scala/sample/cluster/simple/SimpleClusterApp.scala deleted file mode 100644 index 397ffaf3b6..0000000000 --- a/akka-samples/akka-sample-cluster/src/main/scala/sample/cluster/simple/SimpleClusterApp.scala +++ /dev/null @@ -1,36 +0,0 @@ -package sample.cluster.simple - -import akka.actor._ -import akka.cluster.Cluster -import akka.cluster.ClusterEvent._ - -object SimpleClusterApp { - def 
main(args: Array[String]): Unit = { - - // Override the configuration of the port - // when specified as program argument - if (args.nonEmpty) System.setProperty("akka.remote.netty.tcp.port", args(0)) - - // Create an Akka system - val system = ActorSystem("ClusterSystem") - val clusterListener = system.actorOf(Props[SimpleClusterListener], - name = "clusterListener") - - Cluster(system).subscribe(clusterListener, classOf[ClusterDomainEvent]) - } -} - -class SimpleClusterListener extends Actor with ActorLogging { - def receive = { - case state: CurrentClusterState ⇒ - log.info("Current members: {}", state.members.mkString(", ")) - case MemberUp(member) ⇒ - log.info("Member is Up: {}", member.address) - case UnreachableMember(member) ⇒ - log.info("Member detected as unreachable: {}", member) - case MemberRemoved(member, previousStatus) ⇒ - log.info("Member is Removed: {} after {}", - member.address, previousStatus) - case _: ClusterDomainEvent ⇒ // ignore - } -} \ No newline at end of file diff --git a/akka-samples/akka-sample-cluster/src/main/scala/sample/cluster/stats/StatsSample.scala b/akka-samples/akka-sample-cluster/src/main/scala/sample/cluster/stats/StatsSample.scala deleted file mode 100644 index 888eec38d2..0000000000 --- a/akka-samples/akka-sample-cluster/src/main/scala/sample/cluster/stats/StatsSample.scala +++ /dev/null @@ -1,252 +0,0 @@ -package sample.cluster.stats - -//#imports -import language.postfixOps -import scala.collection.immutable -import scala.concurrent.forkjoin.ThreadLocalRandom -import scala.concurrent.duration._ -import com.typesafe.config.ConfigFactory -import akka.actor.Actor -import akka.actor.ActorLogging -import akka.actor.ActorRef -import akka.actor.ActorSelection -import akka.actor.ActorSystem -import akka.actor.Address -import akka.actor.PoisonPill -import akka.actor.Props -import akka.actor.ReceiveTimeout -import akka.actor.RelativeActorPath -import akka.actor.RootActorPath -import akka.cluster.Cluster -import 
akka.cluster.ClusterEvent._ -import akka.cluster.MemberStatus -import akka.cluster.Member -import akka.contrib.pattern.ClusterSingletonManager -import akka.routing.FromConfig -import akka.routing.ConsistentHashingRouter.ConsistentHashableEnvelope -//#imports - -//#messages -case class StatsJob(text: String) -case class StatsResult(meanWordLength: Double) -case class JobFailed(reason: String) -//#messages - -//#service -class StatsService extends Actor { - // This router is used both with lookup and deploy of routees. If you - // have a router with only lookup of routees you can use Props.empty - // instead of Props[StatsWorker.class]. - val workerRouter = context.actorOf(FromConfig.props(Props[StatsWorker]), - name = "workerRouter") - - def receive = { - case StatsJob(text) if text != "" ⇒ - val words = text.split(" ") - val replyTo = sender // important to not close over sender - // create actor that collects replies from workers - val aggregator = context.actorOf(Props( - classOf[StatsAggregator], words.size, replyTo)) - words foreach { word ⇒ - workerRouter.tell( - ConsistentHashableEnvelope(word, word), aggregator) - } - } -} - -class StatsAggregator(expectedResults: Int, replyTo: ActorRef) extends Actor { - var results = IndexedSeq.empty[Int] - context.setReceiveTimeout(3 seconds) - - def receive = { - case wordCount: Int ⇒ - results = results :+ wordCount - if (results.size == expectedResults) { - val meanWordLength = results.sum.toDouble / results.size - replyTo ! StatsResult(meanWordLength) - context.stop(self) - } - case ReceiveTimeout ⇒ - replyTo ! JobFailed("Service unavailable, try again later") - context.stop(self) - } -} -//#service - -//#worker -class StatsWorker extends Actor { - var cache = Map.empty[String, Int] - def receive = { - case word: String ⇒ - val length = cache.get(word) match { - case Some(x) ⇒ x - case None ⇒ - val x = word.length - cache += (word -> x) - x - } - - sender ! 
length - } -} -//#worker - -//#facade -class StatsFacade extends Actor with ActorLogging { - import context.dispatcher - val cluster = Cluster(context.system) - - // sort by age, oldest first - val ageOrdering = Ordering.fromLessThan[Member] { (a, b) ⇒ a.isOlderThan(b) } - var membersByAge: immutable.SortedSet[Member] = immutable.SortedSet.empty(ageOrdering) - - // subscribe to cluster changes - // re-subscribe when restart - override def preStart(): Unit = cluster.subscribe(self, classOf[MemberEvent]) - override def postStop(): Unit = cluster.unsubscribe(self) - - def receive = { - case job: StatsJob if membersByAge.isEmpty ⇒ - sender ! JobFailed("Service unavailable, try again later") - case job: StatsJob ⇒ - currentMaster.tell(job, sender) - case state: CurrentClusterState ⇒ - membersByAge = immutable.SortedSet.empty(ageOrdering) ++ state.members.collect { - case m if m.hasRole("compute") ⇒ m - } - case MemberUp(m) ⇒ if (m.hasRole("compute")) membersByAge += m - case MemberRemoved(m, _) ⇒ if (m.hasRole("compute")) membersByAge -= m - case _: MemberEvent ⇒ // not interesting - } - - def currentMaster: ActorSelection = - context.actorSelection(RootActorPath(membersByAge.head.address) / - "user" / "singleton" / "statsService") - -} -//#facade - -object StatsSample { - def main(args: Array[String]): Unit = { - // Override the configuration of the port when specified as program argument - val config = - (if (args.nonEmpty) ConfigFactory.parseString(s"akka.remote.netty.tcp.port=${args(0)}") - else ConfigFactory.empty).withFallback( - ConfigFactory.parseString("akka.cluster.roles = [compute]")). 
- withFallback(ConfigFactory.load()) - - val system = ActorSystem("ClusterSystem", config) - - system.actorOf(Props[StatsWorker], name = "statsWorker") - system.actorOf(Props[StatsService], name = "statsService") - } -} - -object StatsSampleOneMaster { - def main(args: Array[String]): Unit = { - // Override the configuration of the port when specified as program argument - val config = - (if (args.nonEmpty) ConfigFactory.parseString(s"akka.remote.netty.tcp.port=${args(0)}") - else ConfigFactory.empty).withFallback( - ConfigFactory.parseString("akka.cluster.roles = [compute]")). - withFallback(ConfigFactory.load()) - - val system = ActorSystem("ClusterSystem", config) - - //#create-singleton-manager - system.actorOf(ClusterSingletonManager.props( - singletonProps = Props[StatsService], singletonName = "statsService", - terminationMessage = PoisonPill, role = Some("compute")), - name = "singleton") - //#create-singleton-manager - system.actorOf(Props[StatsFacade], name = "statsFacade") - } -} - -object StatsSampleClient { - def main(args: Array[String]): Unit = { - // note that client is not a compute node, role not defined - val system = ActorSystem("ClusterSystem") - system.actorOf(Props(classOf[StatsSampleClient], "/user/statsService"), "client") - } -} - -object StatsSampleOneMasterClient { - def main(args: Array[String]): Unit = { - // note that client is not a compute node, role not defined - val system = ActorSystem("ClusterSystem") - system.actorOf(Props(classOf[StatsSampleClient], "/user/statsFacade"), "client") - } -} - -class StatsSampleClient(servicePath: String) extends Actor { - val cluster = Cluster(context.system) - val servicePathElements = servicePath match { - case RelativeActorPath(elements) ⇒ elements - case _ ⇒ throw new IllegalArgumentException( - "servicePath [%s] is not a valid relative actor path" format servicePath) - } - import context.dispatcher - val tickTask = context.system.scheduler.schedule(2 seconds, 2 seconds, self, "tick") - - var 
nodes = Set.empty[Address] - - override def preStart(): Unit = { - cluster.subscribe(self, classOf[MemberEvent]) - cluster.subscribe(self, classOf[UnreachableMember]) - } - override def postStop(): Unit = { - cluster.unsubscribe(self) - tickTask.cancel() - } - - def receive = { - case "tick" if nodes.nonEmpty ⇒ - // just pick any one - val address = nodes.toIndexedSeq(ThreadLocalRandom.current.nextInt(nodes.size)) - val service = context.actorSelection(RootActorPath(address) / servicePathElements) - service ! StatsJob("this is the text that will be analyzed") - case result: StatsResult ⇒ - println(result) - case failed: JobFailed ⇒ - println(failed) - case state: CurrentClusterState ⇒ - nodes = state.members.collect { - case m if m.hasRole("compute") && m.status == MemberStatus.Up ⇒ m.address - } - case MemberUp(m) if m.hasRole("compute") ⇒ nodes += m.address - case other: MemberEvent ⇒ nodes -= other.member.address - case UnreachableMember(m) ⇒ nodes -= m.address - } - -} - -// not used, only for documentation -abstract class StatsService2 extends Actor { - //#router-lookup-in-code - import akka.cluster.routing.ClusterRouterGroup - import akka.cluster.routing.ClusterRouterGroupSettings - import akka.routing.ConsistentHashingGroup - - val workerRouter = context.actorOf( - ClusterRouterGroup(ConsistentHashingGroup(Nil), ClusterRouterGroupSettings( - totalInstances = 100, routeesPaths = List("/user/statsWorker"), - allowLocalRoutees = true, useRole = Some("compute"))).props(), - name = "workerRouter2") - //#router-lookup-in-code -} - -// not used, only for documentation -abstract class StatsService3 extends Actor { - //#router-deploy-in-code - import akka.cluster.routing.ClusterRouterPool - import akka.cluster.routing.ClusterRouterPoolSettings - import akka.routing.ConsistentHashingPool - - val workerRouter = context.actorOf( - ClusterRouterPool(ConsistentHashingPool(0), ClusterRouterPoolSettings( - totalInstances = 100, maxInstancesPerNode = 3, - allowLocalRoutees = 
false, useRole = None)).props(Props[StatsWorker]), - name = "workerRouter3") - //#router-deploy-in-code -} diff --git a/akka-samples/akka-sample-cluster/src/main/scala/sample/cluster/transformation/TransformationSample.scala b/akka-samples/akka-sample-cluster/src/main/scala/sample/cluster/transformation/TransformationSample.scala deleted file mode 100644 index c05edd01d1..0000000000 --- a/akka-samples/akka-sample-cluster/src/main/scala/sample/cluster/transformation/TransformationSample.scala +++ /dev/null @@ -1,115 +0,0 @@ -package sample.cluster.transformation - -//#imports -import language.postfixOps -import scala.concurrent.duration._ -import akka.actor.Actor -import akka.actor.ActorRef -import akka.actor.ActorSystem -import akka.actor.Props -import akka.actor.RootActorPath -import akka.actor.Terminated -import akka.cluster.Cluster -import akka.cluster.ClusterEvent.CurrentClusterState -import akka.cluster.ClusterEvent.MemberUp -import akka.cluster.Member -import akka.cluster.MemberStatus -import akka.pattern.ask -import akka.util.Timeout -import com.typesafe.config.ConfigFactory -//#imports - -//#messages -case class TransformationJob(text: String) -case class TransformationResult(text: String) -case class JobFailed(reason: String, job: TransformationJob) -case object BackendRegistration -//#messages - -object TransformationFrontend { - def main(args: Array[String]): Unit = { - // Override the configuration of the port when specified as program argument - val config = - (if (args.nonEmpty) ConfigFactory.parseString(s"akka.remote.netty.tcp.port=${args(0)}") - else ConfigFactory.empty).withFallback( - ConfigFactory.parseString("akka.cluster.roles = [frontend]")). - withFallback(ConfigFactory.load()) - - val system = ActorSystem("ClusterSystem", config) - val frontend = system.actorOf(Props[TransformationFrontend], name = "frontend") - - import system.dispatcher - implicit val timeout = Timeout(5 seconds) - for (n ← 1 to 120) { - (frontend ? 
TransformationJob("hello-" + n)) onSuccess { - case result ⇒ println(result) - } - // wait a while until next request, - // to avoid flooding the console with output - Thread.sleep(2000) - } - system.shutdown() - } -} - -//#frontend -class TransformationFrontend extends Actor { - - var backends = IndexedSeq.empty[ActorRef] - var jobCounter = 0 - - def receive = { - case job: TransformationJob if backends.isEmpty ⇒ - sender ! JobFailed("Service unavailable, try again later", job) - - case job: TransformationJob ⇒ - jobCounter += 1 - backends(jobCounter % backends.size) forward job - - case BackendRegistration if !backends.contains(sender) ⇒ - context watch sender - backends = backends :+ sender - - case Terminated(a) ⇒ - backends = backends.filterNot(_ == a) - } -} -//#frontend - -object TransformationBackend { - def main(args: Array[String]): Unit = { - // Override the configuration of the port when specified as program argument - val config = - (if (args.nonEmpty) ConfigFactory.parseString(s"akka.remote.netty.tcp.port=${args(0)}") - else ConfigFactory.empty).withFallback( - ConfigFactory.parseString("akka.cluster.roles = [backend]")). - withFallback(ConfigFactory.load()) - - val system = ActorSystem("ClusterSystem", config) - system.actorOf(Props[TransformationBackend], name = "backend") - } -} - -//#backend -class TransformationBackend extends Actor { - - val cluster = Cluster(context.system) - - // subscribe to cluster changes, MemberUp - // re-subscribe when restart - override def preStart(): Unit = cluster.subscribe(self, classOf[MemberUp]) - override def postStop(): Unit = cluster.unsubscribe(self) - - def receive = { - case TransformationJob(text) ⇒ sender ! 
TransformationResult(text.toUpperCase) - case state: CurrentClusterState ⇒ - state.members.filter(_.status == MemberStatus.Up) foreach register - case MemberUp(m) ⇒ register(m) - } - - def register(member: Member): Unit = - if (member.hasRole("frontend")) - context.actorSelection(RootActorPath(member.address) / "user" / "frontend") ! - BackendRegistration -} -//#backend \ No newline at end of file diff --git a/akka-samples/akka-sample-fsm/src/main/scala/Buncher.scala b/akka-samples/akka-sample-fsm/src/main/scala/Buncher.scala index b773f316d4..ebca4573d7 100644 --- a/akka-samples/akka-sample-fsm/src/main/scala/Buncher.scala +++ b/akka-samples/akka-sample-fsm/src/main/scala/Buncher.scala @@ -56,21 +56,21 @@ abstract class GenericBuncher[A: ClassTag, B](val singleTimeout: FiniteDuration, startWith(Idle, empty) when(Idle) { - case Event(Msg(m), acc) ⇒ + case Event(Msg(m), acc) => setTimer("multi", StateTimeout, multiTimeout, false) goto(Active) using merge(acc, m) - case Event(Flush, _) ⇒ stay - case Event(Stop, _) ⇒ stop + case Event(Flush, _) => stay + case Event(Stop, _) => stop } when(Active, stateTimeout = singleTimeout) { - case Event(Msg(m), acc) ⇒ + case Event(Msg(m), acc) => stay using merge(acc, m) - case Event(StateTimeout, acc) ⇒ + case Event(StateTimeout, acc) => flush(acc) - case Event(Flush, acc) ⇒ + case Event(Flush, acc) => flush(acc) - case Event(Stop, acc) ⇒ + case Event(Stop, acc) => send(acc) cancelTimer("multi") stop @@ -99,7 +99,7 @@ class Buncher[A: ClassTag](singleTimeout: FiniteDuration, multiTimeout: FiniteDu protected def merge(l: List[A], elem: A) = elem :: l whenUnhandled { - case Event(Target(t), _) ⇒ + case Event(Target(t), _) => target = Some(t) stay } diff --git a/akka-samples/akka-sample-fsm/src/main/scala/DiningHakkersOnBecome.scala b/akka-samples/akka-sample-fsm/src/main/scala/DiningHakkersOnBecome.scala index 914d37c2fe..dee48463fa 100644 --- a/akka-samples/akka-sample-fsm/src/main/scala/DiningHakkersOnBecome.scala +++ 
b/akka-samples/akka-sample-fsm/src/main/scala/DiningHakkersOnBecome.scala @@ -33,15 +33,15 @@ class Chopstick extends Actor { //It will refuse to be taken by other hakkers //But the owning hakker can put it back def takenBy(hakker: ActorRef): Receive = { - case Take(otherHakker) ⇒ + case Take(otherHakker) => otherHakker ! Busy(self) - case Put(`hakker`) ⇒ + case Put(`hakker`) => become(available) } //When a Chopstick is available, it can be taken by a hakker def available: Receive = { - case Take(hakker) ⇒ + case Take(hakker) => become(takenBy(hakker)) hakker ! Taken(self) } @@ -60,7 +60,7 @@ class Hakker(name: String, left: ActorRef, right: ActorRef) extends Actor { //When a hakker is thinking it can become hungry //and try to pick up its chopsticks and eat def thinking: Receive = { - case Eat ⇒ + case Eat => become(hungry) left ! Take(self) right ! Take(self) @@ -71,11 +71,11 @@ class Hakker(name: String, left: ActorRef, right: ActorRef) extends Actor { //If the hakkers first attempt at grabbing a chopstick fails, //it starts to wait for the response of the other grab def hungry: Receive = { - case Taken(`left`) ⇒ + case Taken(`left`) => become(waiting_for(right, left)) - case Taken(`right`) ⇒ + case Taken(`right`) => become(waiting_for(left, right)) - case Busy(chopstick) ⇒ + case Busy(chopstick) => become(denied_a_chopstick) } @@ -83,12 +83,12 @@ class Hakker(name: String, left: ActorRef, right: ActorRef) extends Actor { //and start eating, or the other chopstick was busy, and the hakker goes //back to think about how he should obtain his chopsticks :-) def waiting_for(chopstickToWaitFor: ActorRef, otherChopstick: ActorRef): Receive = { - case Taken(`chopstickToWaitFor`) ⇒ + case Taken(`chopstickToWaitFor`) => println("%s has picked up %s and %s and starts to eat".format(name, left.path.name, right.path.name)) become(eating) system.scheduler.scheduleOnce(5 seconds, self, Think) - case Busy(chopstick) ⇒ + case Busy(chopstick) => become(thinking) otherChopstick ! 
Put(self) self ! Eat @@ -98,11 +98,11 @@ class Hakker(name: String, left: ActorRef, right: ActorRef) extends Actor { //he needs to put it back if he got the other one. //Then go back and think and try to grab the chopsticks again def denied_a_chopstick: Receive = { - case Taken(chopstick) ⇒ + case Taken(chopstick) => become(thinking) chopstick ! Put(self) self ! Eat - case Busy(chopstick) ⇒ + case Busy(chopstick) => become(thinking) self ! Eat } @@ -110,7 +110,7 @@ class Hakker(name: String, left: ActorRef, right: ActorRef) extends Actor { //When a hakker is eating, he can decide to start to think, //then he puts down his chopsticks and starts to think def eating: Receive = { - case Think ⇒ + case Think => become(thinking) left ! Put(self) right ! Put(self) @@ -120,7 +120,7 @@ class Hakker(name: String, left: ActorRef, right: ActorRef) extends Actor { //All hakkers start in a non-eating state def receive = { - case Think ⇒ + case Think => println("%s starts to think".format(name)) become(thinking) system.scheduler.scheduleOnce(5 seconds, self, Eat) @@ -137,11 +137,11 @@ object DiningHakkers { def run(): Unit = { //Create 5 chopsticks - val chopsticks = for (i ← 1 to 5) yield system.actorOf(Props[Chopstick], "Chopstick" + i) + val chopsticks = for (i <- 1 to 5) yield system.actorOf(Props[Chopstick], "Chopstick" + i) //Create 5 awesome hakkers and assign them their left and right chopstick val hakkers = for { - (name, i) ← List("Ghosh", "Boner", "Klang", "Krasser", "Manie").zipWithIndex + (name, i) <- List("Ghosh", "Boner", "Klang", "Krasser", "Manie").zipWithIndex } yield system.actorOf(Props(classOf[Hakker], name, chopsticks(i), chopsticks((i + 1) % 5))) //Signal all hakkers that they should start thinking, and watch the show diff --git a/akka-samples/akka-sample-fsm/src/main/scala/DiningHakkersOnFsm.scala b/akka-samples/akka-sample-fsm/src/main/scala/DiningHakkersOnFsm.scala index 6c0c8cd459..5357d5c36f 100644 --- 
a/akka-samples/akka-sample-fsm/src/main/scala/DiningHakkersOnFsm.scala +++ b/akka-samples/akka-sample-fsm/src/main/scala/DiningHakkersOnFsm.scala @@ -40,7 +40,7 @@ class Chopstick extends Actor with FSM[ChopstickState, TakenBy] { // When a chopstick is available, it can be taken by a some hakker when(Available) { - case Event(Take, _) ⇒ + case Event(Take, _) => goto(Taken) using TakenBy(sender) replying Taken(self) } @@ -48,9 +48,9 @@ class Chopstick extends Actor with FSM[ChopstickState, TakenBy] { // It will refuse to be taken by other hakkers // But the owning hakker can put it back when(Taken) { - case Event(Take, currentState) ⇒ + case Event(Take, currentState) => stay replying Busy(self) - case Event(Put, TakenBy(hakker)) if sender == hakker ⇒ + case Event(Put, TakenBy(hakker)) if sender == hakker => goto(Available) using TakenBy(system.deadLetters) } @@ -89,7 +89,7 @@ class FSMHakker(name: String, left: ActorRef, right: ActorRef) extends Actor wit startWith(Waiting, TakenChopsticks(None, None)) when(Waiting) { - case Event(Think, _) ⇒ + case Event(Think, _) => println("%s starts to think".format(name)) startThinking(5 seconds) } @@ -97,7 +97,7 @@ class FSMHakker(name: String, left: ActorRef, right: ActorRef) extends Actor wit //When a hakker is thinking it can become hungry //and try to pick up its chopsticks and eat when(Thinking) { - case Event(StateTimeout, _) ⇒ + case Event(StateTimeout, _) => left ! Take right ! 
Take goto(Hungry) @@ -108,11 +108,11 @@ class FSMHakker(name: String, left: ActorRef, right: ActorRef) extends Actor wit // If the hakkers first attempt at grabbing a chopstick fails, // it starts to wait for the response of the other grab when(Hungry) { - case Event(Taken(`left`), _) ⇒ + case Event(Taken(`left`), _) => goto(WaitForOtherChopstick) using TakenChopsticks(Some(left), None) - case Event(Taken(`right`), _) ⇒ + case Event(Taken(`right`), _) => goto(WaitForOtherChopstick) using TakenChopsticks(None, Some(right)) - case Event(Busy(_), _) ⇒ + case Event(Busy(_), _) => goto(FirstChopstickDenied) } @@ -120,9 +120,9 @@ class FSMHakker(name: String, left: ActorRef, right: ActorRef) extends Actor wit // and start eating, or the other chopstick was busy, and the hakker goes // back to think about how he should obtain his chopsticks :-) when(WaitForOtherChopstick) { - case Event(Taken(`left`), TakenChopsticks(None, Some(right))) ⇒ startEating(left, right) - case Event(Taken(`right`), TakenChopsticks(Some(left), None)) ⇒ startEating(left, right) - case Event(Busy(chopstick), TakenChopsticks(leftOption, rightOption)) ⇒ + case Event(Taken(`left`), TakenChopsticks(None, Some(right))) => startEating(left, right) + case Event(Taken(`right`), TakenChopsticks(Some(left), None)) => startEating(left, right) + case Event(Busy(chopstick), TakenChopsticks(leftOption, rightOption)) => leftOption.foreach(_ ! Put) rightOption.foreach(_ ! Put) startThinking(10 milliseconds) @@ -137,17 +137,17 @@ class FSMHakker(name: String, left: ActorRef, right: ActorRef) extends Actor wit // he needs to put it back if he got the other one. // Then go back and think and try to grab the chopsticks again when(FirstChopstickDenied) { - case Event(Taken(secondChopstick), _) ⇒ + case Event(Taken(secondChopstick), _) => secondChopstick ! 
Put startThinking(10 milliseconds) - case Event(Busy(chopstick), _) ⇒ + case Event(Busy(chopstick), _) => startThinking(10 milliseconds) } // When a hakker is eating, he can decide to start to think, // then he puts down his chopsticks and starts to think when(Eating) { - case Event(StateTimeout, _) ⇒ + case Event(StateTimeout, _) => println("%s puts down his chopsticks and starts to think".format(name)) left ! Put right ! Put @@ -173,10 +173,10 @@ object DiningHakkersOnFsm { def run(): Unit = { // Create 5 chopsticks - val chopsticks = for (i ← 1 to 5) yield system.actorOf(Props[Chopstick], "Chopstick" + i) + val chopsticks = for (i <- 1 to 5) yield system.actorOf(Props[Chopstick], "Chopstick" + i) // Create 5 awesome fsm hakkers and assign them their left and right chopstick val hakkers = for { - (name, i) ← List("Ghosh", "Boner", "Klang", "Krasser", "Manie").zipWithIndex + (name, i) <- List("Ghosh", "Boner", "Klang", "Krasser", "Manie").zipWithIndex } yield system.actorOf(Props(classOf[FSMHakker], name, chopsticks(i), chopsticks((i + 1) % 5))) hakkers.foreach(_ ! Think) diff --git a/akka-samples/akka-sample-hello-kernel/src/main/scala/sample/kernel/hello/HelloKernel.scala b/akka-samples/akka-sample-hello-kernel/src/main/scala/sample/kernel/hello/HelloKernel.scala index 0878ee7ee7..70f5ab47a9 100644 --- a/akka-samples/akka-sample-hello-kernel/src/main/scala/sample/kernel/hello/HelloKernel.scala +++ b/akka-samples/akka-sample-hello-kernel/src/main/scala/sample/kernel/hello/HelloKernel.scala @@ -12,15 +12,15 @@ class HelloActor extends Actor { val worldActor = context.actorOf(Props[WorldActor]) def receive = { - case Start ⇒ worldActor ! "Hello" - case message: String ⇒ + case Start => worldActor ! "Hello" + case message: String => println("Received message '%s'" format message) } } class WorldActor extends Actor { def receive = { - case message: String ⇒ sender ! (message.toUpperCase + " world!") + case message: String => sender ! 
(message.toUpperCase + " world!") } } diff --git a/akka-samples/akka-sample-hello/README.md b/akka-samples/akka-sample-hello/README.md deleted file mode 100644 index ebce03e807..0000000000 --- a/akka-samples/akka-sample-hello/README.md +++ /dev/null @@ -1,13 +0,0 @@ -HELLO -===== - -This sample is meant to be used by studying the code; it does not perform any -astounding functions when running it. If you want to run it, check out the akka -sources on your local hard drive, follow the [instructions for setting up Akka -with SBT](http://doc.akka.io/docs/akka/current/intro/getting-started.html). -When you start SBT within the checked-out akka source directory, you can run -this sample by typing - - akka-sample-hello/run - -You can read more in the [Akka docs](http://akka.io/docs). diff --git a/akka-samples/akka-sample-hello/src/main/scala/sample/hello/Main.scala b/akka-samples/akka-sample-hello/src/main/scala/sample/hello/Main.scala deleted file mode 100644 index 3767bfb890..0000000000 --- a/akka-samples/akka-sample-hello/src/main/scala/sample/hello/Main.scala +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright (C) 2009-2013 Typesafe Inc. - */ -package sample.hello - -import akka.actor.{ ActorSystem, Actor, Props } - -case object Start - -object Main { - def main(args: Array[String]): Unit = { - val system = ActorSystem() - system.actorOf(Props[HelloActor]) ! Start - } -} - -class HelloActor extends Actor { - val worldActor = context.actorOf(Props[WorldActor]) - def receive = { - case Start ⇒ worldActor ! "Hello" - case s: String ⇒ - println("Received message: %s".format(s)) - context.system.shutdown() - } -} - -class WorldActor extends Actor { - def receive = { - case s: String ⇒ sender ! s.toUpperCase + " world!" 
- } -} - diff --git a/akka-samples/akka-sample-main-java/LICENSE b/akka-samples/akka-sample-main-java/LICENSE new file mode 100644 index 0000000000..a02154466b --- /dev/null +++ b/akka-samples/akka-sample-main-java/LICENSE @@ -0,0 +1,13 @@ +Copyright 2013 Typesafe, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/akka-samples/akka-sample-main-java/activator.properties b/akka-samples/akka-sample-main-java/activator.properties new file mode 100644 index 0000000000..c980185eb2 --- /dev/null +++ b/akka-samples/akka-sample-main-java/activator.properties @@ -0,0 +1,4 @@ +name=akka-sample-main-java +title=Akka Main in Java +description=Actor based version of obligatory Hello World program using the generic launcher class akka.Main. 
+tags=Basics,akka,java,starter diff --git a/akka-samples/akka-sample-main-java/build.sbt b/akka-samples/akka-sample-main-java/build.sbt new file mode 100644 index 0000000000..4d6728a29c --- /dev/null +++ b/akka-samples/akka-sample-main-java/build.sbt @@ -0,0 +1,10 @@ +name := "akka-sample-main-java" + +version := "1.0" + +scalaVersion := "2.10.3" + +libraryDependencies ++= Seq( + "com.typesafe.akka" %% "akka-actor" % "2.3-SNAPSHOT" +) + diff --git a/akka-samples/akka-sample-main-java/project/build.properties b/akka-samples/akka-sample-main-java/project/build.properties new file mode 100644 index 0000000000..0974fce44d --- /dev/null +++ b/akka-samples/akka-sample-main-java/project/build.properties @@ -0,0 +1 @@ +sbt.version=0.13.0 diff --git a/akka-docs/rst/java/code/docs/actor/japi/Greeter.java b/akka-samples/akka-sample-main-java/src/main/java/sample/hello/Greeter.java similarity index 76% rename from akka-docs/rst/java/code/docs/actor/japi/Greeter.java rename to akka-samples/akka-sample-main-java/src/main/java/sample/hello/Greeter.java index 239b615a05..2669d619e8 100644 --- a/akka-docs/rst/java/code/docs/actor/japi/Greeter.java +++ b/akka-samples/akka-sample-main-java/src/main/java/sample/hello/Greeter.java @@ -1,26 +1,23 @@ /** * Copyright (C) 2009-2013 Typesafe Inc. 
*/ - -package docs.actor.japi; +package sample.hello; import akka.actor.UntypedActor; -import java.io.Serializable; -//#greeter public class Greeter extends UntypedActor { - + public static enum Msg { GREET, DONE; } - + @Override public void onReceive(Object msg) { if (msg == Msg.GREET) { System.out.println("Hello World!"); getSender().tell(Msg.DONE, getSelf()); - } else unhandled(msg); + } else + unhandled(msg); } - + } -//#greeter diff --git a/akka-docs/rst/java/code/docs/actor/japi/HelloWorld.java b/akka-samples/akka-sample-main-java/src/main/java/sample/hello/HelloWorld.java similarity index 76% rename from akka-docs/rst/java/code/docs/actor/japi/HelloWorld.java rename to akka-samples/akka-sample-main-java/src/main/java/sample/hello/HelloWorld.java index 396b1881b3..7005a20387 100644 --- a/akka-docs/rst/java/code/docs/actor/japi/HelloWorld.java +++ b/akka-samples/akka-sample-main-java/src/main/java/sample/hello/HelloWorld.java @@ -2,9 +2,8 @@ * Copyright (C) 2009-2013 Typesafe Inc. */ -package docs.actor.japi; +package sample.hello; -//#hello-world import akka.actor.Props; import akka.actor.UntypedActor; import akka.actor.ActorRef; @@ -14,8 +13,7 @@ public class HelloWorld extends UntypedActor { @Override public void preStart() { // create the greeter actor - final ActorRef greeter = - getContext().actorOf(Props.create(Greeter.class), "greeter"); + final ActorRef greeter = getContext().actorOf(Props.create(Greeter.class), "greeter"); // tell it to perform the greeting greeter.tell(Greeter.Msg.GREET, getSelf()); } @@ -25,7 +23,7 @@ public class HelloWorld extends UntypedActor { if (msg == Greeter.Msg.DONE) { // when the greeter is done, stop this actor and with it the application getContext().stop(getSelf()); - } else unhandled(msg); + } else + unhandled(msg); } } -//#hello-world \ No newline at end of file diff --git a/akka-samples/akka-sample-main-java/src/main/java/sample/hello/Main.java 
b/akka-samples/akka-sample-main-java/src/main/java/sample/hello/Main.java new file mode 100644 index 0000000000..c5a88313e6 --- /dev/null +++ b/akka-samples/akka-sample-main-java/src/main/java/sample/hello/Main.java @@ -0,0 +1,11 @@ +/** + * Copyright (C) 2009-2013 Typesafe Inc. + */ +package sample.hello; + +public class Main { + + public static void main(String[] args) { + akka.Main.main(new String[] { HelloWorld.class.getName() }); + } +} diff --git a/akka-samples/akka-sample-main-java/src/main/java/sample/hello/Main2.java b/akka-samples/akka-sample-main-java/src/main/java/sample/hello/Main2.java new file mode 100644 index 0000000000..42db74b69f --- /dev/null +++ b/akka-samples/akka-sample-main-java/src/main/java/sample/hello/Main2.java @@ -0,0 +1,43 @@ +/** + * Copyright (C) 2009-2013 Typesafe Inc. + */ +package sample.hello; + +import akka.actor.ActorRef; +import akka.actor.ActorSystem; +import akka.actor.Props; +import akka.actor.Terminated; +import akka.actor.UntypedActor; +import akka.event.Logging; +import akka.event.LoggingAdapter; + +public class Main2 { + + public static void main(String[] args) { + ActorSystem system = ActorSystem.create("Hello"); + ActorRef a = system.actorOf(Props.create(HelloWorld.class), "helloWorld"); + system.actorOf(Props.create(Terminator.class, a), "terminator"); + } + + public static class Terminator extends UntypedActor { + + private final LoggingAdapter log = Logging.getLogger(getContext().system(), this); + private final ActorRef ref; + + public Terminator(ActorRef ref) { + this.ref = ref; + getContext().watch(ref); + } + + @Override + public void onReceive(Object msg) { + if (msg instanceof Terminated) { + log.info("{} has terminated, shutting down system", ref.path()); + getContext().system().shutdown(); + } else { + unhandled(msg); + } + } + + } +} diff --git a/akka-samples/akka-sample-main-java/tutorial/index.html b/akka-samples/akka-sample-main-java/tutorial/index.html new file mode 100644 index 0000000000..bcd533053e 
--- /dev/null +++ b/akka-samples/akka-sample-main-java/tutorial/index.html @@ -0,0 +1,96 @@ + + + The Obligatory Hello World + + + + +
+

The Obligatory Hello World

+ +

+Since every programming paradigm needs to solve the tough problem of printing a +well-known greeting to the console we’ll introduce you to the actor-based +version. +

+ +

+Open HelloWorld.java +

+ +

+The HelloWorld actor is the application’s “main” class; when it terminates +the application will shut down—more on that later. The main business logic +happens in the preStart method, where a Greeter actor is created +and instructed to issue that greeting we crave for. When the greeter is done it +will tell us so by sending back a message, and when that message has been +received it will be passed into the behavior described by the receive +method where we can conclude the demonstration by stopping the HelloWorld +actor. +

+ +
+
+ +

The Greeter

+ +

+You will be very curious to see how the Greeter actor performs the +actual task. Open Greeter.java. +

+ +

+ +This is extremely simple now: after its creation this actor will not do +anything until someone sends it a message, and if that happens to be an +invitation to greet the world then the Greeter complies and informs the +requester that the deed has been done. +

+ +
+
+ +

Main class

+ +

+Go to the Run tab, and start the application main class +sample.hello.Main. In the log output you can see the "Hello World!" greeting. +

+ +

+Main.java +is actually just a small wrapper around the generic launcher class akka.Main, +which expects only one argument: the class name of the application’s main actor. This main +method will then create the infrastructure needed for running the actors, start the +given main actor and arrange for the whole application to shut down once the +main actor terminates. Thus you will be able to run the application with a +command similar to the following: +

+ +

+java -classpath  akka.Main sample.hello.HelloWorld
+
+ +

+This conveniently assumes placement of the above class definitions in package +sample.hello and it further assumes that you have the required JAR files for +scala-library, typesafe-config and akka-actor available. +The easiest would be to manage these dependencies with a +build tool. +

+ +

+If you need more control of the startup code than what is provided by akka.Main +you can easily write your own main class such as +Main2.java +

+ +

+Try to run the sample.hello.Main2 class +by selecting it in the 'Main class' menu in the Run tab. +

+ +
+ + + diff --git a/akka-samples/akka-sample-main-scala/LICENSE b/akka-samples/akka-sample-main-scala/LICENSE new file mode 100644 index 0000000000..a02154466b --- /dev/null +++ b/akka-samples/akka-sample-main-scala/LICENSE @@ -0,0 +1,13 @@ +Copyright 2013 Typesafe, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/akka-samples/akka-sample-main-scala/activator.properties b/akka-samples/akka-sample-main-scala/activator.properties new file mode 100644 index 0000000000..733461e341 --- /dev/null +++ b/akka-samples/akka-sample-main-scala/activator.properties @@ -0,0 +1,4 @@ +name=akka-sample-main-scala +title=Akka Main in Scala +description=Actor based version of obligatory Hello World program using the generic launcher class akka.Main. 
+tags=Basics,akka,scala,starter diff --git a/akka-samples/akka-sample-main-scala/build.sbt b/akka-samples/akka-sample-main-scala/build.sbt new file mode 100644 index 0000000000..410850f86b --- /dev/null +++ b/akka-samples/akka-sample-main-scala/build.sbt @@ -0,0 +1,10 @@ +name := "akka-sample-main-scala" + +version := "1.0" + +scalaVersion := "2.10.3" + +libraryDependencies ++= Seq( + "com.typesafe.akka" %% "akka-actor" % "2.3-SNAPSHOT" +) + diff --git a/akka-samples/akka-sample-main-scala/project/build.properties b/akka-samples/akka-sample-main-scala/project/build.properties new file mode 100644 index 0000000000..0974fce44d --- /dev/null +++ b/akka-samples/akka-sample-main-scala/project/build.properties @@ -0,0 +1 @@ +sbt.version=0.13.0 diff --git a/akka-samples/akka-sample-main-scala/src/main/scala/sample/hello/Greeter.scala b/akka-samples/akka-sample-main-scala/src/main/scala/sample/hello/Greeter.scala new file mode 100644 index 0000000000..a4a15726b9 --- /dev/null +++ b/akka-samples/akka-sample-main-scala/src/main/scala/sample/hello/Greeter.scala @@ -0,0 +1,19 @@ +/** + * Copyright (C) 2009-2013 Typesafe Inc. + */ +package sample.hello + +import akka.actor.Actor + +object Greeter { + case object Greet + case object Done +} + +class Greeter extends Actor { + def receive = { + case Greeter.Greet => + println("Hello World!") + sender ! Greeter.Done + } +} \ No newline at end of file diff --git a/akka-samples/akka-sample-main-scala/src/main/scala/sample/hello/HelloWorld.scala b/akka-samples/akka-sample-main-scala/src/main/scala/sample/hello/HelloWorld.scala new file mode 100644 index 0000000000..e75ad79552 --- /dev/null +++ b/akka-samples/akka-sample-main-scala/src/main/scala/sample/hello/HelloWorld.scala @@ -0,0 +1,23 @@ +/** + * Copyright (C) 2009-2013 Typesafe Inc. 
+ */ +package sample.hello + +import akka.actor.Actor +import akka.actor.Props + +class HelloWorld extends Actor { + + override def preStart(): Unit = { + // create the greeter actor + val greeter = context.actorOf(Props[Greeter], "greeter") + // tell it to perform the greeting + greeter ! Greeter.Greet + } + + def receive = { + // when the greeter is done, stop this actor and with it the application + case Greeter.Done => context.stop(self) + } +} + diff --git a/akka-samples/akka-sample-main-scala/src/main/scala/sample/hello/Main.scala b/akka-samples/akka-sample-main-scala/src/main/scala/sample/hello/Main.scala new file mode 100644 index 0000000000..0b2c907a94 --- /dev/null +++ b/akka-samples/akka-sample-main-scala/src/main/scala/sample/hello/Main.scala @@ -0,0 +1,12 @@ +/** + * Copyright (C) 2009-2013 Typesafe Inc. + */ +package sample.hello + +object Main { + + def main(args: Array[String]): Unit = { + akka.Main.main(Array(classOf[HelloWorld].getName)) + } + +} \ No newline at end of file diff --git a/akka-samples/akka-sample-main-scala/src/main/scala/sample/hello/Main2.scala b/akka-samples/akka-sample-main-scala/src/main/scala/sample/hello/Main2.scala new file mode 100644 index 0000000000..3d8e37d984 --- /dev/null +++ b/akka-samples/akka-sample-main-scala/src/main/scala/sample/hello/Main2.scala @@ -0,0 +1,30 @@ +/** + * Copyright (C) 2009-2013 Typesafe Inc. 
+ */ +package sample.hello + +import akka.actor.ActorSystem +import akka.actor.Props +import akka.actor.ActorRef +import akka.actor.Actor +import akka.actor.ActorLogging +import akka.actor.Terminated + +object Main2 { + + def main(args: Array[String]): Unit = { + val system = ActorSystem("Hello") + val a = system.actorOf(Props[HelloWorld], "helloWorld") + system.actorOf(Props(classOf[Terminator], a), "terminator") + } + + class Terminator(ref: ActorRef) extends Actor with ActorLogging { + context watch ref + def receive = { + case Terminated(_) => + log.info("{} has terminated, shutting down system", ref.path) + context.system.shutdown() + } + } + +} \ No newline at end of file diff --git a/akka-samples/akka-sample-main-scala/tutorial/index.html b/akka-samples/akka-sample-main-scala/tutorial/index.html new file mode 100644 index 0000000000..d643ee7ee9 --- /dev/null +++ b/akka-samples/akka-sample-main-scala/tutorial/index.html @@ -0,0 +1,96 @@ + + + The Obligatory Hello World + + + + +
+

The Obligatory Hello World

+ +

+Since every programming paradigm needs to solve the tough problem of printing a +well-known greeting to the console we’ll introduce you to the actor-based +version. +

+ +

+Open HelloWorld.scala +

+ +

+The HelloWorld actor is the application’s “main” class; when it terminates +the application will shut down—more on that later. The main business logic +happens in the preStart method, where a Greeter actor is created +and instructed to issue that greeting we crave for. When the greeter is done it +will tell us so by sending back a message, and when that message has been +received it will be passed into the behavior described by the receive +method where we can conclude the demonstration by stopping the HelloWorld +actor. +

+ +
+
+ +

The Greeter

+ +

+You will be very curious to see how the Greeter actor performs the +actual task. Open Greeter.scala. +

+ +

+ +This is extremely simple now: after its creation this actor will not do +anything until someone sends it a message, and if that happens to be an +invitation to greet the world then the Greeter complies and informs the +requester that the deed has been done. +

+ +
+
+ +

Main class

+ +

+Go to the Run tab, and start the application main class +sample.hello.Main. In the log output you can see the "Hello World!" greeting. +

+ +

+Main.scala +is actually just a small wrapper around the generic launcher class akka.Main, +which expects only one argument: the class name of the application’s main actor. This main +method will then create the infrastructure needed for running the actors, start the +given main actor and arrange for the whole application to shut down once the +main actor terminates. Thus you will be able to run the application with a +command similar to the following: +

+ +

+java -classpath  akka.Main sample.hello.HelloWorld
+
+ +

+This conveniently assumes placement of the above class definitions in package +sample.hello and it further assumes that you have the required JAR files for +scala-library, typesafe-config and akka-actor available. +The easiest would be to manage these dependencies with a +build tool. +

+ +

+If you need more control of the startup code than what is provided by akka.Main +you can easily write your own main class such as +Main2.scala +

+ +

+Try to run the sample.hello.Main2 class +by selecting it in the 'Main class' menu in the Run tab. +

+ +
+ + + diff --git a/akka-samples/akka-sample-multi-node/src/multi-jvm/scala/sample/multinode/MultiNodeSample.scala b/akka-samples/akka-sample-multi-node/src/multi-jvm/scala/sample/multinode/MultiNodeSample.scala index e1aed49bc7..1da8c3ac3e 100644 --- a/akka-samples/akka-sample-multi-node/src/multi-jvm/scala/sample/multinode/MultiNodeSample.scala +++ b/akka-samples/akka-sample-multi-node/src/multi-jvm/scala/sample/multinode/MultiNodeSample.scala @@ -46,7 +46,7 @@ class MultiNodeSample extends MultiNodeSpec(MultiNodeSampleConfig) runOn(node2) { system.actorOf(Props(new Actor { def receive = { - case "ping" ⇒ sender ! "pong" + case "ping" => sender ! "pong" } }), "ponger") enterBarrier("deployed") diff --git a/akka-samples/akka-sample-osgi-dining-hakkers/core/src/main/scala/akka/sample/osgi/internal/Hakker.scala b/akka-samples/akka-sample-osgi-dining-hakkers/core/src/main/scala/akka/sample/osgi/internal/Hakker.scala index 59a635cdf2..c028fcb31f 100644 --- a/akka-samples/akka-sample-osgi-dining-hakkers/core/src/main/scala/akka/sample/osgi/internal/Hakker.scala +++ b/akka-samples/akka-sample-osgi-dining-hakkers/core/src/main/scala/akka/sample/osgi/internal/Hakker.scala @@ -27,15 +27,15 @@ class Chopstick extends Actor { //It will refuse to be taken by other hakkers //But the owning hakker can put it back def takenBy(hakker: ActorRef): Receive = { - case Take(otherHakker) ⇒ + case Take(otherHakker) => otherHakker ! Busy(self) - case Put(`hakker`) ⇒ + case Put(`hakker`) => become(available) } //When a Chopstick is available, it can be taken by a hakker def available: Receive = { - case Take(hakker) ⇒ + case Take(hakker) => log.info(self.path + " is taken by " + hakker) become(takenBy(hakker)) hakker ! 
Taken(self) @@ -71,11 +71,11 @@ class Hakker(name: String, chair: Int) extends Actor { //When a hakker is thinking it can become hungry //and try to pick up its chopsticks and eat def thinking(left: ActorRef, right: ActorRef): Receive = { - case Eat ⇒ + case Eat => become(hungry(left, right) orElse (clusterEvents)) left ! Take(self) right ! Take(self) - case Identify ⇒ identify("Thinking") + case Identify => identify("Thinking") } //When a hakker is hungry it tries to pick up its chopsticks and eat @@ -83,28 +83,28 @@ class Hakker(name: String, chair: Int) extends Actor { //If the hakkers first attempt at grabbing a chopstick fails, //it starts to wait for the response of the other grab def hungry(left: ActorRef, right: ActorRef): Receive = { - case Taken(`left`) ⇒ + case Taken(`left`) => become(waiting_for(left, right, false) orElse (clusterEvents)) - case Taken(`right`) ⇒ + case Taken(`right`) => become(waiting_for(left, right, true) orElse (clusterEvents)) - case Busy(chopstick) ⇒ + case Busy(chopstick) => become(denied_a_chopstick(left, right) orElse (clusterEvents)) - case Identify ⇒ identify("Hungry") + case Identify => identify("Hungry") } //When a hakker is waiting for the last chopstick it can either obtain it //and start eating, or the other chopstick was busy, and the hakker goes //back to think about how he should obtain his chopsticks :-) def waiting_for(left: ActorRef, right: ActorRef, waitingForLeft: Boolean): Receive = { - case Taken(`left`) if waitingForLeft ⇒ + case Taken(`left`) if waitingForLeft => log.info("%s has picked up %s and %s and starts to eat".format(name, left.path.name, right.path.name)) become(eating(left, right) orElse (clusterEvents)) system.scheduler.scheduleOnce(5 seconds, self, Think) - case Taken(`right`) if !waitingForLeft ⇒ + case Taken(`right`) if !waitingForLeft => log.info("%s has picked up %s and %s and starts to eat".format(name, left.path.name, right.path.name)) become(eating(left, right) orElse (clusterEvents)) 
system.scheduler.scheduleOnce(5 seconds, self, Think) - case Busy(chopstick) ⇒ + case Busy(chopstick) => become(thinking(left, right) orElse (clusterEvents)) if (waitingForLeft) { right ! Put(self) @@ -112,44 +112,44 @@ class Hakker(name: String, chair: Int) extends Actor { left ! Put(self) } self ! Eat - case Identify ⇒ identify("Waiting for Chopstick") + case Identify => identify("Waiting for Chopstick") } //When the results of the other grab comes back, //he needs to put it back if he got the other one. //Then go back and think and try to grab the chopsticks again def denied_a_chopstick(left: ActorRef, right: ActorRef): Receive = { - case Taken(chopstick) ⇒ + case Taken(chopstick) => become(thinking(left, right) orElse (clusterEvents)) chopstick ! Put(self) self ! Eat - case Busy(chopstick) ⇒ + case Busy(chopstick) => become(thinking(left, right) orElse (clusterEvents)) self ! Eat - case Identify ⇒ identify("Denied a Chopstick") + case Identify => identify("Denied a Chopstick") } //When a hakker is eating, he can decide to start to think, //then he puts down his chopsticks and starts to think def eating(left: ActorRef, right: ActorRef): Receive = { - case Think ⇒ + case Think => become(thinking(left, right) orElse (clusterEvents)) left ! Put(self) right ! 
Put(self) log.info("%s puts down his chopsticks and starts to think".format(name)) system.scheduler.scheduleOnce(5 seconds, self, Eat) - case Identify ⇒ identify("Eating") + case Identify => identify("Eating") } def waitForChopsticks: Receive = { - case (left: ActorRef, right: ActorRef) ⇒ + case (left: ActorRef, right: ActorRef) => become(thinking(left, right) orElse (clusterEvents)) system.scheduler.scheduleOnce(5 seconds, self, Eat) } def clusterEvents: Receive = { - case state: CurrentClusterState ⇒ state.leader foreach updateTable - case LeaderChanged(Some(leaderAddress)) ⇒ updateTable(leaderAddress) + case state: CurrentClusterState => state.leader foreach updateTable + case LeaderChanged(Some(leaderAddress)) => updateTable(leaderAddress) } def identify(busyWith: String) { diff --git a/akka-samples/akka-sample-osgi-dining-hakkers/core/src/main/scala/akka/sample/osgi/internal/Table.scala b/akka-samples/akka-sample-osgi-dining-hakkers/core/src/main/scala/akka/sample/osgi/internal/Table.scala index 3db0c7512d..0c09ce4fd2 100644 --- a/akka-samples/akka-sample-osgi-dining-hakkers/core/src/main/scala/akka/sample/osgi/internal/Table.scala +++ b/akka-samples/akka-sample-osgi-dining-hakkers/core/src/main/scala/akka/sample/osgi/internal/Table.scala @@ -18,9 +18,9 @@ package akka.sample.osgi.internal import akka.actor.{ Props, Actor } class Table extends Actor { - val chopsticks = for (i ← 1 to 5) yield context.actorOf(Props[Chopstick], "Chopstick" + i) + val chopsticks = for (i <- 1 to 5) yield context.actorOf(Props[Chopstick], "Chopstick" + i) def receive = { - case x: Int ⇒ sender ! ((chopsticks(x), chopsticks((x + 1) % 5))) + case x: Int => sender ! 
((chopsticks(x), chopsticks((x + 1) % 5))) } } diff --git a/akka-samples/akka-sample-osgi-dining-hakkers/integration-test/src/test/scala/akka/sample/osgi/test/HakkerStatusTest.scala b/akka-samples/akka-sample-osgi-dining-hakkers/integration-test/src/test/scala/akka/sample/osgi/test/HakkerStatusTest.scala index 6694c1cbbf..e3f1a1b113 100644 --- a/akka-samples/akka-sample-osgi-dining-hakkers/integration-test/src/test/scala/akka/sample/osgi/test/HakkerStatusTest.scala +++ b/akka-samples/akka-sample-osgi-dining-hakkers/integration-test/src/test/scala/akka/sample/osgi/test/HakkerStatusTest.scala @@ -100,7 +100,7 @@ java.io.EOFException object HakkerStatusTest { class Interrogator(queue: SynchronousQueue[(String, String)]) extends Actor { def receive = { - case msg: Identification ⇒ { + case msg: Identification => { queue.put((msg.name, msg.busyWith)) } } diff --git a/akka-samples/akka-sample-persistence/src/main/scala/sample/persistence/ConversationRecoveryExample.scala b/akka-samples/akka-sample-persistence/src/main/scala/sample/persistence/ConversationRecoveryExample.scala index 2dc6eabde6..b7d6090844 100644 --- a/akka-samples/akka-sample-persistence/src/main/scala/sample/persistence/ConversationRecoveryExample.scala +++ b/akka-samples/akka-sample-persistence/src/main/scala/sample/persistence/ConversationRecoveryExample.scala @@ -16,13 +16,13 @@ object ConversationRecoveryExample extends App { var counter = 0 def receive = { - case m @ ConfirmablePersistent(Ping, _, _) ⇒ + case m @ ConfirmablePersistent(Ping, _, _) => counter += 1 println(s"received ping ${counter} times ...") m.confirm() if (!recoveryRunning) Thread.sleep(1000) pongChannel ! Deliver(m.withPayload(Pong), sender, Resolve.Destination) - case "init" ⇒ if (counter == 0) pongChannel ! Deliver(Persistent(Pong), sender) + case "init" => if (counter == 0) pongChannel ! 
Deliver(Persistent(Pong), sender) } override def preStart() = () @@ -33,7 +33,7 @@ object ConversationRecoveryExample extends App { var counter = 0 def receive = { - case m @ ConfirmablePersistent(Pong, _, _) ⇒ + case m @ ConfirmablePersistent(Pong, _, _) => counter += 1 println(s"received pong ${counter} times ...") m.confirm() diff --git a/akka-samples/akka-sample-persistence/src/main/scala/sample/persistence/EventsourcedExample.scala b/akka-samples/akka-sample-persistence/src/main/scala/sample/persistence/EventsourcedExample.scala index c4ba215241..19a5eef071 100644 --- a/akka-samples/akka-sample-persistence/src/main/scala/sample/persistence/EventsourcedExample.scala +++ b/akka-samples/akka-sample-persistence/src/main/scala/sample/persistence/EventsourcedExample.scala @@ -27,30 +27,30 @@ class ExampleProcessor extends EventsourcedProcessor { state.size val receiveReplay: Receive = { - case evt: Evt ⇒ updateState(evt) - case SnapshotOffer(_, snapshot: ExampleState) ⇒ state = snapshot + case evt: Evt => updateState(evt) + case SnapshotOffer(_, snapshot: ExampleState) => state = snapshot } val receiveCommand: Receive = { - case Cmd(data) ⇒ + case Cmd(data) => persist(Evt(s"${data}-${numEvents}"))(updateState) - persist(Evt(s"${data}-${numEvents + 1}")) { event ⇒ + persist(Evt(s"${data}-${numEvents + 1}")) { event => updateState(event) context.system.eventStream.publish(event) if (data == "foo") context.become(otherCommandHandler) } - case "snap" ⇒ saveSnapshot(state) - case "print" ⇒ println(state) + case "snap" => saveSnapshot(state) + case "print" => println(state) } val otherCommandHandler: Receive = { - case Cmd("bar") ⇒ - persist(Evt(s"bar-${numEvents}")) { event ⇒ + case Cmd("bar") => + persist(Evt(s"bar-${numEvents}")) { event => updateState(event) context.unbecome() } unstashAll() - case other ⇒ stash() + case other => stash() } } //#eventsourced-example diff --git 
a/akka-samples/akka-sample-persistence/src/main/scala/sample/persistence/ProcessorChannelExample.scala b/akka-samples/akka-sample-persistence/src/main/scala/sample/persistence/ProcessorChannelExample.scala index e4b6045bf6..1292d39f17 100644 --- a/akka-samples/akka-sample-persistence/src/main/scala/sample/persistence/ProcessorChannelExample.scala +++ b/akka-samples/akka-sample-persistence/src/main/scala/sample/persistence/ProcessorChannelExample.scala @@ -16,7 +16,7 @@ object ProcessorChannelExample extends App { var received: List[Persistent] = Nil def receive = { - case p @ Persistent(payload, _) ⇒ + case p @ Persistent(payload, _) => println(s"processed ${payload}") channel forward Deliver(p.withPayload(s"processed ${payload}"), destination) } @@ -24,7 +24,7 @@ object ProcessorChannelExample extends App { class ExampleDestination extends Actor { def receive = { - case p @ ConfirmablePersistent(payload, snr, _) ⇒ + case p @ ConfirmablePersistent(payload, snr, _) => println(s"received ${payload}") sender ! s"re: ${payload} (${snr})" p.confirm() @@ -37,8 +37,8 @@ object ProcessorChannelExample extends App { implicit val timeout = Timeout(3000) import system.dispatcher - processor ? Persistent("a") onSuccess { case reply ⇒ println(s"reply = ${reply}") } - processor ? Persistent("b") onSuccess { case reply ⇒ println(s"reply = ${reply}") } + processor ? Persistent("a") onSuccess { case reply => println(s"reply = ${reply}") } + processor ? 
Persistent("b") onSuccess { case reply => println(s"reply = ${reply}") } Thread.sleep(1000) system.shutdown() diff --git a/akka-samples/akka-sample-persistence/src/main/scala/sample/persistence/ProcessorFailureExample.scala b/akka-samples/akka-sample-persistence/src/main/scala/sample/persistence/ProcessorFailureExample.scala index 5cbc29bdd2..3ca4c682ba 100644 --- a/akka-samples/akka-sample-persistence/src/main/scala/sample/persistence/ProcessorFailureExample.scala +++ b/akka-samples/akka-sample-persistence/src/main/scala/sample/persistence/ProcessorFailureExample.scala @@ -12,16 +12,16 @@ object ProcessorFailureExample extends App { var received: List[String] = Nil // state def receive = { - case "print" ⇒ println(s"received ${received.reverse}") - case "boom" ⇒ throw new Exception("boom") - case Persistent("boom", _) ⇒ throw new Exception("boom") - case Persistent(payload: String, _) ⇒ received = payload :: received + case "print" => println(s"received ${received.reverse}") + case "boom" => throw new Exception("boom") + case Persistent("boom", _) => throw new Exception("boom") + case Persistent(payload: String, _) => received = payload :: received } override def preRestart(reason: Throwable, message: Option[Any]) { message match { - case Some(p: Persistent) if !recoveryRunning ⇒ deleteMessage(p.sequenceNr) // mark failing message as deleted - case _ ⇒ // ignore + case Some(p: Persistent) if !recoveryRunning => deleteMessage(p.sequenceNr) // mark failing message as deleted + case _ => // ignore } super.preRestart(reason, message) } diff --git a/akka-samples/akka-sample-persistence/src/main/scala/sample/persistence/SnapshotExample.scala b/akka-samples/akka-sample-persistence/src/main/scala/sample/persistence/SnapshotExample.scala index e38d05fd77..bf709eccc1 100644 --- a/akka-samples/akka-sample-persistence/src/main/scala/sample/persistence/SnapshotExample.scala +++ b/akka-samples/akka-sample-persistence/src/main/scala/sample/persistence/SnapshotExample.scala @@ 
-17,14 +17,14 @@ object SnapshotExample extends App { var state = ExampleState() def receive = { - case Persistent(s, snr) ⇒ state = state.update(s"${s}-${snr}") - case SaveSnapshotSuccess(metadata) ⇒ // ... - case SaveSnapshotFailure(metadata, reason) ⇒ // ... - case SnapshotOffer(_, s: ExampleState) ⇒ + case Persistent(s, snr) => state = state.update(s"${s}-${snr}") + case SaveSnapshotSuccess(metadata) => // ... + case SaveSnapshotFailure(metadata, reason) => // ... + case SnapshotOffer(_, s: ExampleState) => println("offered state = " + s) state = s - case "print" ⇒ println("current state = " + state) - case "snap" ⇒ saveSnapshot(state) + case "print" => println("current state = " + state) + case "snap" => saveSnapshot(state) } } diff --git a/akka-samples/akka-sample-remote-java/.gitignore b/akka-samples/akka-sample-remote-java/.gitignore new file mode 100644 index 0000000000..660c959e44 --- /dev/null +++ b/akka-samples/akka-sample-remote-java/.gitignore @@ -0,0 +1,17 @@ +*# +*.iml +*.ipr +*.iws +*.pyc +*.tm.epoch +*.vim +*-shim.sbt +.idea/ +/project/plugins/project +project/boot +target/ +/logs +.cache +.classpath +.project +.settings \ No newline at end of file diff --git a/akka-samples/akka-sample-remote-java/LICENSE b/akka-samples/akka-sample-remote-java/LICENSE new file mode 100644 index 0000000000..a02154466b --- /dev/null +++ b/akka-samples/akka-sample-remote-java/LICENSE @@ -0,0 +1,13 @@ +Copyright 2013 Typesafe, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
diff --git a/akka-samples/akka-sample-remote-java/activator.properties b/akka-samples/akka-sample-remote-java/activator.properties new file mode 100644 index 0000000000..7ddeb19576 --- /dev/null +++ b/akka-samples/akka-sample-remote-java/activator.properties @@ -0,0 +1,4 @@ +name=akka-sample-remote-java +title=Akka Remote Samples with Java +description=Akka Remote Samples with Java +tags=akka,remote,java,sample diff --git a/akka-samples/akka-sample-remote-java/build.sbt b/akka-samples/akka-sample-remote-java/build.sbt new file mode 100644 index 0000000000..57f9e1e148 --- /dev/null +++ b/akka-samples/akka-sample-remote-java/build.sbt @@ -0,0 +1,10 @@ +name := "akka-sample-remote-java" + +version := "1.0" + +scalaVersion := "2.10.3" + +libraryDependencies ++= Seq( + "com.typesafe.akka" %% "akka-remote" % "2.3-SNAPSHOT" +) + diff --git a/akka-samples/akka-sample-remote-java/project/build.properties b/akka-samples/akka-sample-remote-java/project/build.properties new file mode 100644 index 0000000000..0974fce44d --- /dev/null +++ b/akka-samples/akka-sample-remote-java/project/build.properties @@ -0,0 +1 @@ +sbt.version=0.13.0 diff --git a/akka-samples/akka-sample-remote-java/src/main/java/sample/remote/calculator/CalculatorActor.java b/akka-samples/akka-sample-remote-java/src/main/java/sample/remote/calculator/CalculatorActor.java new file mode 100644 index 0000000000..73a0a1451d --- /dev/null +++ b/akka-samples/akka-sample-remote-java/src/main/java/sample/remote/calculator/CalculatorActor.java @@ -0,0 +1,48 @@ +/** + * Copyright (C) 2009-2013 Typesafe Inc. 
+ */ +package sample.remote.calculator; + +import akka.actor.UntypedActor; + +public class CalculatorActor extends UntypedActor { + @Override + public void onReceive(Object message) { + + if (message instanceof Op.Add) { + Op.Add add = (Op.Add) message; + System.out.println("Calculating " + add.getN1() + " + " + add.getN2()); + Op.AddResult result = new Op.AddResult(add.getN1(), add.getN2(), + add.getN1() + add.getN2()); + getSender().tell(result, getSelf()); + + } else if (message instanceof Op.Subtract) { + Op.Subtract subtract = (Op.Subtract) message; + System.out.println("Calculating " + subtract.getN1() + " - " + + subtract.getN2()); + Op.SubtractResult result = new Op.SubtractResult(subtract.getN1(), + subtract.getN2(), subtract.getN1() - subtract.getN2()); + getSender().tell(result, getSelf()); + + } else if (message instanceof Op.Multiply) { + Op.Multiply multiply = (Op.Multiply) message; + System.out.println("Calculating " + multiply.getN1() + " * " + + multiply.getN2()); + Op.MultiplicationResult result = new Op.MultiplicationResult( + multiply.getN1(), multiply.getN2(), multiply.getN1() + * multiply.getN2()); + getSender().tell(result, getSelf()); + + } else if (message instanceof Op.Divide) { + Op.Divide divide = (Op.Divide) message; + System.out.println("Calculating " + divide.getN1() + " / " + + divide.getN2()); + Op.DivisionResult result = new Op.DivisionResult(divide.getN1(), + divide.getN2(), divide.getN1() / divide.getN2()); + getSender().tell(result, getSelf()); + + } else { + unhandled(message); + } + } +} diff --git a/akka-samples/akka-sample-remote-java/src/main/java/sample/remote/calculator/CreationActor.java b/akka-samples/akka-sample-remote-java/src/main/java/sample/remote/calculator/CreationActor.java new file mode 100644 index 0000000000..e1638a88cb --- /dev/null +++ b/akka-samples/akka-sample-remote-java/src/main/java/sample/remote/calculator/CreationActor.java @@ -0,0 +1,36 @@ +/** + * Copyright (C) 2009-2013 Typesafe Inc. 
+ */ +package sample.remote.calculator; + +import akka.actor.ActorRef; +import akka.actor.Props; +import akka.actor.UntypedActor; + +public class CreationActor extends UntypedActor { + + @Override + public void onReceive(Object message) throws Exception { + + if (message instanceof Op.MathOp) { + ActorRef calculator = getContext().actorOf( + Props.create(CalculatorActor.class)); + calculator.tell(message, getSelf()); + + } else if (message instanceof Op.MultiplicationResult) { + Op.MultiplicationResult result = (Op.MultiplicationResult) message; + System.out.printf("Mul result: %d * %d = %d\n", result.getN1(), + result.getN2(), result.getResult()); + getContext().stop(getSender()); + + } else if (message instanceof Op.DivisionResult) { + Op.DivisionResult result = (Op.DivisionResult) message; + System.out.printf("Div result: %.0f / %d = %.2f\n", result.getN1(), + result.getN2(), result.getResult()); + getContext().stop(getSender()); + + } else { + unhandled(message); + } + } +} diff --git a/akka-samples/akka-sample-remote-java/src/main/java/sample/remote/calculator/CreationApplication.java b/akka-samples/akka-sample-remote-java/src/main/java/sample/remote/calculator/CreationApplication.java new file mode 100644 index 0000000000..6cb34ee3ce --- /dev/null +++ b/akka-samples/akka-sample-remote-java/src/main/java/sample/remote/calculator/CreationApplication.java @@ -0,0 +1,51 @@ +/** + * Copyright (C) 2009-2013 Typesafe Inc. 
+ */ +package sample.remote.calculator; + +import static java.util.concurrent.TimeUnit.SECONDS; +import java.util.Random; +import scala.concurrent.duration.Duration; +import akka.actor.ActorRef; +import akka.actor.ActorSystem; +import akka.actor.Props; + +import com.typesafe.config.ConfigFactory; + +public class CreationApplication { + + public static void main(String[] args) { + if (args.length == 0 || args[0].equals("CalculatorWorker")) + startRemoteWorkerSystem(); + if (args.length == 0 || args[0].equals("Creation")) + startRemoteCreationSystem(); + } + + public static void startRemoteWorkerSystem() { + ActorSystem.create("CalculatorWorkerSystem", + ConfigFactory.load(("calculator"))); + System.out.println("Started CalculatorWorkerSystem"); + } + + public static void startRemoteCreationSystem() { + final ActorSystem system = ActorSystem.create("CreationSystem", + ConfigFactory.load("remotecreation")); + final ActorRef actor = system.actorOf(Props.create(CreationActor.class), + "creationActor"); + + System.out.println("Started CreationSystem"); + final Random r = new Random(); + system.scheduler().schedule(Duration.create(1, SECONDS), + Duration.create(1, SECONDS), new Runnable() { + @Override + public void run() { + if (r.nextInt(100) % 2 == 0) { + actor.tell(new Op.Multiply(r.nextInt(100), r.nextInt(100)), null); + } else { + actor.tell(new Op.Divide(r.nextInt(10000), r.nextInt(99) + 1), + null); + } + } + }, system.dispatcher()); + } +} diff --git a/akka-samples/akka-sample-remote-java/src/main/java/sample/remote/calculator/LookupActor.java b/akka-samples/akka-sample-remote-java/src/main/java/sample/remote/calculator/LookupActor.java new file mode 100644 index 0000000000..589394fd05 --- /dev/null +++ b/akka-samples/akka-sample-remote-java/src/main/java/sample/remote/calculator/LookupActor.java @@ -0,0 +1,86 @@ +/** + * Copyright (C) 2009-2013 Typesafe Inc. 
+ */ +package sample.remote.calculator; + +import static java.util.concurrent.TimeUnit.SECONDS; +import scala.concurrent.duration.Duration; +import akka.actor.ActorRef; +import akka.actor.ActorIdentity; +import akka.actor.Identify; +import akka.actor.Terminated; +import akka.actor.UntypedActor; +import akka.actor.ReceiveTimeout; +import akka.japi.Procedure; + +public class LookupActor extends UntypedActor { + + private final String path; + private ActorRef calculator = null; + + public LookupActor(String path) { + this.path = path; + sendIdentifyRequest(); + } + + private void sendIdentifyRequest() { + getContext().actorSelection(path).tell(new Identify(path), getSelf()); + getContext() + .system() + .scheduler() + .scheduleOnce(Duration.create(3, SECONDS), getSelf(), + ReceiveTimeout.getInstance(), getContext().dispatcher(), getSelf()); + } + + @Override + public void onReceive(Object message) throws Exception { + if (message instanceof ActorIdentity) { + calculator = ((ActorIdentity) message).getRef(); + if (calculator == null) { + System.out.println("Remote actor not available: " + path); + } else { + getContext().watch(calculator); + getContext().become(active, true); + } + + } else if (message instanceof ReceiveTimeout) { + sendIdentifyRequest(); + + } else { + System.out.println("Not ready yet"); + + } + } + + Procedure active = new Procedure() { + @Override + public void apply(Object message) { + if (message instanceof Op.MathOp) { + // send message to server actor + calculator.tell(message, getSelf()); + + } else if (message instanceof Op.AddResult) { + Op.AddResult result = (Op.AddResult) message; + System.out.printf("Add result: %d + %d = %d\n", result.getN1(), + result.getN2(), result.getResult()); + + } else if (message instanceof Op.SubtractResult) { + Op.SubtractResult result = (Op.SubtractResult) message; + System.out.printf("Sub result: %d - %d = %d\n", result.getN1(), + result.getN2(), result.getResult()); + + } else if (message instanceof 
Terminated) { + System.out.println("Calculator terminated"); + sendIdentifyRequest(); + getContext().unbecome(); + + } else if (message instanceof ReceiveTimeout) { + // ignore + + } else { + unhandled(message); + } + + } + }; +} diff --git a/akka-samples/akka-sample-remote-java/src/main/java/sample/remote/calculator/LookupApplication.java b/akka-samples/akka-sample-remote-java/src/main/java/sample/remote/calculator/LookupApplication.java new file mode 100644 index 0000000000..d7c0d4f57e --- /dev/null +++ b/akka-samples/akka-sample-remote-java/src/main/java/sample/remote/calculator/LookupApplication.java @@ -0,0 +1,53 @@ +/** + * Copyright (C) 2009-2013 Typesafe Inc. + */ +package sample.remote.calculator; + +import static java.util.concurrent.TimeUnit.SECONDS; +import java.util.Random; +import scala.concurrent.duration.Duration; +import akka.actor.ActorRef; +import akka.actor.ActorSystem; +import akka.actor.Props; +import com.typesafe.config.ConfigFactory; + +public class LookupApplication { + public static void main(String[] args) { + if (args.length == 0 || args[0].equals("Calculator")) + startRemoteCalculatorSystem(); + if (args.length == 0 || args[0].equals("Lookup")) + startRemoteLookupSystem(); + } + + public static void startRemoteCalculatorSystem() { + final ActorSystem system = ActorSystem.create("CalculatorSystem", + ConfigFactory.load(("calculator"))); + system.actorOf(Props.create(CalculatorActor.class), "calculator"); + System.out.println("Started CalculatorSystem"); + } + + public static void startRemoteLookupSystem() { + + final ActorSystem system = ActorSystem.create("LookupSystem", + ConfigFactory.load("remotelookup")); + final String path = "akka.tcp://CalculatorSystem@127.0.0.1:2552/user/calculator"; + final ActorRef actor = system.actorOf( + Props.create(LookupActor.class, path), "lookupActor"); + + System.out.println("Started LookupSystem"); + final Random r = new Random(); + system.scheduler().schedule(Duration.create(1, SECONDS), + 
Duration.create(1, SECONDS), new Runnable() { + @Override + public void run() { + if (r.nextInt(100) % 2 == 0) { + actor.tell(new Op.Add(r.nextInt(100), r.nextInt(100)), null); + } else { + actor.tell(new Op.Subtract(r.nextInt(100), r.nextInt(100)), null); + } + + } + }, system.dispatcher()); + + } +} diff --git a/akka-samples/akka-sample-remote/src/main/java/sample/remote/calculator/java/Op.java b/akka-samples/akka-sample-remote-java/src/main/java/sample/remote/calculator/Op.java similarity index 98% rename from akka-samples/akka-sample-remote/src/main/java/sample/remote/calculator/java/Op.java rename to akka-samples/akka-sample-remote-java/src/main/java/sample/remote/calculator/Op.java index 4656f58521..6bef8538dd 100644 --- a/akka-samples/akka-sample-remote/src/main/java/sample/remote/calculator/java/Op.java +++ b/akka-samples/akka-sample-remote-java/src/main/java/sample/remote/calculator/Op.java @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2013 Typesafe Inc. */ -package sample.remote.calculator.java; +package sample.remote.calculator; import java.io.Serializable; diff --git a/akka-samples/akka-sample-remote-java/src/main/resources/calculator.conf b/akka-samples/akka-sample-remote-java/src/main/resources/calculator.conf new file mode 100644 index 0000000000..948c1f2929 --- /dev/null +++ b/akka-samples/akka-sample-remote-java/src/main/resources/calculator.conf @@ -0,0 +1,6 @@ +include "common" + +akka { + # LISTEN on tcp port 2552 + remote.netty.tcp.port = 2552 +} diff --git a/akka-samples/akka-sample-remote-java/src/main/resources/common.conf b/akka-samples/akka-sample-remote-java/src/main/resources/common.conf new file mode 100644 index 0000000000..2c8f881372 --- /dev/null +++ b/akka-samples/akka-sample-remote-java/src/main/resources/common.conf @@ -0,0 +1,13 @@ +akka { + + actor { + provider = "akka.remote.RemoteActorRefProvider" + } + + remote { + netty.tcp { + hostname = "127.0.0.1" + } + } + +} diff --git 
a/akka-samples/akka-sample-remote-java/src/main/resources/remotecreation.conf b/akka-samples/akka-sample-remote-java/src/main/resources/remotecreation.conf new file mode 100644 index 0000000000..76292f999f --- /dev/null +++ b/akka-samples/akka-sample-remote-java/src/main/resources/remotecreation.conf @@ -0,0 +1,13 @@ +include "common" + +akka { + actor { + deployment { + "/creationActor/*" { + remote = "akka.tcp://CalculatorWorkerSystem@127.0.0.1:2552" + } + } + } + + remote.netty.tcp.port = 2554 +} diff --git a/akka-samples/akka-sample-remote-java/src/main/resources/remotelookup.conf b/akka-samples/akka-sample-remote-java/src/main/resources/remotelookup.conf new file mode 100644 index 0000000000..336f557a08 --- /dev/null +++ b/akka-samples/akka-sample-remote-java/src/main/resources/remotelookup.conf @@ -0,0 +1,5 @@ +include "common" + +akka { + remote.netty.tcp.port = 2553 +} diff --git a/akka-samples/akka-sample-remote-java/tutorial/index.html b/akka-samples/akka-sample-remote-java/tutorial/index.html new file mode 100644 index 0000000000..3051741cae --- /dev/null +++ b/akka-samples/akka-sample-remote-java/tutorial/index.html @@ -0,0 +1,256 @@ + + +Akka Remote Samples with Java + + + + +
+

+In order to showcase the remote capabilities of Akka +we thought a remote calculator could do the trick. +This sample demonstrates both remote deployment and look-up of remote actors. +

+
+ +
+

Lookup Remote Actors

+

+This sample involves two actor systems. +

+ +
    +
  • CalculatorSystem listens on port 2552 and starts one actor, the +CalculatorActor +that provides a service for arithmetic operations.
  • +
  • LookupSystem listens on port 2553 and starts one actor, the +LookupActor +that sends operations to the remote calculator service.
  • +
+ +

+Open LookupApplication.java. +

+ +

+There you see how the two actor systems and actors are started. In this first step they are running in the same JVM process, +but you can run them in separate processes as described later. Note that this changes nothing in the configuration or implementation. +

+ +

+The two actor systems use different configuration, which is where the listen port is defined. +The CalculatorSystem uses calculator.conf +and the LookupSystem uses remotelookup.conf. +

+ +

+Note that the configuration files also import the +common.conf. +This enables the remoting by installing the RemoteActorRefProvider and chooses the default remote transport. +Be sure to replace the default IP 127.0.0.1 with the real address the system is reachable +by if you deploy onto multiple machines! +

+ +

+The CalculatorActor +does not illustrate anything exciting. More interesting is the +LookupActor. +It takes a String path as constructor parameter. This is the full path, including the remote +address of the calculator service. Observe how the actor system name of the path matches the remote system’s +name, as do IP and port number. Top-level actors are always created below the "/user" guardian, which supervises them. +

+ +

+"akka.tcp://CalculatorSystem@127.0.0.1:2552/user/calculator"
+
+ +

+First it sends an Identify message to the actor selection of the path. +The remote calculator actor will reply with ActorIdentity containing its ActorRef. +Identify is a built-in message that all Actors will understand and automatically reply to with a +ActorIdentity. If the identification fails it will be retried after the scheduled timeout +by the LookupActor. +

+ +

+Note how none of the code is specific to remoting, this also applies when talking to a local actor which +might terminate and be recreated. That is what we call Location Transparency. +

+ +

+Once it has the ActorRef of the remote service it can watch it. The remote system +might be shut down and later started up again, then Terminated is received on the watching +side and it can retry the identification to establish a connection to the new remote system. +

+ +
+ +
+

Run the Lookup Sample

+ +

+To run this sample, go to the Run +tab, and start the application main class sample.remote.calculator.LookupApplication if it is not already started. +

+ +

+In the log pane you should see something like: +

+ +

+Started LookupSystem
+Calculating 74 - 42
+Sub result: 74 - 42 = 32
+Calculating 15 + 71
+Add result: 15 + 71 = 86
+
+ +

+The two actor systems are running in the same JVM process. It can be more interesting to run them in separate +processes. Stop the application in the Run tab and then open two +terminal windows. +

+ +

+Start the CalculatorSystem in the first terminal window with the following command (on one line): +

+ +

+<path to activator dir>/activator 
+  "run-main sample.remote.calculator.LookupApplication Calculator"		
+
+ +

+Start the LookupSystem in the second terminal window with the following command (on one line): +

+ +

+<path to activator dir>/activator 
+  "run-main sample.remote.calculator.LookupApplication Lookup"		
+
+ +

+Thereafter you can try to shut down the CalculatorSystem in the first terminal window with +'ctrl-c' and then start it again. In the second terminal window you should see the +failure detection and then how the successful calculation results are logged again when it has +established a connection to the new system. +

+ +
+ +
+

Create Remote Actors

+

+This sample involves two actor systems. +

+ +
    +
  • CalculatorWorkerSystem listens on port 2552
  • +
  • CreationSystem listens on port 2554 and starts one actor, the +CreationActor +that creates remote calculator worker actors in the CalculatorWorkerSystem and sends operations to them.
  • +
+ +

+Open CreationApplication.java. +

+ +

+There you see how the two actor systems and actors are started. In this first step they are running in the same JVM process, +but you can run them in separate processes as described later. +

+ +

+The two actor systems use different configuration, which is where the listen port is defined. +The CalculatorWorkerSystem uses calculator.conf +and the CreationSystem uses remotecreation.conf. +

+ +

+Note that the configuration files also import the +common.conf. +This enables the remoting by installing the RemoteActorRefProvider and chooses the default remote transport. +Be sure to replace the default IP 127.0.0.1 with the real address the system is reachable +by if you deploy onto multiple machines! +

+ +

+The CreationActor +creates a child CalculatorActor +for each incoming MathOp message. The +configuration contains a deployment section that +matches these child actors and defines that the actors are to be deployed at the remote system. The wildcard (*) is needed +because the child actors are created with unique anonymous names. +

+ +

+akka.actor.deployment {
+  /creationActor/* {
+    remote = "akka.tcp://CalculatorWorkerSystem@127.0.0.1:2552"
+  }
+}
+
+ +

+Error handling, i.e. supervision, works exactly in the same way as if the child actor was a local child actor. +In addition, in case of network failures or JVM crash the child will be terminated and automatically removed +from the parent even though they are located on different machines. +

+ +
+ +
+

Run the Creation Sample

+ +

+To run this sample, go to the Run +tab, and start the application main class sample.remote.calculator.CreationApplication if it is not already started. +

+ +

+In the log pane you should see something like: +

+ +

+Started CreationSystem
+Calculating 7135 / 62
+Div result: 7135 / 62 = 115.08
+Calculating 0 * 9
+Mul result: 0 * 9 = 0
+
+ +

+The two actor systems are running in the same JVM process. It can be more interesting to run them in separate +processes. Stop the application in the Run tab and then open two +terminal windows. +

+ +

+Start the CalculatorWorkerSystem in the first terminal window with the following command (on one line): +

+ +

+<path to activator dir>/activator 
+  "run-main sample.remote.calculator.CreationApplication CalculatorWorker"		
+
+ +

+Start the CreationSystem in the second terminal window with the following command (on one line): +

+ +

+<path to activator dir>/activator 
+  "run-main sample.remote.calculator.CreationApplication Creation"		
+
+ +

+Thereafter you can try to shut down the CalculatorWorkerSystem in the first terminal window with +'ctrl-c' and then start it again. In the second terminal window you should see the +failure detection and then how the successful calculation results are logged again when it has +established a connection to the new system. +

+ +
+ + + diff --git a/akka-samples/akka-sample-remote-scala/.gitignore b/akka-samples/akka-sample-remote-scala/.gitignore new file mode 100644 index 0000000000..660c959e44 --- /dev/null +++ b/akka-samples/akka-sample-remote-scala/.gitignore @@ -0,0 +1,17 @@ +*# +*.iml +*.ipr +*.iws +*.pyc +*.tm.epoch +*.vim +*-shim.sbt +.idea/ +/project/plugins/project +project/boot +target/ +/logs +.cache +.classpath +.project +.settings \ No newline at end of file diff --git a/akka-samples/akka-sample-remote-scala/LICENSE b/akka-samples/akka-sample-remote-scala/LICENSE new file mode 100644 index 0000000000..a02154466b --- /dev/null +++ b/akka-samples/akka-sample-remote-scala/LICENSE @@ -0,0 +1,13 @@ +Copyright 2013 Typesafe, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
diff --git a/akka-samples/akka-sample-remote-scala/activator.properties b/akka-samples/akka-sample-remote-scala/activator.properties new file mode 100644 index 0000000000..aab920960a --- /dev/null +++ b/akka-samples/akka-sample-remote-scala/activator.properties @@ -0,0 +1,4 @@ +name=akka-sample-remote-scala +title=Akka Remote Samples with Scala +description=Akka Remote Samples with Scala +tags=akka,remote,scala,sample diff --git a/akka-samples/akka-sample-remote-scala/build.sbt b/akka-samples/akka-sample-remote-scala/build.sbt new file mode 100644 index 0000000000..e81d7c337d --- /dev/null +++ b/akka-samples/akka-sample-remote-scala/build.sbt @@ -0,0 +1,10 @@ +name := "akka-sample-remote-scala" + +version := "1.0" + +scalaVersion := "2.10.3" + +libraryDependencies ++= Seq( + "com.typesafe.akka" %% "akka-remote" % "2.3-SNAPSHOT" +) + diff --git a/akka-samples/akka-sample-remote-scala/project/build.properties b/akka-samples/akka-sample-remote-scala/project/build.properties new file mode 100644 index 0000000000..0974fce44d --- /dev/null +++ b/akka-samples/akka-sample-remote-scala/project/build.properties @@ -0,0 +1 @@ +sbt.version=0.13.0 diff --git a/akka-samples/akka-sample-remote-scala/src/main/resources/calculator.conf b/akka-samples/akka-sample-remote-scala/src/main/resources/calculator.conf new file mode 100644 index 0000000000..948c1f2929 --- /dev/null +++ b/akka-samples/akka-sample-remote-scala/src/main/resources/calculator.conf @@ -0,0 +1,6 @@ +include "common" + +akka { + # LISTEN on tcp port 2552 + remote.netty.tcp.port = 2552 +} diff --git a/akka-samples/akka-sample-remote-scala/src/main/resources/common.conf b/akka-samples/akka-sample-remote-scala/src/main/resources/common.conf new file mode 100644 index 0000000000..2c8f881372 --- /dev/null +++ b/akka-samples/akka-sample-remote-scala/src/main/resources/common.conf @@ -0,0 +1,13 @@ +akka { + + actor { + provider = "akka.remote.RemoteActorRefProvider" + } + + remote { + netty.tcp { + hostname = "127.0.0.1" + 
} + } + +} diff --git a/akka-samples/akka-sample-remote-scala/src/main/resources/remotecreation.conf b/akka-samples/akka-sample-remote-scala/src/main/resources/remotecreation.conf new file mode 100644 index 0000000000..76292f999f --- /dev/null +++ b/akka-samples/akka-sample-remote-scala/src/main/resources/remotecreation.conf @@ -0,0 +1,13 @@ +include "common" + +akka { + actor { + deployment { + "/creationActor/*" { + remote = "akka.tcp://CalculatorWorkerSystem@127.0.0.1:2552" + } + } + } + + remote.netty.tcp.port = 2554 +} diff --git a/akka-samples/akka-sample-remote-scala/src/main/resources/remotelookup.conf b/akka-samples/akka-sample-remote-scala/src/main/resources/remotelookup.conf new file mode 100644 index 0000000000..336f557a08 --- /dev/null +++ b/akka-samples/akka-sample-remote-scala/src/main/resources/remotelookup.conf @@ -0,0 +1,5 @@ +include "common" + +akka { + remote.netty.tcp.port = 2553 +} diff --git a/akka-samples/akka-sample-remote-scala/src/main/scala/sample/remote/calculator/CalculatorActor.scala b/akka-samples/akka-sample-remote-scala/src/main/scala/sample/remote/calculator/CalculatorActor.scala new file mode 100644 index 0000000000..4428d39c3e --- /dev/null +++ b/akka-samples/akka-sample-remote-scala/src/main/scala/sample/remote/calculator/CalculatorActor.scala @@ -0,0 +1,25 @@ +/** + * Copyright (C) 2009-2013 Typesafe Inc. + */ +package sample.remote.calculator + +import akka.actor.Props +import akka.actor.Actor + +class CalculatorActor extends Actor { + def receive = { + case Add(n1, n2) => + println("Calculating %d + %d".format(n1, n2)) + sender ! AddResult(n1, n2, n1 + n2) + case Subtract(n1, n2) => + println("Calculating %d - %d".format(n1, n2)) + sender ! SubtractResult(n1, n2, n1 - n2) + case Multiply(n1, n2) => + println("Calculating %d * %d".format(n1, n2)) + sender ! MultiplicationResult(n1, n2, n1 * n2) + case Divide(n1, n2) => + println("Calculating %.0f / %d".format(n1, n2)) + sender ! 
DivisionResult(n1, n2, n1 / n2) + } +} + diff --git a/akka-samples/akka-sample-remote-scala/src/main/scala/sample/remote/calculator/CreationActor.scala b/akka-samples/akka-sample-remote-scala/src/main/scala/sample/remote/calculator/CreationActor.scala new file mode 100644 index 0000000000..b0705efd4c --- /dev/null +++ b/akka-samples/akka-sample-remote-scala/src/main/scala/sample/remote/calculator/CreationActor.scala @@ -0,0 +1,25 @@ +/** + * Copyright (C) 2009-2013 Typesafe Inc. + */ +package sample.remote.calculator + +import akka.actor.Actor +import akka.actor.ActorRef +import akka.actor.Props + +class CreationActor extends Actor { + + def receive = { + case op: MathOp => + val calculator = context.actorOf(Props[CalculatorActor]) + calculator ! op + case result: MathResult => result match { + case MultiplicationResult(n1, n2, r) => + printf("Mul result: %d * %d = %d\n", n1, n2, r) + context.stop(sender) + case DivisionResult(n1, n2, r) => + printf("Div result: %.0f / %d = %.2f\n", n1, n2, r) + context.stop(sender) + } + } +} diff --git a/akka-samples/akka-sample-remote-scala/src/main/scala/sample/remote/calculator/CreationApplication.scala b/akka-samples/akka-sample-remote-scala/src/main/scala/sample/remote/calculator/CreationApplication.scala new file mode 100644 index 0000000000..3bafb3e485 --- /dev/null +++ b/akka-samples/akka-sample-remote-scala/src/main/scala/sample/remote/calculator/CreationApplication.scala @@ -0,0 +1,41 @@ +/** + * Copyright (C) 2009-2013 Typesafe Inc. 
+ */ +package sample.remote.calculator + +import scala.concurrent.duration._ +import com.typesafe.config.ConfigFactory +import scala.util.Random +import akka.actor.ActorSystem +import akka.actor.Props + +object CreationApplication { + def main(args: Array[String]): Unit = { + if (args.isEmpty || args.head == "CalculatorWorker") + startRemoteWorkerSystem() + if (args.isEmpty || args.head == "Creation") + startRemoteCreationSystem() + } + + def startRemoteWorkerSystem(): Unit = { + ActorSystem("CalculatorWorkerSystem", ConfigFactory.load("calculator")) + println("Started CalculatorWorkerSystem") + } + + def startRemoteCreationSystem(): Unit = { + val system = + ActorSystem("CreationSystem", ConfigFactory.load("remotecreation")) + val actor = system.actorOf(Props[CreationActor], + name = "creationActor") + + println("Started CreationSystem") + import system.dispatcher + system.scheduler.schedule(1.second, 1.second) { + if (Random.nextInt(100) % 2 == 0) + actor ! Multiply(Random.nextInt(20), Random.nextInt(20)) + else + actor ! Divide(Random.nextInt(10000), (Random.nextInt(99) + 1)) + } + + } +} diff --git a/akka-samples/akka-sample-remote-scala/src/main/scala/sample/remote/calculator/LookupActor.scala b/akka-samples/akka-sample-remote-scala/src/main/scala/sample/remote/calculator/LookupActor.scala new file mode 100644 index 0000000000..2c4ecc6ae0 --- /dev/null +++ b/akka-samples/akka-sample-remote-scala/src/main/scala/sample/remote/calculator/LookupActor.scala @@ -0,0 +1,51 @@ +/** + * Copyright (C) 2009-2013 Typesafe Inc. + */ +package sample.remote.calculator + +import scala.concurrent.duration._ +import akka.actor.Actor +import akka.actor.ActorIdentity +import akka.actor.ActorRef +import akka.actor.Identify +import akka.actor.ReceiveTimeout +import akka.actor.Terminated + +class LookupActor(path: String) extends Actor { + + sendIdentifyRequest() + + def sendIdentifyRequest(): Unit = { + context.actorSelection(path) ! 
Identify(path) + import context.dispatcher + context.system.scheduler.scheduleOnce(3.seconds, self, ReceiveTimeout) + } + + def receive = identifying + + def identifying: Actor.Receive = { + case ActorIdentity(`path`, Some(actor)) => + context.watch(actor) + context.become(active(actor)) + case ActorIdentity(`path`, None) => println(s"Remote actor not available: $path") + case ReceiveTimeout => sendIdentifyRequest() + case _ => println("Not ready yet") + } + + def active(actor: ActorRef): Actor.Receive = { + case op: MathOp => actor ! op + case result: MathResult => result match { + case AddResult(n1, n2, r) => + printf("Add result: %d + %d = %d\n", n1, n2, r) + case SubtractResult(n1, n2, r) => + printf("Sub result: %d - %d = %d\n", n1, n2, r) + } + case Terminated(`actor`) => + println("Calculator terminated") + sendIdentifyRequest() + context.become(identifying) + case ReceiveTimeout => + // ignore + + } +} diff --git a/akka-samples/akka-sample-remote-scala/src/main/scala/sample/remote/calculator/LookupApplication.scala b/akka-samples/akka-sample-remote-scala/src/main/scala/sample/remote/calculator/LookupApplication.scala new file mode 100644 index 0000000000..f2fc03e1de --- /dev/null +++ b/akka-samples/akka-sample-remote-scala/src/main/scala/sample/remote/calculator/LookupApplication.scala @@ -0,0 +1,44 @@ +/** + * Copyright (C) 2009-2013 Typesafe Inc. 
+ */ +package sample.remote.calculator + +import scala.concurrent.duration._ +import scala.util.Random +import com.typesafe.config.ConfigFactory +import akka.actor.ActorSystem +import akka.actor.Props + +object LookupApplication { + def main(args: Array[String]): Unit = { + if (args.isEmpty || args.head == "Calculator") + startRemoteCalculatorSystem() + if (args.isEmpty || args.head == "Lookup") + startRemoteLookupSystem() + } + + def startRemoteCalculatorSystem(): Unit = { + val system = ActorSystem("CalculatorSystem", + ConfigFactory.load("calculator")) + system.actorOf(Props[CalculatorActor], "calculator") + + println("Started CalculatorSystem - waiting for messages") + } + + def startRemoteLookupSystem(): Unit = { + val system = + ActorSystem("LookupSystem", ConfigFactory.load("remotelookup")) + val remotePath = + "akka.tcp://CalculatorSystem@127.0.0.1:2552/user/calculator" + val actor = system.actorOf(Props(classOf[LookupActor], remotePath), "lookupActor") + + println("Started LookupSystem") + import system.dispatcher + system.scheduler.schedule(1.second, 1.second) { + if (Random.nextInt(100) % 2 == 0) + actor ! Add(Random.nextInt(100), Random.nextInt(100)) + else + actor ! 
Subtract(Random.nextInt(100), Random.nextInt(100)) + } + } +} diff --git a/akka-samples/akka-sample-remote/src/main/scala/sample/remote/calculator/MathOp.scala b/akka-samples/akka-sample-remote-scala/src/main/scala/sample/remote/calculator/MathOp.scala similarity index 66% rename from akka-samples/akka-sample-remote/src/main/scala/sample/remote/calculator/MathOp.scala rename to akka-samples/akka-sample-remote-scala/src/main/scala/sample/remote/calculator/MathOp.scala index bd92530d98..5cb1d10490 100644 --- a/akka-samples/akka-sample-remote/src/main/scala/sample/remote/calculator/MathOp.scala +++ b/akka-samples/akka-sample-remote-scala/src/main/scala/sample/remote/calculator/MathOp.scala @@ -3,8 +3,6 @@ */ package sample.remote.calculator -import akka.actor.Actor - trait MathOp case class Add(nbr1: Int, nbr2: Int) extends MathOp @@ -25,13 +23,3 @@ case class MultiplicationResult(nbr1: Int, nbr2: Int, result: Int) extends MathR case class DivisionResult(nbr1: Double, nbr2: Int, result: Double) extends MathResult -class AdvancedCalculatorActor extends Actor { - def receive = { - case Multiply(n1, n2) ⇒ - println("Calculating %d * %d".format(n1, n2)) - sender ! MultiplicationResult(n1, n2, n1 * n2) - case Divide(n1, n2) ⇒ - println("Calculating %.0f / %d".format(n1, n2)) - sender ! DivisionResult(n1, n2, n1 / n2) - } -} diff --git a/akka-samples/akka-sample-remote-scala/tutorial/index.html b/akka-samples/akka-sample-remote-scala/tutorial/index.html new file mode 100644 index 0000000000..0c22f1949a --- /dev/null +++ b/akka-samples/akka-sample-remote-scala/tutorial/index.html @@ -0,0 +1,256 @@ + + +Akka Remote Samples with Scala + + + + +
+

+In order to showcase the remote capabilities of Akka +we thought a remote calculator could do the trick. +This sample demonstrates both remote deployment and look-up of remote actors. +

+
+ +
+

Lookup Remote Actors

+

+This sample involves two actor systems. +

+ +
    +
  • CalculatorSystem listens on port 2552 and starts one actor, the +CalculatorActor +that provides a service for arithmetic operations.
  • +
  • LookupSystem listens on port 2553 and starts one actor, the +LookupActor +that sends operations to the remote calculator service.
  • +
+ +

+Open LookupApplication.scala. +

+ +

+There you see how the two actor systems and actors are started. In this first step they are running in the same JVM process, +but you can run them in separate processes as described later. Note that this changes nothing in the configuration or implementation. +

+ +

+The two actor systems use different configuration, which is where the listen port is defined. +The CalculatorSystem uses calculator.conf +and the LookupSystem uses remotelookup.conf. +

+ +

+Note that the configuration files also import the +common.conf. +This enables the remoting by installing the RemoteActorRefProvider and chooses the default remote transport. +Be sure to replace the default IP 127.0.0.1 with the real address the system is reachable +by if you deploy onto multiple machines! +

+ +

+The CalculatorActor +does not illustrate anything exciting. More interesting is the +LookupActor. +It takes a String path as constructor parameter. This is the full path, including the remote +address of the calculator service. Observe how the actor system name of the path matches the remote system’s +name, as do IP and port number. Top-level actors are always created below the "/user" guardian, which supervises them. +

+ +

+"akka.tcp://CalculatorSystem@127.0.0.1:2552/user/calculator"
+
+ +

+First it sends an Identify message to the actor selection of the path. +The remote calculator actor will reply with ActorIdentity containing its ActorRef. +Identify is a built-in message that all Actors will understand and automatically reply to with a +ActorIdentity. If the identification fails it will be retried after the scheduled timeout +by the LookupActor. +

+ +

+Note how none of the code is specific to remoting, this also applies when talking to a local actor which +might terminate and be recreated. That is what we call Location Transparency. +

+ +

+Once it has the ActorRef of the remote service it can watch it. The remote system +might be shut down and later started up again, then Terminated is received on the watching +side and it can retry the identification to establish a connection to the new remote system. +

+ +
+ +
+

Run the Lookup Sample

+ +

+To run this sample, go to the Run +tab, and start the application main class sample.remote.calculator.LookupApplication if it is not already started. +

+ +

+In the log pane you should see something like: +

+ +

+Started LookupSystem
+Calculating 74 - 42
+Sub result: 74 - 42 = 32
+Calculating 15 + 71
+Add result: 15 + 71 = 86
+
+ +

+The two actor systems are running in the same JVM process. It can be more interesting to run them in separate +processes. Stop the application in the Run tab and then open two +terminal windows. +

+ +

+Start the CalculatorSystem in the first terminal window with the following command (on one line): +

+ +

+<path to activator dir>/activator 
+  "run-main sample.remote.calculator.LookupApplication Calculator"		
+
+ +

+Start the LookupSystem in the second terminal window with the following command (on one line): +

+ +

+<path to activator dir>/activator 
+  "run-main sample.remote.calculator.LookupApplication Lookup"		
+
+ +

+Thereafter you can try to shut down the CalculatorSystem in the first terminal window with +'ctrl-c' and then start it again. In the second terminal window you should see the +failure detection and then how the successful calculation results are logged again when it has +established a connection to the new system. +

+ +
+ +
+

Create Remote Actors

+

+This sample involves two actor systems. +

+ +
    +
  • CalculatorWorkerSystem listens on port 2552
  • +
  • CreationSystem listens on port 2554 and starts one actor, the +CreationActor +that creates remote calculator worker actors in the CalculatorWorkerSystem and sends operations to them.
  • +
+ +

+Open CreationApplication.scala. +

+ +

+There you see how the two actor systems and actors are started. In this first step they are running in the same JVM process, +but you can run them in separate processes as described later. +

+ +

+The two actor systems use different configuration, which is where the listen port is defined. +The CalculatorWorkerSystem uses calculator.conf +and the CreationSystem uses remotecreation.conf. +

+ +

+Note that the configuration files also import the +common.conf. +This enables the remoting by installing the RemoteActorRefProvider and chooses the default remote transport. +Be sure to replace the default IP 127.0.0.1 with the real address the system is reachable +by if you deploy onto multiple machines! +

+ +

+The CreationActor +creates a child CalculatorActor +for each incoming MathOp message. The +configuration contains a deployment section that +matches these child actors and defines that the actors are to be deployed at the remote system. The wildcard (*) is needed +because the child actors are created with unique anonymous names. +

+ +

+akka.actor.deployment {
+  /creationActor/* {
+    remote = "akka.tcp://CalculatorWorkerSystem@127.0.0.1:2552"
+  }
+}
+
+ +

+Error handling, i.e. supervision, works exactly in the same way as if the child actor was a local child actor. +In addition, in case of network failures or JVM crash the child will be terminated and automatically removed +from the parent even though they are located on different machines. +

+ +
+ +
+

Run the Creation Sample

+ +

+To run this sample, go to the Run +tab, and start the application main class sample.remote.calculator.CreationApplication if it is not already started. +

+ +

+In the log pane you should see something like: +

+ +

+Started CreationSystem
+Calculating 7135 / 62
+Div result: 7135 / 62 = 115.08
+Calculating 0 * 9
+Mul result: 0 * 9 = 0
+
+ +

+The two actor systems are running in the same JVM process. It can be more interesting to run them in separate +processes. Stop the application in the Run tab and then open two +terminal windows. +

+ +

+Start the CalculatorWorkerSystem in the first terminal window with the following command (on one line): +

+ +

+<path to activator dir>/activator 
+  "run-main sample.remote.calculator.CreationApplication CalculatorWorker"		
+
+ +

+Start the CreationSystem in the second terminal window with the following command (on one line): +

+ +

+<path to activator dir>/activator 
+  "run-main sample.remote.calculator.CreationApplication Creation"		
+
+ +

+Thereafter you can try to shut down the CalculatorWorkerSystem in the first terminal window with +'ctrl-c' and then start it again. In the second terminal window you should see the +failure detection and then how the successful calculation results are logged again when it has +established a connection to the new system. +

+ +
+ + + diff --git a/akka-samples/akka-sample-remote/README.rst b/akka-samples/akka-sample-remote/README.rst deleted file mode 100644 index 6cf7e85178..0000000000 --- a/akka-samples/akka-sample-remote/README.rst +++ /dev/null @@ -1,247 +0,0 @@ -REMOTE CALCULATOR -================= - -Requirements ------------- - -To build and run remote calculator you need [Simple Build Tool][sbt] (sbt). - -The Sample Explained --------------------- - -In order to showcase the remote capabilities of Akka 2.0 we thought a remote calculator could do the trick. - -There are two implementations of the sample; one in Scala and one in Java. -The explanation below is for Scala, but everything is similar in Java except that the class names begin with a ``J``, -e.g. ``JCalcApp`` instead of ``CalcApp``, and that the Java classes reside in another package structure. - -There are three actor systems used in the sample: - -* CalculatorApplication : the actor system performing the number crunching (ie: the server) -* LookupApplication : illustrates how to look up an actor on a remote node and communicate with that actor (ie: as a client) -* CreationApplication : illustrates how to create an actor on a remote node and how to communicate with that actor (ie: as a client) - -The CalculatorApplication contains an actor, SimpleCalculatorActor, which can handle simple math operations such as -addition and subtraction. The JVM this actor is run on is connected to and then this actor is looked up and used from the LookupApplication. - -The CreationApplication wants to use more "advanced" mathematical operations, such as multiplication and division, -but as the CalculatorApplication does not have any actor that can perform those type of calculations the -CreationApplication has to remote deploy an actor that can (which in our case is AdvancedCalculatorActor). 
-So this actor is deployed, over the network, onto the CalculatorApplication actor system and thereafter the -CreationApplication will send messages to it. - -It is important to point out that as the actor systems run on different ports it is possible to run all three in parallel. -See the next section for more information of how to run the sample application. - -Running -------- - -In order to run all three actor systems you have to start SBT in three different terminal windows. - -We start off by running the CalculatorApplication: - -First type 'sbt' to start SBT interactively, the run 'update' and 'run': -> cd $AKKA_HOME - -> sbt - -> project akka-sample-remote - -> run - -Select to run "sample.remote.calculator.CalcApp" which in the case below is number 3: - - Multiple main classes detected, select one to run: - - [1] sample.remote.calculator.LookupApp - [2] sample.remote.calculator.CreationApp - [3] sample.remote.calculator.java.JCreationApp - [4] sample.remote.calculator.CalcApp - [5] sample.remote.calculator.java.JCalcApp - [6] sample.remote.calculator.java.JLookupApp - - Enter number: 4 - -You should see something similar to this:: - - [info] Running sample.remote.calculator.CalcApp - [INFO] [01/25/2013 15:02:51.355] [run-main] [Remoting] Starting remoting - [INFO] [01/25/2013 15:02:52.121] [run-main] [Remoting] Remoting started; listening on addresses :[akka.tcp://CalculatorApplication@127.0.0.1:2552] - Started Calculator Application - waiting for messages - -Open up a new terminal window and run SBT once more: - -> sbt - -> project akka-sample-remote - -> run - -Select to run "sample.remote.calculator.LookupApp" which in the case below is number 1:: - - Multiple main classes detected, select one to run: - - [1] sample.remote.calculator.LookupApp - [2] sample.remote.calculator.CreationApp - [3] sample.remote.calculator.java.JCreationApp - [4] sample.remote.calculator.CalcApp - [5] sample.remote.calculator.java.JCalcApp - [6] 
sample.remote.calculator.java.JLookupApp - - Enter number: 1 - -Now you should see something like this:: - - [info] Running sample.remote.calculator.LookupApp - [INFO] [01/25/2013 15:05:53.954] [run-main] [Remoting] Starting remoting - [INFO] [01/25/2013 15:05:54.769] [run-main] [Remoting] Remoting started; listening on addresses :[akka.tcp://LookupApplication@127.0.0.1:2553] - Started Lookup Application - Not ready yet - Not ready yet - Add result: 0 + 22 = 22 - Add result: 41 + 71 = 112 - Add result: 61 + 14 = 75 - Add result: 77 + 82 = 159 - -Congrats! You have now successfully looked up a remote actor and communicated with it. -The next step is to have an actor deployed on a remote note. -Once more you should open a new terminal window and run SBT: - -> sbt - -> project akka-sample-remote - -> run - -Select to run "sample.remote.calculator.CreationApp" which in the case below is number 2:: - - Multiple main classes detected, select one to run: - - [1] sample.remote.calculator.LookupApp - [2] sample.remote.calculator.CreationApp - [3] sample.remote.calculator.java.JCreationApp - [4] sample.remote.calculator.CalcApp - [5] sample.remote.calculator.java.JCalcApp - [6] sample.remote.calculator.java.JLookupApp - - Enter number: 2 - -Now you should see something like this:: - - [info] Running sample.remote.calculator.CreationApp - [INFO] [01/14/2013 15:08:08.890] [run-main] [Remoting] Starting remoting - Started Creation Application - Mul result: 15 * 12 = 180 - Div result: 3840 / 10 = 384,00 - Mul result: 1 * 5 = 5 - Div result: 3240 / 45 = 72,00 - -That's it! - - -Secure Cookie Handshake ------------------------ - -An improvement that can be made is to have the CalculatorApplication verify that a known trusted actor is connecting to -it. This can be done using the 'Secure Cookie Handshake' mechanism. 
An example of enabling this is in the common.conf -file and looks as follows: - - # Uncomment the following four lines to employ the 'secure cookie handshake' - # This requires the client to have the known secure-cookie and properly - # transmit it to the server upon connection. Because both the client and server - # programs use this common.conf file, they will both have the cookie - #remote { - # secure-cookie = "0009090D040C030E03070D0509020F050B080400" - # require-cookie = on - #} - -In order to enable Secure Cookie Handshake, simply remove the #s from the 4 relevant lines as follows: - - # Uncomment the following four lines to employ the 'secure cookie handshake' - # This requires the client to have the known secure-cookie and properly - # transmit it to the server upon connection. Because both the client and server - # programs use this common.conf file, they will both have the cookie - remote { - secure-cookie = "0009090D040C030E03070D0509020F050B080400" - require-cookie = on - } - -Your CalculatorApplication actor will now verify the 'authenticity' of your LookupApp actor and the CreationApp actor. 
- -In order to test that an invalid secure-cookie is rejected, you can simply do the following: - -Select to run "sample.remote.calculator.CalcApp" which in the case below is number 3: - - Multiple main classes detected, select one to run: - - [1] sample.remote.calculator.LookupApp - [2] sample.remote.calculator.CreationApp - [3] sample.remote.calculator.java.JCreationApp - [4] sample.remote.calculator.CalcApp - [5] sample.remote.calculator.java.JCalcApp - [6] sample.remote.calculator.java.JLookupApp - - Enter number: 4 - -You should see something similar to this:: - - [info] Running sample.remote.calculator.CalcApp - [INFO] [01/25/2013 15:02:51.355] [run-main] [Remoting] Starting remoting - [INFO] [01/25/2013 15:02:52.121] [run-main] [Remoting] Remoting started; listening on addresses :[akka.tcp://CalculatorApplication@127.0.0.1:2552] - Started Calculator Application - waiting for messages - - - -Now edit the common.conf file to alter the value of secure-cookie. - - -Now, in a separate terminal, run the following: - -> sbt - -> project akka-sample-remote - -> run - -Select to run "sample.remote.calculator.LookupApp" which in the case below is number 1:: - - Multiple main classes detected, select one to run: - - [1] sample.remote.calculator.LookupApp - [2] sample.remote.calculator.CreationApp - [3] sample.remote.calculator.java.JCreationApp - [4] sample.remote.calculator.CalcApp - [5] sample.remote.calculator.java.JCalcApp - [6] sample.remote.calculator.java.JLookupApp - - Enter number: 1 - -Now you should see something like this:: - - - [info] Running sample.remote.calculator.LookupApp - [INFO] [07/26/2013 16:27:31.265] [run-main] [Remoting] Starting remoting - [INFO] [07/26/2013 16:27:31.489] [run-main] [Remoting] Remoting started; listening on addresses :[akka.tcp://LookupApplication@127.0.0.1:2553] - [INFO] [07/26/2013 16:27:31.492] [run-main] [Remoting] Remoting now listens on addresses: [akka.tcp://LookupApplication@127.0.0.1:2553] - Started Lookup 
Application - Not ready yet - [ERROR] [07/26/2013 16:27:31.691] [LookupApplication-akka.actor.default-dispatcher-2] [akka://LookupApplication/system/endpointManager/reliableEndpointWriter-akka.tcp%3A%2F%2FCalculatorApplication%40127.0.0.1%3A2552-0/endpointWriter] AssociationError [akka.tcp://LookupApplication@127.0.0.1:2553] -> [akka.tcp://CalculatorApplication@127.0.0.1:2552]: Error [Association failed with [akka.tcp://CalculatorApplication@127.0.0.1:2552]] [ - akka.remote.EndpointAssociationException: Association failed with [akka.tcp://CalculatorApplication@127.0.0.1:2552] - Caused by: akka.remote.transport.AkkaProtocolException: The remote system explicitly disassociated (reason unknown). - ] - -You can see that the client LookupApp was unable to connect to the CalculatorApplication's AKKA system. - - -Notice ------- - -The sample application is just that, i.e. a sample. Parts of it are not the way you would do a "real" application. -Some improvements are: - - remove all hard coded addresses from the code as they reduce the flexibility of how and - where the application can be run. We leave this to the astute reader to refine the sample into a real-world app. - - handle the akka.remote.EndpointAssociationException in the case of failed Secure Cookie Handshake - - -* `Akka `_ -* `SBT `_ diff --git a/akka-samples/akka-sample-remote/src/main/java/sample/remote/calculator/java/JAdvancedCalculatorActor.java b/akka-samples/akka-sample-remote/src/main/java/sample/remote/calculator/java/JAdvancedCalculatorActor.java deleted file mode 100644 index adb5581657..0000000000 --- a/akka-samples/akka-sample-remote/src/main/java/sample/remote/calculator/java/JAdvancedCalculatorActor.java +++ /dev/null @@ -1,34 +0,0 @@ -/** - * Copyright (C) 2009-2013 Typesafe Inc. 
- */ -package sample.remote.calculator.java; - -import akka.actor.UntypedActor; - -//#actor -public class JAdvancedCalculatorActor extends UntypedActor { - @Override - public void onReceive(Object message) throws Exception { - - if (message instanceof Op.Multiply) { - Op.Multiply multiply = (Op.Multiply) message; - System.out.println("Calculating " + multiply.getN1() + " * " - + multiply.getN2()); - getSender().tell( - new Op.MultiplicationResult(multiply.getN1(), multiply.getN2(), - multiply.getN1() * multiply.getN2()), getSelf()); - - } else if (message instanceof Op.Divide) { - Op.Divide divide = (Op.Divide) message; - System.out.println("Calculating " + divide.getN1() + " / " - + divide.getN2()); - getSender().tell( - new Op.DivisionResult(divide.getN1(), divide.getN2(), divide.getN1() - / divide.getN2()), getSelf()); - - } else { - unhandled(message); - } - } -} -// #actor diff --git a/akka-samples/akka-sample-remote/src/main/java/sample/remote/calculator/java/JCalcApp.java b/akka-samples/akka-sample-remote/src/main/java/sample/remote/calculator/java/JCalcApp.java deleted file mode 100644 index 89ecf70238..0000000000 --- a/akka-samples/akka-sample-remote/src/main/java/sample/remote/calculator/java/JCalcApp.java +++ /dev/null @@ -1,13 +0,0 @@ -/** - * Copyright (C) 2009-2013 Typesafe Inc. 
- */ -package sample.remote.calculator.java; - -public class JCalcApp { - - public static void main(String[] args) { - JCalculatorApplication app = new JCalculatorApplication(); - System.out.println("Started Calculator Application - waiting for messages"); - } - -} diff --git a/akka-samples/akka-sample-remote/src/main/java/sample/remote/calculator/java/JCalculatorApplication.java b/akka-samples/akka-sample-remote/src/main/java/sample/remote/calculator/java/JCalculatorApplication.java deleted file mode 100644 index 2ea8d2a4bf..0000000000 --- a/akka-samples/akka-sample-remote/src/main/java/sample/remote/calculator/java/JCalculatorApplication.java +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright (C) 2009-2013 Typesafe Inc. - */ -package sample.remote.calculator.java; - -import akka.actor.ActorRef; -import akka.actor.ActorSystem; -import akka.actor.Props; -import akka.kernel.Bootable; -import com.typesafe.config.ConfigFactory; - -//#setup -public class JCalculatorApplication implements Bootable { - private ActorSystem system; - - public JCalculatorApplication() { - system = ActorSystem.create("CalculatorApplication", ConfigFactory.load() - .getConfig("calculator")); - ActorRef actor = system.actorOf(Props.create(JSimpleCalculatorActor.class), - "simpleCalculator"); - } - - @Override - public void startup() { - } - - @Override - public void shutdown() { - system.shutdown(); - } -} -// #setup diff --git a/akka-samples/akka-sample-remote/src/main/java/sample/remote/calculator/java/JCreationActor.java b/akka-samples/akka-sample-remote/src/main/java/sample/remote/calculator/java/JCreationActor.java deleted file mode 100644 index cadb0abde1..0000000000 --- a/akka-samples/akka-sample-remote/src/main/java/sample/remote/calculator/java/JCreationActor.java +++ /dev/null @@ -1,40 +0,0 @@ -/** - * Copyright (C) 2009-2013 Typesafe Inc. 
- */ -package sample.remote.calculator.java; - -import akka.actor.ActorRef; -import akka.actor.UntypedActor; - -//#actor -public class JCreationActor extends UntypedActor { - - private final ActorRef remoteActor; - - public JCreationActor(ActorRef remoteActor) { - this.remoteActor = remoteActor; - } - - @Override - public void onReceive(Object message) throws Exception { - - if (message instanceof Op.MathOp) { - // send message to server actor - remoteActor.tell(message, getSelf()); - - } else if (message instanceof Op.MultiplicationResult) { - Op.MultiplicationResult result = (Op.MultiplicationResult) message; - System.out.printf("Mul result: %d * %d = %d\n", - result.getN1(), result.getN2(), result.getResult()); - - } else if (message instanceof Op.DivisionResult) { - Op.DivisionResult result = (Op.DivisionResult) message; - System.out.printf("Div result: %.0f / %d = %.2f\n", - result.getN1(), result.getN2(), result.getResult()); - - } else { - unhandled(message); - } - } -} -//#actor diff --git a/akka-samples/akka-sample-remote/src/main/java/sample/remote/calculator/java/JCreationApp.java b/akka-samples/akka-sample-remote/src/main/java/sample/remote/calculator/java/JCreationApp.java deleted file mode 100644 index fd5ef8c2dc..0000000000 --- a/akka-samples/akka-sample-remote/src/main/java/sample/remote/calculator/java/JCreationApp.java +++ /dev/null @@ -1,26 +0,0 @@ -/** - * Copyright (C) 2009-2013 Typesafe Inc. 
- */ -package sample.remote.calculator.java; - -import java.util.Random; - -public class JCreationApp { - public static void main(String[] args) { - JCreationApplication app = new JCreationApplication(); - System.out.println("Started Creation Application"); - Random r = new Random(); - while (true) { - if (r.nextInt(100) % 2 == 0) { - app.doSomething(new Op.Multiply(r.nextInt(100), r.nextInt(100))); - } else { - app.doSomething(new Op.Divide(r.nextInt(10000), r.nextInt(99) + 1)); - } - - try { - Thread.sleep(200); - } catch (InterruptedException e) { - } - } - } -} diff --git a/akka-samples/akka-sample-remote/src/main/java/sample/remote/calculator/java/JCreationApplication.java b/akka-samples/akka-sample-remote/src/main/java/sample/remote/calculator/java/JCreationApplication.java deleted file mode 100644 index 8a46b25410..0000000000 --- a/akka-samples/akka-sample-remote/src/main/java/sample/remote/calculator/java/JCreationApplication.java +++ /dev/null @@ -1,41 +0,0 @@ -/** - * Copyright (C) 2009-2013 Typesafe Inc. 
- */ -package sample.remote.calculator.java; - -import akka.actor.ActorRef; -import akka.actor.ActorSystem; -import akka.actor.Props; -import akka.actor.UntypedActor; -import akka.kernel.Bootable; -import com.typesafe.config.ConfigFactory; - -//#setup -public class JCreationApplication implements Bootable { - private ActorSystem system; - private ActorRef actor; - - public JCreationApplication() { - system = ActorSystem.create("CreationApplication", ConfigFactory.load() - .getConfig("remotecreation")); - final ActorRef remoteActor = system.actorOf(Props.create( - JAdvancedCalculatorActor.class), "advancedCalculator"); - actor = system.actorOf(Props.create(JCreationActor.class, remoteActor), - "creationActor"); - - } - - public void doSomething(Op.MathOp mathOp) { - actor.tell(mathOp, null); - } - - @Override - public void startup() { - } - - @Override - public void shutdown() { - system.shutdown(); - } -} -//#setup diff --git a/akka-samples/akka-sample-remote/src/main/java/sample/remote/calculator/java/JLookupActor.java b/akka-samples/akka-sample-remote/src/main/java/sample/remote/calculator/java/JLookupActor.java deleted file mode 100644 index f77607a1c3..0000000000 --- a/akka-samples/akka-sample-remote/src/main/java/sample/remote/calculator/java/JLookupActor.java +++ /dev/null @@ -1,58 +0,0 @@ -/** - * Copyright (C) 2009-2013 Typesafe Inc. 
- */ -package sample.remote.calculator.java; - -import akka.actor.ActorRef; -import akka.actor.ActorIdentity; -import akka.actor.Identify; -import akka.actor.UntypedActor; -import akka.actor.ReceiveTimeout; - -//#actor -public class JLookupActor extends UntypedActor { - - private final String path; - private ActorRef remoteActor = null; - - public JLookupActor(String path) { - this.path = path; - sendIdentifyRequest(); - } - - private void sendIdentifyRequest() { - getContext().actorSelection(path).tell(new Identify(path), getSelf()); - } - - @Override - public void onReceive(Object message) throws Exception { - - if (message instanceof ActorIdentity) { - remoteActor = ((ActorIdentity) message).getRef(); - - } else if (message.equals(ReceiveTimeout.getInstance())) { - sendIdentifyRequest(); - - } else if (remoteActor == null) { - System.out.println("Not ready yet"); - - } else if (message instanceof Op.MathOp) { - // send message to server actor - remoteActor.tell(message, getSelf()); - - } else if (message instanceof Op.AddResult) { - Op.AddResult result = (Op.AddResult) message; - System.out.printf("Add result: %d + %d = %d\n", result.getN1(), - result.getN2(), result.getResult()); - - } else if (message instanceof Op.SubtractResult) { - Op.SubtractResult result = (Op.SubtractResult) message; - System.out.printf("Sub result: %d - %d = %d\n", result.getN1(), - result.getN2(), result.getResult()); - - } else { - unhandled(message); - } - } -} -//#actor diff --git a/akka-samples/akka-sample-remote/src/main/java/sample/remote/calculator/java/JLookupApp.java b/akka-samples/akka-sample-remote/src/main/java/sample/remote/calculator/java/JLookupApp.java deleted file mode 100644 index 614f90798b..0000000000 --- a/akka-samples/akka-sample-remote/src/main/java/sample/remote/calculator/java/JLookupApp.java +++ /dev/null @@ -1,26 +0,0 @@ -/** - * Copyright (C) 2009-2013 Typesafe Inc. 
- */ -package sample.remote.calculator.java; - -import java.util.Random; - -public class JLookupApp { - public static void main(String[] args) { - JLookupApplication app = new JLookupApplication(); - System.out.println("Started Lookup Application"); - Random r = new Random(); - while (true) { - if (r.nextInt(100) % 2 == 0) { - app.doSomething(new Op.Add(r.nextInt(100), r.nextInt(100))); - } else { - app.doSomething(new Op.Subtract(r.nextInt(100), r.nextInt(100))); - } - - try { - Thread.sleep(200); - } catch (InterruptedException e) { - } - } - } -} diff --git a/akka-samples/akka-sample-remote/src/main/java/sample/remote/calculator/java/JLookupApplication.java b/akka-samples/akka-sample-remote/src/main/java/sample/remote/calculator/java/JLookupApplication.java deleted file mode 100644 index c1398a1469..0000000000 --- a/akka-samples/akka-sample-remote/src/main/java/sample/remote/calculator/java/JLookupApplication.java +++ /dev/null @@ -1,41 +0,0 @@ -/** - * Copyright (C) 2009-2013 Typesafe Inc. 
- */ -package sample.remote.calculator.java; - -//#imports -import akka.actor.ActorRef; -import akka.actor.ActorSystem; -import akka.actor.Props; -import akka.actor.UntypedActor; -import akka.kernel.Bootable; -import com.typesafe.config.ConfigFactory; -//#imports - -//#setup -public class JLookupApplication implements Bootable { - private ActorSystem system; - private ActorRef actor; - - public JLookupApplication() { - system = ActorSystem.create("LookupApplication", ConfigFactory.load().getConfig( - "remotelookup")); - final String path = - "akka.tcp://CalculatorApplication@127.0.0.1:2552/user/simpleCalculator"; - actor = system.actorOf(Props.create(JLookupActor.class, path), "lookupActor"); - } - - public void doSomething(Op.MathOp mathOp) { - actor.tell(mathOp, null); - } - - @Override - public void startup() { - } - - @Override - public void shutdown() { - system.shutdown(); - } -} -// #setup diff --git a/akka-samples/akka-sample-remote/src/main/java/sample/remote/calculator/java/JSimpleCalculatorActor.java b/akka-samples/akka-sample-remote/src/main/java/sample/remote/calculator/java/JSimpleCalculatorActor.java deleted file mode 100644 index c9e42ed697..0000000000 --- a/akka-samples/akka-sample-remote/src/main/java/sample/remote/calculator/java/JSimpleCalculatorActor.java +++ /dev/null @@ -1,33 +0,0 @@ -/** - * Copyright (C) 2009-2013 Typesafe Inc. 
- */ -package sample.remote.calculator.java; - -import akka.actor.UntypedActor; - -//#actor -public class JSimpleCalculatorActor extends UntypedActor { - @Override - public void onReceive(Object message) { - - if (message instanceof Op.Add) { - Op.Add add = (Op.Add) message; - System.out.println("Calculating " + add.getN1() + " + " + add.getN2()); - getSender().tell(new Op.AddResult( - add.getN1(), add.getN2(), add.getN1() + add.getN2()), - getSelf()); - - } else if (message instanceof Op.Subtract) { - Op.Subtract subtract = (Op.Subtract) message; - System.out.println("Calculating " + subtract.getN1() + " - " + - subtract.getN2()); - getSender().tell(new Op.SubtractResult( - subtract.getN1(), subtract.getN2(), subtract.getN1() - subtract.getN2()), - getSelf()); - - } else { - unhandled(message); - } - } -} -// #actor diff --git a/akka-samples/akka-sample-remote/src/main/resources/application.conf b/akka-samples/akka-sample-remote/src/main/resources/application.conf deleted file mode 100644 index 8d38f04e42..0000000000 --- a/akka-samples/akka-sample-remote/src/main/resources/application.conf +++ /dev/null @@ -1,38 +0,0 @@ -//#calculator -calculator { - include "common" - - akka { - # LISTEN on tcp port 2552 - remote.netty.tcp.port = 2552 - } -} -//#calculator - -//#remotelookup -remotelookup { - include "common" - - akka { - remote.netty.tcp.port = 2553 - } -} -//#remotelookup - -//#remotecreation -remotecreation { - include "common" - - akka { - actor { - deployment { - /advancedCalculator { - remote = "akka.tcp://CalculatorApplication@127.0.0.1:2552" - } - } - } - - remote.netty.tcp.port = 2554 - } -} -//#remotecreation diff --git a/akka-samples/akka-sample-remote/src/main/resources/common.conf b/akka-samples/akka-sample-remote/src/main/resources/common.conf deleted file mode 100644 index 3431218a8d..0000000000 --- a/akka-samples/akka-sample-remote/src/main/resources/common.conf +++ /dev/null @@ -1,21 +0,0 @@ -akka { - - actor { - provider = 
"akka.remote.RemoteActorRefProvider" - } - - remote { - netty.tcp { - hostname = "127.0.0.1" - } - } - - # Uncomment the following four lines to employ the 'secure cookie handshake' - # This requires the client to have the known secure-cookie and properly - # transmit it to the server upon connection. Because both the client and server - # programs use this common.conf file, they will both have the cookie - #remote { - # secure-cookie = "0009090D040C030E03070D0509020F050B080400" - # require-cookie = on - #} -} diff --git a/akka-samples/akka-sample-remote/src/main/scala/sample/remote/calculator/CalculatorApplication.scala b/akka-samples/akka-sample-remote/src/main/scala/sample/remote/calculator/CalculatorApplication.scala deleted file mode 100644 index f87d259268..0000000000 --- a/akka-samples/akka-sample-remote/src/main/scala/sample/remote/calculator/CalculatorApplication.scala +++ /dev/null @@ -1,47 +0,0 @@ -/** - * Copyright (C) 2009-2013 Typesafe Inc. - */ -package sample.remote.calculator - -/* - * comments like //# are there for inclusion into docs, please don’t remove - */ - -import akka.kernel.Bootable -import akka.actor.{ Props, Actor, ActorSystem } -import com.typesafe.config.ConfigFactory - -//#actor -class SimpleCalculatorActor extends Actor { - def receive = { - case Add(n1, n2) ⇒ - println("Calculating %d + %d".format(n1, n2)) - sender ! AddResult(n1, n2, n1 + n2) - case Subtract(n1, n2) ⇒ - println("Calculating %d - %d".format(n1, n2)) - sender ! 
SubtractResult(n1, n2, n1 - n2) - } -} -//#actor - -class CalculatorApplication extends Bootable { - //#setup - val system = ActorSystem("CalculatorApplication", - ConfigFactory.load.getConfig("calculator")) - val actor = system.actorOf(Props[SimpleCalculatorActor], "simpleCalculator") - //#setup - - def startup() { - } - - def shutdown() { - system.shutdown() - } -} - -object CalcApp { - def main(args: Array[String]) { - new CalculatorApplication - println("Started Calculator Application - waiting for messages") - } -} diff --git a/akka-samples/akka-sample-remote/src/main/scala/sample/remote/calculator/CreationApplication.scala b/akka-samples/akka-sample-remote/src/main/scala/sample/remote/calculator/CreationApplication.scala deleted file mode 100644 index c6f8cbb6c4..0000000000 --- a/akka-samples/akka-sample-remote/src/main/scala/sample/remote/calculator/CreationApplication.scala +++ /dev/null @@ -1,63 +0,0 @@ -/** - * Copyright (C) 2009-2013 Typesafe Inc. - */ -package sample.remote.calculator - -/* - * comments like //# are there for inclusion into docs, please don’t remove - */ - -import akka.kernel.Bootable -import com.typesafe.config.ConfigFactory -import scala.util.Random -import akka.actor._ - -class CreationApplication extends Bootable { - //#setup - val system = - ActorSystem("RemoteCreation", ConfigFactory.load.getConfig("remotecreation")) - val remoteActor = system.actorOf(Props[AdvancedCalculatorActor], - name = "advancedCalculator") - val localActor = system.actorOf(Props(classOf[CreationActor], remoteActor), - name = "creationActor") - - def doSomething(op: MathOp): Unit = - localActor ! op - //#setup - - def startup() { - } - - def shutdown() { - system.shutdown() - } -} - -//#actor -class CreationActor(remoteActor: ActorRef) extends Actor { - def receive = { - case op: MathOp ⇒ remoteActor ! 
op - case result: MathResult ⇒ result match { - case MultiplicationResult(n1, n2, r) ⇒ - printf("Mul result: %d * %d = %d\n", n1, n2, r) - case DivisionResult(n1, n2, r) ⇒ - printf("Div result: %.0f / %d = %.2f\n", n1, n2, r) - } - } -} -//#actor - -object CreationApp { - def main(args: Array[String]) { - val app = new CreationApplication - println("Started Creation Application") - while (true) { - if (Random.nextInt(100) % 2 == 0) - app.doSomething(Multiply(Random.nextInt(20), Random.nextInt(20))) - else - app.doSomething(Divide(Random.nextInt(10000), (Random.nextInt(99) + 1))) - - Thread.sleep(200) - } - } -} diff --git a/akka-samples/akka-sample-remote/src/main/scala/sample/remote/calculator/LookupApplication.scala b/akka-samples/akka-sample-remote/src/main/scala/sample/remote/calculator/LookupApplication.scala deleted file mode 100644 index 86024cc9b9..0000000000 --- a/akka-samples/akka-sample-remote/src/main/scala/sample/remote/calculator/LookupApplication.scala +++ /dev/null @@ -1,83 +0,0 @@ -/** - * Copyright (C) 2009-2013 Typesafe Inc. - */ -package sample.remote.calculator - -/* - * comments like //# are there for inclusion into docs, please don’t remove - */ - -import scala.util.Random -import scala.concurrent.duration._ -import com.typesafe.config.ConfigFactory -import akka.actor.{ ActorRef, Props, Actor, ActorSystem } -import akka.actor.Identify -import akka.actor.ActorIdentity -import akka.kernel.Bootable -import akka.actor.ReceiveTimeout -//#imports - -class LookupApplication extends Bootable { - //#setup - val system = - ActorSystem("LookupApplication", ConfigFactory.load.getConfig("remotelookup")) - val remotePath = - "akka.tcp://CalculatorApplication@127.0.0.1:2552/user/simpleCalculator" - val actor = system.actorOf(Props(classOf[LookupActor], remotePath), "lookupActor") - - def doSomething(op: MathOp): Unit = - actor ! 
op - //#setup - - def startup() { - } - - def shutdown() { - system.shutdown() - } -} - -//#actor -class LookupActor(path: String) extends Actor { - - context.setReceiveTimeout(3.seconds) - sendIdentifyRequest() - - def sendIdentifyRequest(): Unit = - context.actorSelection(path) ! Identify(path) - - def receive = { - case ActorIdentity(`path`, Some(actor)) ⇒ - context.setReceiveTimeout(Duration.Undefined) - context.become(active(actor)) - case ActorIdentity(`path`, None) ⇒ println(s"Remote actor not availible: $path") - case ReceiveTimeout ⇒ sendIdentifyRequest() - case _ ⇒ println("Not ready yet") - } - - def active(actor: ActorRef): Actor.Receive = { - case op: MathOp ⇒ actor ! op - case result: MathResult ⇒ result match { - case AddResult(n1, n2, r) ⇒ - printf("Add result: %d + %d = %d\n", n1, n2, r) - case SubtractResult(n1, n2, r) ⇒ - printf("Sub result: %d - %d = %d\n", n1, n2, r) - } - } -} -//#actor - -object LookupApp { - def main(args: Array[String]) { - val app = new LookupApplication - println("Started Lookup Application") - while (true) { - if (Random.nextInt(100) % 2 == 0) - app.doSomething(Add(Random.nextInt(100), Random.nextInt(100))) - else - app.doSomething(Subtract(Random.nextInt(100), Random.nextInt(100))) - - Thread.sleep(200) - } - } -} diff --git a/project/ActivatorDist.scala b/project/ActivatorDist.scala new file mode 100644 index 0000000000..7d6298859f --- /dev/null +++ b/project/ActivatorDist.scala @@ -0,0 +1,49 @@ +package akka + +import sbt._ +import sbt.Keys._ +import sbt.classpath.ClasspathUtilities +import sbt.Project.Initialize +import java.io.File + +object ActivatorDist { + + val activatorDistDirectory = SettingKey[File]("activator-dist-directory") + val activatorDist = TaskKey[File]("activator-dist", "Create a zipped distribution of each activator sample.") + + lazy val settings: Seq[Setting[_]] = Seq( + activatorDistDirectory <<= crossTarget / "activator-dist", + activatorDist <<= activatorDistTask + ) + + def 
aggregatedProjects(projectRef: ProjectRef, structure: Load.BuildStructure): Seq[ProjectRef] = { + val aggregate = Project.getProject(projectRef, structure).toSeq.flatMap(_.aggregate) + aggregate flatMap { ref => + ref +: aggregatedProjects(ref, structure) + } + } + + def activatorDistTask: Initialize[Task[File]] = { + (thisProjectRef, baseDirectory, activatorDistDirectory, version, buildStructure, streams) map { + (project, projectBase, activatorDistDirectory, version, structure, s) => { + val allProjects = aggregatedProjects(project, structure).flatMap(p => Project.getProject(p, structure)) + val rootGitignoreLines = IO.readLines(AkkaBuild.akka.base / ".gitignore") + for (p <- allProjects) { + val localGitignoreLines = if ((p.base / ".gitignore").exists) IO.readLines(p.base / ".gitignore") else Nil + val gitignorePathFinder = (".gitignore" :: localGitignoreLines ::: rootGitignoreLines).foldLeft(PathFinder.empty)( + (acc, x) => acc +++ (p.base * x)) + val filteredPathFinder = (p.base * "*") --- gitignorePathFinder + for (f <- filteredPathFinder.get) { + val target = activatorDistDirectory / p.id / f.name + println("copy: " + target) + IO.copyDirectory(f, target, overwrite = true, preserveLastModified = true) + } + Dist.zip(activatorDistDirectory / p.id, activatorDistDirectory / (p.id + "-" + version + ".zip")) + } + + activatorDistDirectory + } + } + } + +} diff --git a/project/AkkaBuild.scala b/project/AkkaBuild.scala index b73358c725..d76946a623 100644 --- a/project/AkkaBuild.scala +++ b/project/AkkaBuild.scala @@ -70,11 +70,9 @@ object AkkaBuild extends Build { S3.host in S3.upload := "downloads.typesafe.com.s3.amazonaws.com", S3.progress in S3.upload := true, mappings in S3.upload <<= (Release.releaseDirectory, version) map { (d, v) => - def distMapping(extension: String): (File, String) = { - val file = d / "downloads" / ("akka-" + v + "." 
+ extension) - file -> ("akka/" + file.getName) - } - Seq(distMapping("zip"), distMapping("tgz")) + val downloads = d / "downloads" + val archivesPathFinder = (downloads * ("*" + v + ".zip")) +++ (downloads * ("*" + v + ".tgz")) + archivesPathFinder.get.map(file => (file -> ("akka/" + file.getName))) } ), @@ -151,7 +149,7 @@ object AkkaBuild extends Build { lazy val actor = Project( id = "akka-actor", base = file("akka-actor"), - settings = defaultSettings ++ scaladocSettings ++ javadocSettings ++ Seq( + settings = defaultSettings ++ formatSettings ++ scaladocSettings ++ javadocSettings ++ Seq( // to fix scaladoc generation fullClasspath in doc in Compile <<= fullClasspath in Compile, libraryDependencies ++= Dependencies.actor, @@ -168,7 +166,7 @@ object AkkaBuild extends Build { id = "akka-dataflow", base = file("akka-dataflow"), dependencies = Seq(testkit % "test->test"), - settings = defaultSettings ++ scaladocSettings ++ OSGi.dataflow ++ cpsPlugin ++ Seq( + settings = defaultSettings ++ formatSettings ++ scaladocSettings ++ OSGi.dataflow ++ cpsPlugin ++ Seq( previousArtifact := akkaPreviousArtifact("akka-dataflow") ) ) @@ -177,7 +175,7 @@ object AkkaBuild extends Build { id = "akka-testkit", base = file("akka-testkit"), dependencies = Seq(actor), - settings = defaultSettings ++ scaladocSettings ++ javadocSettings ++ OSGi.testkit ++ Seq( + settings = defaultSettings ++ formatSettings ++ scaladocSettings ++ javadocSettings ++ OSGi.testkit ++ Seq( libraryDependencies ++= Dependencies.testkit, initialCommands += "import akka.testkit._", previousArtifact := akkaPreviousArtifact("akka-testkit") @@ -188,7 +186,7 @@ object AkkaBuild extends Build { id = "akka-actor-tests", base = file("akka-actor-tests"), dependencies = Seq(testkit % "compile;test->test"), - settings = defaultSettings ++ scaladocSettings ++ Seq( + settings = defaultSettings ++ formatSettings ++ scaladocSettings ++ Seq( publishArtifact in Compile := false, libraryDependencies ++= 
Dependencies.actorTests, testOptions += Tests.Argument(TestFrameworks.JUnit, "-v", "-a"), @@ -200,7 +198,7 @@ object AkkaBuild extends Build { id = "akka-remote", base = file("akka-remote"), dependencies = Seq(actor, actorTests % "test->test", testkit % "test->test"), - settings = defaultSettings ++ scaladocSettings ++ javadocSettings ++ OSGi.remote ++ Seq( + settings = defaultSettings ++ formatSettings ++ scaladocSettings ++ javadocSettings ++ OSGi.remote ++ Seq( libraryDependencies ++= Dependencies.remote, // disable parallel tests parallelExecution in Test := false, @@ -212,7 +210,7 @@ object AkkaBuild extends Build { id = "akka-multi-node-testkit", base = file("akka-multi-node-testkit"), dependencies = Seq(remote, testkit), - settings = defaultSettings ++ scaladocSettings ++ javadocSettings ++ Seq( + settings = defaultSettings ++ formatSettings ++ scaladocSettings ++ javadocSettings ++ Seq( previousArtifact := akkaPreviousArtifact("akka-multi-node-testkit") ) ) @@ -221,7 +219,7 @@ object AkkaBuild extends Build { id = "akka-remote-tests", base = file("akka-remote-tests"), dependencies = Seq(actorTests % "test->test", multiNodeTestkit), - settings = defaultSettings ++ scaladocSettings ++ multiJvmSettings ++ Seq( + settings = defaultSettings ++ formatSettings ++ scaladocSettings ++ multiJvmSettings ++ Seq( libraryDependencies ++= Dependencies.remoteTests, // disable parallel tests parallelExecution in Test := false, @@ -238,7 +236,7 @@ object AkkaBuild extends Build { id = "akka-cluster", base = file("akka-cluster"), dependencies = Seq(remote, remoteTests % "test->test" , testkit % "test->test"), - settings = defaultSettings ++ scaladocSettings ++ javadocSettings ++ multiJvmSettings ++ OSGi.cluster ++ Seq( + settings = defaultSettings ++ formatSettings ++ scaladocSettings ++ javadocSettings ++ multiJvmSettings ++ OSGi.cluster ++ Seq( libraryDependencies ++= Dependencies.cluster, // disable parallel tests parallelExecution in Test := false, @@ -254,7 +252,7 @@ 
object AkkaBuild extends Build { id = "akka-slf4j", base = file("akka-slf4j"), dependencies = Seq(actor, testkit % "test->test"), - settings = defaultSettings ++ scaladocSettings ++ javadocSettings ++ OSGi.slf4j ++ Seq( + settings = defaultSettings ++ formatSettings ++ scaladocSettings ++ javadocSettings ++ OSGi.slf4j ++ Seq( libraryDependencies ++= Dependencies.slf4j, previousArtifact := akkaPreviousArtifact("akka-slf4j") ) @@ -264,7 +262,7 @@ object AkkaBuild extends Build { id = "akka-agent", base = file("akka-agent"), dependencies = Seq(actor, testkit % "test->test"), - settings = defaultSettings ++ scaladocSettings ++ javadocSettings ++ OSGi.agent ++ Seq( + settings = defaultSettings ++ formatSettings ++ scaladocSettings ++ javadocSettings ++ OSGi.agent ++ Seq( libraryDependencies ++= Dependencies.agent, previousArtifact := akkaPreviousArtifact("akka-agent") ) @@ -274,7 +272,7 @@ object AkkaBuild extends Build { id = "akka-transactor", base = file("akka-transactor"), dependencies = Seq(actor, testkit % "test->test"), - settings = defaultSettings ++ scaladocSettings ++ javadocSettings ++ OSGi.transactor ++ Seq( + settings = defaultSettings ++ formatSettings ++ scaladocSettings ++ javadocSettings ++ OSGi.transactor ++ Seq( libraryDependencies ++= Dependencies.transactor, previousArtifact := akkaPreviousArtifact("akka-transactor") ) @@ -284,7 +282,7 @@ object AkkaBuild extends Build { id = "akka-persistence-experimental", base = file("akka-persistence"), dependencies = Seq(actor, remote % "test->test", testkit % "test->test"), - settings = defaultSettings ++ scaladocSettings ++ experimentalSettings ++ javadocSettings ++ OSGi.persistence ++ Seq( + settings = defaultSettings ++ formatSettings ++ scaladocSettings ++ experimentalSettings ++ javadocSettings ++ OSGi.persistence ++ Seq( fork in Test := true, libraryDependencies ++= Dependencies.persistence, previousArtifact := akkaPreviousArtifact("akka-persistence") @@ -304,7 +302,7 @@ object AkkaBuild extends Build { 
id = "akka-mailboxes-common", base = file("akka-durable-mailboxes/akka-mailboxes-common"), dependencies = Seq(remote, testkit % "compile;test->test"), - settings = defaultSettings ++ scaladocSettings ++ javadocSettings ++ OSGi.mailboxesCommon ++ Seq( + settings = defaultSettings ++ formatSettings ++ scaladocSettings ++ javadocSettings ++ OSGi.mailboxesCommon ++ Seq( libraryDependencies ++= Dependencies.mailboxes, previousArtifact := akkaPreviousArtifact("akka-mailboxes-common"), publishArtifact in Test := true @@ -315,7 +313,7 @@ object AkkaBuild extends Build { id = "akka-file-mailbox", base = file("akka-durable-mailboxes/akka-file-mailbox"), dependencies = Seq(mailboxesCommon % "compile;test->test", testkit % "test"), - settings = defaultSettings ++ scaladocSettings ++ javadocSettings ++ OSGi.fileMailbox ++ Seq( + settings = defaultSettings ++ formatSettings ++ scaladocSettings ++ javadocSettings ++ OSGi.fileMailbox ++ Seq( libraryDependencies ++= Dependencies.fileMailbox, previousArtifact := akkaPreviousArtifact("akka-file-mailbox") ) @@ -325,7 +323,7 @@ object AkkaBuild extends Build { id = "akka-zeromq", base = file("akka-zeromq"), dependencies = Seq(actor, testkit % "test;test->test"), - settings = defaultSettings ++ scaladocSettings ++ javadocSettings ++ OSGi.zeroMQ ++ Seq( + settings = defaultSettings ++ formatSettings ++ scaladocSettings ++ javadocSettings ++ OSGi.zeroMQ ++ Seq( libraryDependencies ++= Dependencies.zeroMQ, previousArtifact := akkaPreviousArtifact("akka-zeromq") ) @@ -335,7 +333,7 @@ object AkkaBuild extends Build { id = "akka-kernel", base = file("akka-kernel"), dependencies = Seq(actor, testkit % "test->test"), - settings = defaultSettings ++ scaladocSettings ++ javadocSettings ++ Seq( + settings = defaultSettings ++ formatSettings ++ scaladocSettings ++ javadocSettings ++ Seq( libraryDependencies ++= Dependencies.kernel, previousArtifact := akkaPreviousArtifact("akka-kernel") ) @@ -345,7 +343,7 @@ object AkkaBuild extends Build { id = 
"akka-camel", base = file("akka-camel"), dependencies = Seq(actor, slf4j, testkit % "test->test"), - settings = defaultSettings ++ scaladocSettings ++ javadocSettings ++ OSGi.camel ++ Seq( + settings = defaultSettings ++ formatSettings ++ scaladocSettings ++ javadocSettings ++ OSGi.camel ++ Seq( libraryDependencies ++= Dependencies.camel, testOptions += Tests.Argument(TestFrameworks.JUnit, "-v", "-a"), previousArtifact := akkaPreviousArtifact("akka-camel") @@ -406,7 +404,7 @@ object AkkaBuild extends Build { id = "akka-osgi", base = file("akka-osgi"), dependencies = Seq(actor), - settings = defaultSettings ++ scaladocSettings ++ javadocSettings ++ OSGi.osgi ++ Seq( + settings = defaultSettings ++ formatSettings ++ scaladocSettings ++ javadocSettings ++ OSGi.osgi ++ Seq( libraryDependencies ++= Dependencies.osgi, cleanFiles <+= baseDirectory { base => base / "src/main/resources" } , ActorOsgiConfigurationReference <<= ActorOsgiConfigurationReferenceAction(projects.filter(p => !p.id.contains("test") && !p.id.contains("sample"))), @@ -421,7 +419,7 @@ object AkkaBuild extends Build { id = "akka-osgi-aries", base = file("akka-osgi-aries"), dependencies = Seq(osgi % "compile;test->test"), - settings = defaultSettings ++ scaladocSettings ++ javadocSettings ++ OSGi.osgiAries ++ Seq( + settings = defaultSettings ++ formatSettings ++ scaladocSettings ++ javadocSettings ++ OSGi.osgiAries ++ Seq( libraryDependencies ++= Dependencies.osgiAries, parallelExecution in Test := false, reportBinaryIssues := () // disable bin comp check @@ -431,7 +429,7 @@ object AkkaBuild extends Build { lazy val akkaSbtPlugin = Project( id = "akka-sbt-plugin", base = file("akka-sbt-plugin"), - settings = defaultSettings ++ Seq( + settings = defaultSettings ++ formatSettings ++ Seq( sbtPlugin := true, publishMavenStyle := false, // SBT Plugins should be published as Ivy publishTo <<= Publish.akkaPluginPublishTo, @@ -445,13 +443,23 @@ object AkkaBuild extends Build { lazy val samples = Project( id = 
"akka-samples", base = file("akka-samples"), - settings = parentSettings, - aggregate = Seq(camelSample, fsmSample, helloSample, helloKernelSample, remoteSample, persistenceSample, clusterSample, multiNodeSample, osgiDiningHakkersSample) + settings = parentSettings ++ ActivatorDist.settings, + aggregate = Seq(camelSampleJava, camelSampleScala, mainSampleJava, mainSampleScala, + remoteSampleJava, remoteSampleScala, clusterSampleJava, clusterSampleScala, + fsmSample, persistenceSample, + multiNodeSample, helloKernelSample, osgiDiningHakkersSample) ) - lazy val camelSample = Project( - id = "akka-sample-camel", - base = file("akka-samples/akka-sample-camel"), + lazy val camelSampleJava = Project( + id = "akka-sample-camel-java", + base = file("akka-samples/akka-sample-camel-java"), + dependencies = Seq(actor, camel), + settings = sampleSettings ++ Seq(libraryDependencies ++= Dependencies.camelSample) + ) + + lazy val camelSampleScala = Project( + id = "akka-sample-camel-scala", + base = file("akka-samples/akka-sample-camel-scala"), dependencies = Seq(actor, camel), settings = sampleSettings ++ Seq(libraryDependencies ++= Dependencies.camelSample) ) @@ -463,9 +471,16 @@ object AkkaBuild extends Build { settings = sampleSettings ) - lazy val helloSample = Project( - id = "akka-sample-hello", - base = file("akka-samples/akka-sample-hello"), + lazy val mainSampleJava = Project( + id = "akka-sample-main-java", + base = file("akka-samples/akka-sample-main-java"), + dependencies = Seq(actor), + settings = sampleSettings + ) + + lazy val mainSampleScala = Project( + id = "akka-sample-main-scala", + base = file("akka-samples/akka-sample-main-scala"), dependencies = Seq(actor), settings = sampleSettings ) @@ -477,10 +492,17 @@ object AkkaBuild extends Build { settings = sampleSettings ) - lazy val remoteSample = Project( - id = "akka-sample-remote", - base = file("akka-samples/akka-sample-remote"), - dependencies = Seq(actor, remote, kernel), + lazy val remoteSampleJava = 
Project( + id = "akka-sample-remote-java", + base = file("akka-samples/akka-sample-remote-java"), + dependencies = Seq(actor, remote), + settings = sampleSettings + ) + + lazy val remoteSampleScala = Project( + id = "akka-sample-remote-scala", + base = file("akka-samples/akka-sample-remote-scala"), + dependencies = Seq(actor, remote), settings = sampleSettings ) @@ -491,11 +513,11 @@ object AkkaBuild extends Build { settings = sampleSettings ) - lazy val clusterSample = Project( - id = "akka-sample-cluster", - base = file("akka-samples/akka-sample-cluster"), + lazy val clusterSampleJava = Project( + id = "akka-sample-cluster-java", + base = file("akka-samples/akka-sample-cluster-java"), dependencies = Seq(cluster, contrib, remoteTests % "test", testkit % "test"), - settings = sampleSettings ++ multiJvmSettings ++ Seq( + settings = multiJvmSettings ++ sampleSettings ++ Seq( libraryDependencies ++= Dependencies.clusterSample, javaOptions in run ++= Seq( "-Djava.library.path=./sigar", @@ -508,12 +530,30 @@ object AkkaBuild extends Build { } ) ) configs (MultiJvm) - + + lazy val clusterSampleScala = Project( + id = "akka-sample-cluster-scala", + base = file("akka-samples/akka-sample-cluster-scala"), + dependencies = Seq(cluster, contrib, remoteTests % "test", testkit % "test"), + settings = multiJvmSettings ++ sampleSettings ++ Seq( + libraryDependencies ++= Dependencies.clusterSample, + javaOptions in run ++= Seq( + "-Djava.library.path=./sigar", + "-Xms128m", "-Xmx1024m"), + Keys.fork in run := true, + // disable parallel tests + parallelExecution in Test := false, + extraOptions in MultiJvm <<= (sourceDirectory in MultiJvm) { src => + (name: String) => (src ** (name + ".conf")).get.headOption.map("-Dakka.config=" + _.absolutePath).toSeq + } + ) + ) configs (MultiJvm) + lazy val multiNodeSample = Project( id = "akka-sample-multi-node", base = file("akka-samples/akka-sample-multi-node"), dependencies = Seq(multiNodeTestkit % "test", testkit % "test"), - settings = 
sampleSettings ++ multiJvmSettings ++ experimentalSettings ++ Seq( + settings = multiJvmSettings ++ sampleSettings ++ experimentalSettings ++ Seq( libraryDependencies ++= Dependencies.multiNodeSample, // disable parallel tests parallelExecution in Test := false, @@ -581,7 +621,7 @@ object AkkaBuild extends Build { dependencies = Seq(actor, testkit % "test->test", mailboxesCommon % "compile;test->test", channels, remote % "compile;test->test", cluster, slf4j, agent, dataflow, transactor, fileMailbox, zeroMQ, camel, osgi, osgiAries, persistence % "compile;test->test"), - settings = defaultSettings ++ site.settings ++ site.sphinxSupport() ++ site.publishSite ++ sphinxPreprocessing ++ cpsPlugin ++ Seq( + settings = defaultSettings ++ docFormatSettings ++ site.settings ++ site.sphinxSupport() ++ site.publishSite ++ sphinxPreprocessing ++ cpsPlugin ++ Seq( sourceDirectory in Sphinx <<= baseDirectory / "rst", sphinxPackages in Sphinx <+= baseDirectory { _ / "_sphinx" / "pygments" }, // copy akka-contrib/docs into our rst_preprocess/contrib (and apply substitutions) @@ -611,7 +651,7 @@ object AkkaBuild extends Build { id = "akka-contrib", base = file("akka-contrib"), dependencies = Seq(remote, remoteTests % "test->test", cluster), - settings = defaultSettings ++ scaladocSettings ++ javadocSettings ++ multiJvmSettings ++ Seq( + settings = defaultSettings ++ formatSettings ++ scaladocSettings ++ javadocSettings ++ multiJvmSettings ++ Seq( libraryDependencies ++= Dependencies.contrib, testOptions += Tests.Argument(TestFrameworks.JUnit, "-v"), reportBinaryIssues := (), // disable bin comp check @@ -632,7 +672,7 @@ object AkkaBuild extends Build { id = "akka-channels-experimental", base = file("akka-channels"), dependencies = Seq(actor), - settings = defaultSettings ++ scaladocSettings ++ experimentalSettings ++ Seq( + settings = defaultSettings ++ formatSettings ++ scaladocSettings ++ experimentalSettings ++ Seq( libraryDependencies +=("org.scala-lang" % "scala-reflect" % 
scalaVersion.value), reportBinaryIssues := () // disable bin comp check ) @@ -648,7 +688,7 @@ object AkkaBuild extends Build { id = "akka-channels-tests", base = file("akka-channels-tests"), dependencies = Seq(channels, testkit % "compile;test->test"), - settings = defaultSettings ++ experimentalSettings ++ Seq( + settings = defaultSettings ++ formatSettings ++ experimentalSettings ++ Seq( publishArtifact in Compile := false, libraryDependencies += excludeOldModules("org.scala-lang" % "scala-compiler" % scalaVersion.value), reportBinaryIssues := () // disable bin comp check @@ -672,7 +712,7 @@ object AkkaBuild extends Build { reportBinaryIssues := () // disable bin comp check ) - lazy val sampleSettings = defaultSettings ++ Seq( + lazy val sampleSettings = defaultSettings ++ docFormatSettings ++ Seq( publishArtifact in (Compile, packageBin) := false, reportBinaryIssues := () // disable bin comp check ) @@ -758,7 +798,7 @@ object AkkaBuild extends Build { else Seq.empty } - lazy val defaultSettings = baseSettings ++ formatSettings ++ mimaSettings ++ lsSettings ++ resolverSettings ++ + lazy val defaultSettings = baseSettings ++ mimaSettings ++ lsSettings ++ resolverSettings ++ Protobuf.settings ++ Seq( // compile options scalacOptions in Compile ++= Seq("-encoding", "UTF-8", "-target:jvm-1.6", "-deprecation", "-feature", "-unchecked", "-Xlog-reflective-calls", "-Xlint"), @@ -868,8 +908,14 @@ object AkkaBuild extends Build { ) lazy val formatSettings = SbtScalariform.scalariformSettings ++ Seq( - ScalariformKeys.preferences in Compile := formattingPreferences, - ScalariformKeys.preferences in Test := formattingPreferences + ScalariformKeys.preferences in Compile := formattingPreferences, + ScalariformKeys.preferences in Test := formattingPreferences + ) + + lazy val docFormatSettings = SbtScalariform.scalariformSettings ++ Seq( + ScalariformKeys.preferences in Compile := docFormattingPreferences, + ScalariformKeys.preferences in Test := docFormattingPreferences, + 
ScalariformKeys.preferences in MultiJvm := docFormattingPreferences ) def formattingPreferences = { @@ -879,6 +925,14 @@ object AkkaBuild extends Build { .setPreference(AlignParameters, true) .setPreference(AlignSingleLineCaseStatements, true) } + + def docFormattingPreferences = { + import scalariform.formatter.preferences._ + FormattingPreferences() + .setPreference(RewriteArrowSymbols, false) + .setPreference(AlignParameters, true) + .setPreference(AlignSingleLineCaseStatements, true) + } lazy val multiJvmSettings = SbtMultiJvm.multiJvmSettings ++ inConfig(MultiJvm)(SbtScalariform.configScalariformSettings) ++ Seq( jvmOptions in MultiJvm := defaultMultiJvmOptions, diff --git a/project/Release.scala b/project/Release.scala index d6ff181a99..f35b228bab 100644 --- a/project/Release.scala +++ b/project/Release.scala @@ -28,6 +28,8 @@ object Release { val (state2, (api, japi)) = extracted.runTask(Unidoc.unidoc, state1) val (state3, docs) = extracted.runTask(generate in Sphinx, state2) val (state4, dist) = extracted.runTask(Dist.dist, state3) + val (state5, activatorDist) = extracted.runTask(ActivatorDist.activatorDist in LocalProject(AkkaBuild.samples.id), state4) + IO.delete(release) IO.createDirectory(release) IO.copyDirectory(repo, release / "releases") @@ -35,7 +37,9 @@ object Release { IO.copyDirectory(japi, release / "japi" / "akka" / releaseVersion) IO.copyDirectory(docs, release / "docs" / "akka" / releaseVersion) IO.copyFile(dist, release / "downloads" / dist.name) - state4 + for (f <- (activatorDist * "*.zip").get) + IO.copyFile(f, release / "downloads" / f.name) + state5 } def uploadReleaseCommand = Command.command("upload-release") { state =>