diff --git a/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala index 96f93fc160..51b805f69e 100644 --- a/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala @@ -425,7 +425,7 @@ class RoutingSpec extends WordSpec with MustMatchers { }) def limit = 1 - def selectionCount = 2 + def selectionCount = 1 def rampupRate = 0.1 def partialFill = true def instance = factory @@ -458,7 +458,7 @@ class RoutingSpec extends WordSpec with MustMatchers { }) def limit = 2 - def selectionCount = 2 + def selectionCount = 1 def rampupRate = 0.1 def partialFill = false def instance = factory diff --git a/akka-actor/src/main/scala/akka/actor/ActorRef.scala b/akka-actor/src/main/scala/akka/actor/ActorRef.scala index 8affac2037..6fa44452e0 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorRef.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorRef.scala @@ -109,6 +109,7 @@ trait ActorRef extends ActorRefShared with java.lang.Comparable[ActorRef] { scal * Defines the default timeout for '!!' and '!!!' invocations, * e.g. the timeout for the future returned by the call to '!!' and '!!!'. */ + @deprecated("Will be replaced by implicit-scoped timeout on all methods that needs it, will default to timeout specified in config") @BeanProperty @volatile var timeout: Long = Actor.TIMEOUT @@ -183,11 +184,13 @@ trait ActorRef extends ActorRefShared with java.lang.Comparable[ActorRef] { scal /** * Returns on which node this actor lives if None it lives in the local ActorRegistry */ + @deprecated("Remoting will become fully transparent in the future") def homeAddress: Option[InetSocketAddress] /** * Java API.
*/ + @deprecated("Remoting will become fully transparent in the future") def getHomeAddress(): InetSocketAddress = homeAddress getOrElse null /** @@ -253,6 +256,7 @@ trait ActorRef extends ActorRefShared with java.lang.Comparable[ActorRef] { scal /** * Is the actor able to handle the message passed in as arguments? */ + @deprecated("Will be removed without replacement, it's just not reliable in the face of `become` and `unbecome`") def isDefinedAt(message: Any): Boolean = actor.isDefinedAt(message) /** @@ -378,23 +382,27 @@ trait ActorRef extends ActorRefShared with java.lang.Comparable[ActorRef] { scal /** * Returns the class for the Actor instance that is managed by the ActorRef. */ + @deprecated("Will be removed without replacement, doesn't make any sense to have in the face of `become` and `unbecome`") def actorClass: Class[_ <: Actor] /** * Akka Java API. * Returns the class for the Actor instance that is managed by the ActorRef. */ + @deprecated("Will be removed without replacement, doesn't make any sense to have in the face of `become` and `unbecome`") def getActorClass(): Class[_ <: Actor] = actorClass /** * Returns the class name for the Actor instance that is managed by the ActorRef. */ + @deprecated("Will be removed without replacement, doesn't make any sense to have in the face of `become` and `unbecome`") def actorClassName: String /** * Akka Java API. * Returns the class name for the Actor instance that is managed by the ActorRef. */ + @deprecated("Will be removed without replacement, doesn't make any sense to have in the face of `become` and `unbecome`") def getActorClassName(): String = actorClassName /** @@ -636,11 +644,13 @@ class LocalActorRef private[akka] ( /** * Returns the class for the Actor instance that is managed by the ActorRef. 
*/ + @deprecated("Will be removed without replacement, doesn't make any sense to have in the face of `become` and `unbecome`") def actorClass: Class[_ <: Actor] = actor.getClass.asInstanceOf[Class[_ <: Actor]] /** * Returns the class name for the Actor instance that is managed by the ActorRef. */ + @deprecated("Will be removed without replacement, doesn't make any sense to have in the face of `become` and `unbecome`") def actorClassName: String = actorClass.getName /** @@ -1174,6 +1184,7 @@ private[akka] case class RemoteActorRef private[akka] ( protected[akka] def registerSupervisorAsRemoteActor: Option[Uuid] = None // ==== NOT SUPPORTED ==== + @deprecated("Will be removed without replacement, doesn't make any sense to have in the face of `become` and `unbecome`") def actorClass: Class[_ <: Actor] = unsupported def dispatcher_=(md: MessageDispatcher): Unit = unsupported def dispatcher: MessageDispatcher = unsupported diff --git a/akka-actor/src/main/scala/akka/routing/Pool.scala b/akka-actor/src/main/scala/akka/routing/Pool.scala index 8d431541f7..6ab6aa0c4d 100644 --- a/akka-actor/src/main/scala/akka/routing/Pool.scala +++ b/akka-actor/src/main/scala/akka/routing/Pool.scala @@ -104,7 +104,7 @@ trait DefaultActorPool extends ActorPool { this: Actor => /** * Selectors * These traits define how, when a message needs to be routed, delegate(s) are chosen from the pool - **/ + */ /** * Returns the set of delegates with the least amount of message backlog. 
@@ -141,7 +141,7 @@ trait RoundRobinSelector { else selectionCount val set = - for (i <- 0 to take) yield { + for (i <- 0 until take) yield { _last = (_last + 1) % length delegates(_last) } diff --git a/akka-docs/Makefile b/akka-docs/Makefile index 7b803258cb..49f649367f 100644 --- a/akka-docs/Makefile +++ b/akka-docs/Makefile @@ -8,7 +8,7 @@ PAPER = BUILDDIR = _build EASYINSTALL = easy_install LOCALPACKAGES = $(shell pwd)/$(BUILDDIR)/site-packages -PYGMENTSDIR = pygments +PYGMENTSDIR = _sphinx/pygments # Internal variables. PAPEROPT_a4 = -D latex_paper_size=a4 diff --git a/akka-docs/exts/includecode.py b/akka-docs/_sphinx/exts/includecode.py similarity index 100% rename from akka-docs/exts/includecode.py rename to akka-docs/_sphinx/exts/includecode.py diff --git a/akka-docs/pygments/setup.py b/akka-docs/_sphinx/pygments/setup.py similarity index 100% rename from akka-docs/pygments/setup.py rename to akka-docs/_sphinx/pygments/setup.py diff --git a/akka-docs/pygments/styles/__init__.py b/akka-docs/_sphinx/pygments/styles/__init__.py similarity index 100% rename from akka-docs/pygments/styles/__init__.py rename to akka-docs/_sphinx/pygments/styles/__init__.py diff --git a/akka-docs/pygments/styles/simple.py b/akka-docs/_sphinx/pygments/styles/simple.py similarity index 100% rename from akka-docs/pygments/styles/simple.py rename to akka-docs/_sphinx/pygments/styles/simple.py diff --git a/akka-docs/_static/akka.png b/akka-docs/_sphinx/static/akka.png similarity index 100% rename from akka-docs/_static/akka.png rename to akka-docs/_sphinx/static/akka.png diff --git a/akka-docs/_static/logo.png b/akka-docs/_sphinx/static/logo.png similarity index 100% rename from akka-docs/_static/logo.png rename to akka-docs/_sphinx/static/logo.png diff --git a/akka-docs/themes/akka/layout.html b/akka-docs/_sphinx/themes/akka/layout.html similarity index 100% rename from akka-docs/themes/akka/layout.html rename to akka-docs/_sphinx/themes/akka/layout.html diff --git 
a/akka-docs/themes/akka/static/akka.css_t b/akka-docs/_sphinx/themes/akka/static/akka.css_t similarity index 99% rename from akka-docs/themes/akka/static/akka.css_t rename to akka-docs/_sphinx/themes/akka/static/akka.css_t index 7c417e9917..f05e86bb6a 100644 --- a/akka-docs/themes/akka/static/akka.css_t +++ b/akka-docs/_sphinx/themes/akka/static/akka.css_t @@ -30,13 +30,11 @@ div.footer { /* link colors and text decoration */ a:link { - font-weight: bold; text-decoration: none; color: {{ theme_linkcolor }}; } a:visited { - font-weight: bold; text-decoration: none; color: {{ theme_visitedlinkcolor }}; } diff --git a/akka-docs/themes/akka/static/alert_info_32.png b/akka-docs/_sphinx/themes/akka/static/alert_info_32.png similarity index 100% rename from akka-docs/themes/akka/static/alert_info_32.png rename to akka-docs/_sphinx/themes/akka/static/alert_info_32.png diff --git a/akka-docs/themes/akka/static/alert_warning_32.png b/akka-docs/_sphinx/themes/akka/static/alert_warning_32.png similarity index 100% rename from akka-docs/themes/akka/static/alert_warning_32.png rename to akka-docs/_sphinx/themes/akka/static/alert_warning_32.png diff --git a/akka-docs/themes/akka/static/bg-page.png b/akka-docs/_sphinx/themes/akka/static/bg-page.png similarity index 100% rename from akka-docs/themes/akka/static/bg-page.png rename to akka-docs/_sphinx/themes/akka/static/bg-page.png diff --git a/akka-docs/themes/akka/static/bullet_orange.png b/akka-docs/_sphinx/themes/akka/static/bullet_orange.png similarity index 100% rename from akka-docs/themes/akka/static/bullet_orange.png rename to akka-docs/_sphinx/themes/akka/static/bullet_orange.png diff --git a/akka-docs/themes/akka/theme.conf b/akka-docs/_sphinx/themes/akka/theme.conf similarity index 66% rename from akka-docs/themes/akka/theme.conf rename to akka-docs/_sphinx/themes/akka/theme.conf index 7f45fd1718..620c88f2ae 100644 --- a/akka-docs/themes/akka/theme.conf +++ b/akka-docs/_sphinx/themes/akka/theme.conf @@ -7,6 +7,6 @@ 
pygments_style = friendly full_logo = false textcolor = #333333 headingcolor = #0c3762 -linkcolor = #dc3c01 -visitedlinkcolor = #892601 -hoverlinkcolor = #ff4500 +linkcolor = #0c3762 +visitedlinkcolor = #0c3762 +hoverlinkcolor = #0c3762 diff --git a/akka-docs/conf.py b/akka-docs/conf.py index f0dd997167..209f747afc 100644 --- a/akka-docs/conf.py +++ b/akka-docs/conf.py @@ -7,7 +7,7 @@ import sys, os # -- General configuration ----------------------------------------------------- -sys.path.append(os.path.abspath('exts')) +sys.path.append(os.path.abspath('_sphinx/exts')) extensions = ['sphinx.ext.todo', 'includecode'] templates_path = ['_templates'] @@ -31,13 +31,13 @@ html_theme = 'akka' html_theme_options = { 'full_logo': 'true' } -html_theme_path = ['themes'] +html_theme_path = ['_sphinx/themes'] html_title = 'Akka Documentation' -html_logo = '_static/logo.png' +html_logo = '_sphinx/static/logo.png' #html_favicon = None -html_static_path = ['_static'] +html_static_path = ['_sphinx/static'] html_last_updated_fmt = '%b %d, %Y' #html_sidebars = {} @@ -65,4 +65,4 @@ latex_elements = { 'preamble': '\\definecolor{VerbatimColor}{rgb}{0.935,0.935,0.935}' } -# latex_logo = '_static/akka.png' +# latex_logo = '_sphinx/static/akka.png' diff --git a/akka-docs/dev/documentation.rst b/akka-docs/dev/documentation.rst new file mode 100644 index 0000000000..d5678c58ff --- /dev/null +++ b/akka-docs/dev/documentation.rst @@ -0,0 +1,18 @@ +Documentation +============= + +The Akka documentation uses `reStructuredText +`_
-val m = Message(1.4)
-val b = m.bodyAs[String]
-``_
+.. code-block:: scala
+
+ val m = Message(1.4)
+ val b = m.bodyAs[String]
+
diff --git a/akka-docs/pending/migration-guide-0.9.x-0.10.x.rst b/akka-docs/general/migration-guide-0.9.x-0.10.x.rst
similarity index 100%
rename from akka-docs/pending/migration-guide-0.9.x-0.10.x.rst
rename to akka-docs/general/migration-guide-0.9.x-0.10.x.rst
diff --git a/akka-docs/general/migration-guide-1.0.x-1.1.x.rst b/akka-docs/general/migration-guide-1.0.x-1.1.x.rst
new file mode 100644
index 0000000000..c32b2545ac
--- /dev/null
+++ b/akka-docs/general/migration-guide-1.0.x-1.1.x.rst
@@ -0,0 +1,37 @@
+Akka has now moved to Scala 2.9.x
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Akka HTTP
+=========
+
+# akka.servlet.Initializer has been moved to ``akka-kernel`` to be able to have ``akka-http`` not depend on ``akka-remote``, if you don't want to use the class for kernel, just create your own version of ``akka.servlet.Initializer``, it's just a couple of lines of code and there are instructions here: `Akka Http Docs `_
+# akka.http.ListWriter has been removed in full, if you use it and want to keep using it, here's the code: `ListWriter `_
+# Jersey-server is now a "provided" dependency for ``akka-http``, so you'll need to add the dependency to your project, it's built against Jersey 1.3
+
+Akka Actor
+==========
+
+# is now dependency free, with the exception of the dependency on the ``scala-library.jar``
+# does not bundle any logging anymore, but you can subscribe to events within Akka by registering an event handler on akka.event.EventHandler or by specifying the ``FQN`` of an Actor in the akka.conf under akka.event-handlers; there is an ``akka-slf4j`` module which still provides the Logging trait and a default ``SLF4J`` logger adapter.
+Don't forget to add a SLF4J backend though, we recommend:
+
+.. code-block:: scala
+ lazy val logback = "ch.qos.logback" % "logback-classic" % "0.9.28"
+
+# If you used HawtDispatcher and want to continue using it, you need to include akka-dispatcher-extras.jar from Akka Modules, in your akka.conf you need to specify: ``akka.dispatch.HawtDispatcherConfigurator`` instead of ``HawtDispatcher``
+# FSM: the onTransition method changed from Function1 to PartialFunction; there is an implicit conversion for the precise types in place, but it may be necessary to add an underscore if you are passing an eta-expansion (using a method as function value).
+
+Akka Typed Actor
+================
+
+All methods starting with 'get*' are deprecated and will be removed in post 1.1 release.
+
+Akka Remote
+===========
+
+# ``UnparsebleException`` has been renamed to ``CannotInstantiateRemoteExceptionDueToRemoteProtocolParsingErrorException(exception, classname, message)``
+
+Akka Testkit
+============
+
+The TestKit moved into the akka-testkit subproject and correspondingly into the ``akka.testkit`` package.
diff --git a/akka-docs/pending/migration-guides.rst b/akka-docs/general/migration-guides.rst
similarity index 85%
rename from akka-docs/pending/migration-guides.rst
rename to akka-docs/general/migration-guides.rst
index 4c44977d2f..361f8e3c7a 100644
--- a/akka-docs/pending/migration-guides.rst
+++ b/akka-docs/general/migration-guides.rst
@@ -5,4 +5,4 @@ Here are migration guides for the latest releases
* `Migrate 0.8.x -> 0.9.x `_
* `Migrate 0.9.x -> 0.10.x `_
* `Migrate 0.10.x -> 1.0.x `_
-* `Migrate 1.0.x -> 1.1.x `_
+* `Migrate 1.0.x -> 1.1.x `_
diff --git a/akka-docs/index.rst b/akka-docs/index.rst
index 6f31717995..fbb2506fab 100644
--- a/akka-docs/index.rst
+++ b/akka-docs/index.rst
@@ -4,80 +4,12 @@ Contents
.. toctree::
:maxdepth: 2
- manual/getting-started-first-scala
- manual/getting-started-first-java
- manual/fsm-scala
-
-.. pending/actor-registry-java
-.. pending/actor-registry-scala
-.. pending/actors-scala
-.. pending/agents-scala
-.. pending/articles
-.. pending/benchmarks
-.. pending/building-akka
-.. pending/buildr
-.. pending/cluster-membership
-.. pending/companies-using-akka
-.. pending/configuration
-.. pending/dataflow-java
-.. pending/dataflow-scala
-.. pending/deployment-scenarios
-.. pending/developer-guidelines
-.. pending/dispatchers-java
-.. pending/dispatchers-scala
-.. pending/event-handler
-.. pending/external-sample-projects
-.. pending/fault-tolerance-java
-.. pending/fault-tolerance-scala
-.. pending/Feature Stability Matrix
-.. pending/futures-scala
-.. pending/getting-started
-.. pending/guice-integration
-.. pending/Home
-.. pending/http
-.. pending/issue-tracking
-.. pending/language-bindings
-.. pending/licenses
-.. pending/logging
-.. pending/Migration-1.0-1.1
-.. pending/migration-guide-0.10.x-1.0.x
-.. pending/migration-guide-0.7.x-0.8.x
-.. pending/migration-guide-0.8.x-0.9.x
-.. pending/migration-guide-0.9.x-0.10.x
-.. pending/migration-guides
-.. pending/Recipes
-.. pending/release-notes
-.. pending/remote-actors-java
-.. pending/remote-actors-scala
-.. pending/routing-java
-.. pending/routing-scala
-.. pending/scheduler
-.. pending/security
-.. pending/serialization-java
-.. pending/serialization-scala
-.. pending/servlet
-.. pending/slf4j
-.. pending/sponsors
-.. pending/stm
-.. pending/stm-java
-.. pending/stm-scala
-.. pending/team
-.. pending/test
-.. pending/testkit
-.. pending/testkit-example
-.. pending/third-party-integrations
-.. pending/transactors-java
-.. pending/transactors-scala
-.. pending/tutorial-chat-server-java
-.. pending/tutorial-chat-server-scala
-.. pending/typed-actors-java
-.. pending/typed-actors-scala
-.. pending/untyped-actors-java
-.. pending/use-cases
-.. pending/web
+ intro/index
+ general/index
+ scala/index
+ dev/index
Links
=====
-* `Akka Documentation `_
* `Support `_
diff --git a/akka-docs/intro/build-path.png b/akka-docs/intro/build-path.png
new file mode 100644
index 0000000000..60f469e6d2
Binary files /dev/null and b/akka-docs/intro/build-path.png differ
diff --git a/akka-docs/pending/building-akka.rst b/akka-docs/intro/building-akka.rst
similarity index 54%
rename from akka-docs/pending/building-akka.rst
rename to akka-docs/intro/building-akka.rst
index 31af34c687..3d4f4ca1a0 100644
--- a/akka-docs/pending/building-akka.rst
+++ b/akka-docs/intro/building-akka.rst
@@ -3,171 +3,185 @@ Building Akka
This page describes how to build and run Akka from the latest source code.
+.. contents:: :local:
+
+
Get the source code
-------------------
-Akka uses `Git `_ and is hosted at `Github `_.
+Akka uses `Git `_ and is hosted at `Github
+`_.
-You first need Git installed on your machine. You can then clone the source repositories:
-* Akka repository from ``_
-* Akka Modules repository from ``_
+You first need Git installed on your machine. You can then clone the source
+repositories:
-For example:
+- Akka repository from ``_
+- Akka Modules repository from ``_
-::
+For example::
- git clone git://github.com/jboner/akka.git
- git clone git://github.com/jboner/akka-modules.git
+ git clone git://github.com/jboner/akka.git
+ git clone git://github.com/jboner/akka-modules.git
-If you have already cloned the repositories previously then you can update the code with ``git pull``:
+If you have already cloned the repositories previously then you can update the
+code with ``git pull``::
-::
+ git pull origin master
- git pull origin master
SBT - Simple Build Tool
-----------------------
-Akka is using the excellent `SBT `_ build system. So the first thing you have to do is to download and install SBT. You can read more about how to do that `here `_ .
+Akka is using the excellent `SBT `_
+build system. So the first thing you have to do is to download and install
+SBT. You can read more about how to do that `here
+`_ .
-The SBT commands that you'll need to build Akka are all included below. If you want to find out more about SBT and using it for your own projects do read the `SBT documentation `_.
+The SBT commands that you'll need to build Akka are all included below. If you
+want to find out more about SBT and using it for your own projects do read the
+`SBT documentation
+`_.
-The Akka SBT build file is ``project/build/AkkaProject.scala`` with some properties defined in ``project/build.properties``.
+The Akka SBT build file is ``project/build/AkkaProject.scala`` with some
+properties defined in ``project/build.properties``.
-----
Building Akka
-------------
-First make sure that you are in the akka code directory:
+First make sure that you are in the akka code directory::
-::
+ cd akka
- cd akka
Fetching dependencies
^^^^^^^^^^^^^^^^^^^^^
-SBT does not fetch dependencies automatically. You need to manually do this with the ``update`` command:
+SBT does not fetch dependencies automatically. You need to manually do this with
+the ``update`` command::
-::
+ sbt update
- sbt update
+Once finished, all the dependencies for Akka will be in the ``lib_managed``
+directory under each module: akka-actor, akka-stm, and so on.
-Once finished, all the dependencies for Akka will be in the ``lib_managed`` directory under each module: akka-actor, akka-stm, and so on.
+*Note: you only need to run update the first time you are building the code,
+or when the dependencies have changed.*
-*Note: you only need to run {{update}} the first time you are building the code, or when the dependencies have changed.*
Building
^^^^^^^^
-To compile all the Akka core modules use the ``compile`` command:
+To compile all the Akka core modules use the ``compile`` command::
-::
+ sbt compile
- sbt compile
+You can run all tests with the ``test`` command::
-You can run all tests with the ``test`` command:
+ sbt test
-::
+If compiling and testing are successful then you have everything working for the
+latest Akka development version.
- sbt test
-
-If compiling and testing are successful then you have everything working for the latest Akka development version.
Publish to local Ivy repository
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-If you want to deploy the artifacts to your local Ivy repository (for example, to use from an SBT project) use the ``publish-local`` command:
+If you want to deploy the artifacts to your local Ivy repository (for example,
+to use from an SBT project) use the ``publish-local`` command::
-::
+ sbt publish-local
- sbt publish-local
Publish to local Maven repository
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-If you want to deploy the artifacts to your local Maven repository use:
+If you want to deploy the artifacts to your local Maven repository use::
-::
+ sbt publish-local publish
- sbt publish-local publish
SBT interactive mode
^^^^^^^^^^^^^^^^^^^^
-Note that in the examples above we are calling ``sbt compile`` and ``sbt test`` and so on. SBT also has an interactive mode. If you just run ``sbt`` you enter the interactive SBT prompt and can enter the commands directly. This saves starting up a new JVM instance for each command and can be much faster and more convenient.
+Note that in the examples above we are calling ``sbt compile`` and ``sbt test``
+and so on. SBT also has an interactive mode. If you just run ``sbt`` you enter
+the interactive SBT prompt and can enter the commands directly. This saves
+starting up a new JVM instance for each command and can be much faster and more
+convenient.
For example, building Akka as above is more commonly done like this:
-::
+.. code-block:: none
+
+ % sbt
+ [info] Building project akka 1.1-SNAPSHOT against Scala 2.9.0.RC1
+ [info] using AkkaParentProject with sbt 0.7.6.RC0 and Scala 2.7.7
+ > update
+ [info]
+ [info] == akka-actor / update ==
+ ...
+ [success] Successful.
+ [info]
+ [info] Total time ...
+ > compile
+ ...
+ > test
+ ...
- % sbt
- [info] Building project akka 1.1-SNAPSHOT against Scala 2.8.1
- [info] using AkkaParentProject with sbt 0.7.5.RC0 and Scala 2.7.7
- > update
- [info]
- [info] == akka-actor / update ==
- ...
- [success] Successful.
- [info]
- [info] Total time ...
- > compile
- ...
- > test
- ...
SBT batch mode
^^^^^^^^^^^^^^
-It's also possible to combine commands in a single call. For example, updating, testing, and publishing Akka to the local Ivy repository can be done with:
+It's also possible to combine commands in a single call. For example, updating,
+testing, and publishing Akka to the local Ivy repository can be done with::
-::
+ sbt update test publish-local
- sbt update test publish-local
-
-----
Building Akka Modules
---------------------
-To build Akka Modules first build and publish Akka to your local Ivy repository as described above. Or using:
+To build Akka Modules first build and publish Akka to your local Ivy repository
+as described above. Or using::
-::
+ cd akka
+ sbt update publish-local
- cd akka
- sbt update publish-local
+Then you can build Akka Modules using the same steps as building Akka. First
+update to get all dependencies (including the Akka core modules), then compile,
+test, or publish-local as needed. For example::
-Then you can build Akka Modules using the same steps as building Akka. First update to get all dependencies (including the Akka core modules), then compile, test, or publish-local as needed. For example:
+ cd akka-modules
+ sbt update publish-local
-::
-
- cd akka-modules
- sbt update publish-local
Microkernel distribution
^^^^^^^^^^^^^^^^^^^^^^^^
-To build the Akka Modules microkernel (the same as the Akka Modules distribution download) use the ``dist`` command:
+To build the Akka Modules microkernel (the same as the Akka Modules distribution
+download) use the ``dist`` command::
-::
+ sbt dist
- sbt dist
+The distribution zip can be found in the dist directory and is called
+``akka-modules-{version}.zip``.
-The distribution zip can be found in the dist directory and is called ``akka-modules-{version}.zip``.
+To run the microkernel, unzip the zip file, change into the unzipped directory,
+set the ``AKKA_HOME`` environment variable, and run the main jar file. For
+example:
-To run the mircokernel, unzip the zip file, change into the unzipped directory, set the ``AKKA_HOME`` environment variable, and run the main jar file. For example:
+.. code-block:: none
-::
+ unzip dist/akka-modules-1.1-SNAPSHOT.zip
+ cd akka-modules-1.1-SNAPSHOT
+ export AKKA_HOME=`pwd`
+ java -jar akka-modules-1.1-SNAPSHOT.jar
- unzip dist/akka-modules-1.1-SNAPSHOT.zip
- cd akka-modules-1.1-SNAPSHOT
- export AKKA_HOME=`pwd`
- java -jar akka-modules-1.1-SNAPSHOT.jar
+The microkernel will boot up and install the sample applications that reside in
+the distribution's ``deploy`` directory. You can deploy your own applications
+into the ``deploy`` directory as well.
-The microkernel will boot up and install the sample applications that reside in the distribution's ``deploy`` directory. You can deploy your own applications into the ``deploy`` directory as well.
-
-----
Scripts
-------
@@ -177,32 +191,38 @@ Linux/Unix init script
Here is a Linux/Unix init script that can be very useful:
-``_
+http://github.com/jboner/akka/blob/master/scripts/akka-init-script.sh
Copy and modify as needed.
+
Simple startup shell script
^^^^^^^^^^^^^^^^^^^^^^^^^^^
-This little script might help a bit. Just make sure you have the Akka distribution in the '$AKKA_HOME/dist' directory and then invoke this script to start up the kernel. The distribution is created in the './dist' dir for you if you invoke 'sbt dist'.
+This little script might help a bit. Just make sure you have the Akka
+distribution in the '$AKKA_HOME/dist' directory and then invoke this script to
+start up the kernel. The distribution is created in the './dist' dir for you if
+you invoke 'sbt dist'.
-``_
+http://github.com/jboner/akka/blob/master/scripts/run_akka.sh
Copy and modify as needed.
-----
Dependencies
------------
-If you are managing dependencies by hand you can find out what all the compile dependencies are for each module by looking in the ``lib_managed/compile`` directories. For example, you can run this to create a listing of dependencies (providing you have the source code and have run ``sbt update``):
+If you are managing dependencies by hand you can find out what all the compile
+dependencies are for each module by looking in the ``lib_managed/compile``
+directories. For example, you can run this to create a listing of dependencies
+(providing you have the source code and have run ``sbt update``)::
-::
+ cd akka
+ ls -1 */lib_managed/compile
- cd akka
- ls -1 */lib_managed/compile
-Here are the dependencies used by the Akka core modules.
+Dependencies used by the Akka core modules
+------------------------------------------
akka-actor
^^^^^^^^^^
@@ -247,8 +267,9 @@ akka-http
* jsr250-api-1.0.jar
* jsr311-api-1.1.jar
-----
-Here are the dependencies used by the Akka modules.
+
+Dependencies used by the Akka modules
+-------------------------------------
akka-amqp
^^^^^^^^^
diff --git a/akka-docs/intro/configuration.rst b/akka-docs/intro/configuration.rst
new file mode 100644
index 0000000000..fd19b71db4
--- /dev/null
+++ b/akka-docs/intro/configuration.rst
@@ -0,0 +1,31 @@
+Configuration
+=============
+
+Specifying the configuration file
+---------------------------------
+
+If you don't specify a configuration file then Akka uses default values. If
+you want to override these then you should edit the ``akka.conf`` file in the
+``AKKA_HOME/config`` directory. This config inherits from the
+``akka-reference.conf`` file that you see below. Use your ``akka.conf`` to override
+any property in the reference config.
+
+The config can be specified in various ways:
+
+* Define the ``-Dakka.config=...`` system property option
+
+* Put an ``akka.conf`` file on the classpath
+
+* Define the ``AKKA_HOME`` environment variable pointing to the root of the Akka
+ distribution. The config is taken from the ``AKKA_HOME/config`` directory. You
+ can also point to the AKKA_HOME by specifying the ``-Dakka.home=...`` system
+ property option.
+
+
+Defining the configuration file
+-------------------------------
+
+Here is the reference configuration file:
+
+.. literalinclude:: ../../config/akka-reference.conf
+ :language: none
diff --git a/akka-docs/intro/diagnostics-window.png b/akka-docs/intro/diagnostics-window.png
new file mode 100644
index 0000000000..7036fd96fb
Binary files /dev/null and b/akka-docs/intro/diagnostics-window.png differ
diff --git a/akka-docs/intro/example-code.png b/akka-docs/intro/example-code.png
new file mode 100644
index 0000000000..cd7e09f880
Binary files /dev/null and b/akka-docs/intro/example-code.png differ
diff --git a/akka-docs/manual/examples/Pi.scala b/akka-docs/intro/examples/Pi.scala
similarity index 100%
rename from akka-docs/manual/examples/Pi.scala
rename to akka-docs/intro/examples/Pi.scala
diff --git a/akka-docs/manual/getting-started-first-java.rst b/akka-docs/intro/getting-started-first-java.rst
similarity index 95%
rename from akka-docs/manual/getting-started-first-java.rst
rename to akka-docs/intro/getting-started-first-java.rst
index b4889a42e3..df032a8970 100644
--- a/akka-docs/manual/getting-started-first-java.rst
+++ b/akka-docs/intro/getting-started-first-java.rst
@@ -184,12 +184,6 @@ We also need to edit the ``pom.xml`` build file. Let's add the dependency we nee
-So, now we are all set. Just one final thing to do; make Maven download the dependencies it needs. That can be done by invoking::
-
- $ mvn package
-
-Maven itself needs a whole bunch of dependencies but our project will only need one; ``akka-actor-1.1.jar``. Maven downloads that as well.
-
Start writing the code
----------------------
@@ -235,15 +229,15 @@ Messages sent to actors should always be immutable to avoid sharing mutable stat
static class Calculate {}
static class Work {
- private final int arg;
+ private final int start;
private final int nrOfElements;
- public Work(int arg, int nrOfElements) {
- this.arg = arg;
+ public Work(int start, int nrOfElements) {
+ this.start = start;
this.nrOfElements = nrOfElements;
}
- public int getArg() { return arg; }
+ public int getStart() { return start; }
public int getNrOfElements() { return nrOfElements; }
}
@@ -271,7 +265,7 @@ Now we can create the worker actor. This is done by extending in the ``UntypedA
Work work = (Work) message;
// perform the work
- double result = calculatePiFor(work.getArg(), work.getNrOfElements())
+ double result = calculatePiFor(work.getStart(), work.getNrOfElements())
// reply with the result
getContext().replyUnsafe(new Result(result));
@@ -285,10 +279,10 @@ As you can see we have now created an ``UntypedActor`` with a ``onReceive`` meth
The only thing missing in our ``Worker`` actor is the implementation on the ``calculatePiFor(..)`` method::
// define the work
- private double calculatePiFor(int arg, int nrOfElements) {
+ private double calculatePiFor(int start, int nrOfElements) {
double acc = 0.0;
- for (int i = arg * nrOfElements; i <= ((arg + 1) * nrOfElements - 1); i++) {
- acc += 4 * Math.pow(-1, i) / (2 * i + 1);
+ for (int i = start * nrOfElements; i <= ((start + 1) * nrOfElements - 1); i++) {
+ acc += 4 * (1 - (i % 2) * 2) / (2 * i + 1);
}
return acc;
}
@@ -438,8 +432,8 @@ Let's capture this in code::
if (message instanceof Calculate) {
// schedule work
- for (int arg = 0; arg < nrOfMessages; arg++) {
- router.sendOneWay(new Work(arg, nrOfElements), getContext());
+ for (int start = 0; start < nrOfMessages; start++) {
+ router.sendOneWay(new Work(start, nrOfElements), getContext());
}
// send a PoisonPill to all workers telling them to shut down themselves
@@ -525,15 +519,15 @@ Before we package it up and run it, let's take a look at the full code now, with
static class Calculate {}
static class Work {
- private final int arg;
+ private final int start;
private final int nrOfElements;
- public Work(int arg, int nrOfElements) {
- this.arg = arg;
+ public Work(int start, int nrOfElements) {
+ this.start = start;
this.nrOfElements = nrOfElements;
}
- public int getArg() { return arg; }
+ public int getStart() { return start; }
public int getNrOfElements() { return nrOfElements; }
}
@@ -553,10 +547,10 @@ Before we package it up and run it, let's take a look at the full code now, with
static class Worker extends UntypedActor {
// define the work
- private double calculatePiFor(int arg, int nrOfElements) {
+ private double calculatePiFor(int start, int nrOfElements) {
double acc = 0.0;
- for (int i = arg * nrOfElements; i <= ((arg + 1) * nrOfElements - 1); i++) {
- acc += 4 * Math.pow(-1, i) / (2 * i + 1);
+ for (int i = start * nrOfElements; i <= ((start + 1) * nrOfElements - 1); i++) {
+ acc += 4 * (1 - (i % 2) * 2) / (2 * i + 1);
}
return acc;
}
@@ -567,7 +561,7 @@ Before we package it up and run it, let's take a look at the full code now, with
Work work = (Work) message;
// perform the work
- double result = calculatePiFor(work.getArg(), work.getNrOfElements())
+ double result = calculatePiFor(work.getStart(), work.getNrOfElements())
// reply with the result
getContext().replyUnsafe(new Result(result));
@@ -628,8 +622,8 @@ Before we package it up and run it, let's take a look at the full code now, with
if (message instanceof Calculate) {
// schedule work
- for (int arg = 0; arg < nrOfMessages; arg++) {
- router.sendOneWay(new Work(arg, nrOfElements), getContext());
+ for (int start = 0; start < nrOfMessages; start++) {
+ router.sendOneWay(new Work(start, nrOfElements), getContext());
}
// send a PoisonPill to all workers telling them to shut down themselves
@@ -741,6 +735,8 @@ Conclusion
We have learned how to create our first Akka project using Akka's actors to speed up a computation-intensive problem by scaling out on multi-core processors (also known as scaling up). We have also learned to compile and run an Akka project using either the tools on the command line or the SBT build system.
+If you have a multi-core machine then I encourage you to try out a different number of workers (the number of worker actors) by tweaking the ``nrOfWorkers`` variable to, for example, 2, 4, 6 or 8, to see the performance improvement gained by scaling up.
+
Now we are ready to take on more advanced problems. In the next tutorial we will build on this one, refactor it into more idiomatic Akka and Scala code, and introduce a few new concepts and abstractions. Whenever you feel ready, join me in the `Getting Started Tutorial: Second Chapter `_.
Happy hakking.
diff --git a/akka-docs/intro/getting-started-first-scala-eclipse.rst b/akka-docs/intro/getting-started-first-scala-eclipse.rst
new file mode 100644
index 0000000000..d34f242e7d
--- /dev/null
+++ b/akka-docs/intro/getting-started-first-scala-eclipse.rst
@@ -0,0 +1,419 @@
+Getting Started Tutorial (Scala with Eclipse): First Chapter
+============================================================
+
+Introduction
+------------
+
+Welcome to the first tutorial on how to get started with Akka and Scala. We assume that you already know what Akka and Scala are and will now focus on the steps necessary to start your first project. We will be using `Eclipse `_, and the `Scala plugin for Eclipse `_.
+
+The sample application that we will create is using actors to calculate the value of Pi. Calculating Pi is a CPU intensive operation and we will utilize Akka Actors to write a concurrent solution that scales out to multi-core processors. This sample will be extended in future tutorials to use Akka Remote Actors to scale out on multiple machines in a cluster.
+
+We will be using an algorithm that is called "embarrassingly parallel" which just means that each job is completely isolated and not coupled with any other job. Since this algorithm is so parallelizable it suits the actor model very well.
+
+Here is the formula for the algorithm we will use:
+
+.. image:: pi-formula.png
+
+In this particular algorithm the master splits the series into chunks which are sent out to each worker actor to be processed. When each worker has processed its chunk it sends a result back to the master which aggregates the total result.
+
+Tutorial source code
+--------------------
+
+If you don't want to type in the code and/or set up an SBT project then you can check out the full tutorial from the Akka GitHub repository. It is in the ``akka-tutorials/akka-tutorial-first`` module. You can also browse it online `here `_, with the actual source code `here `_.
+
+Prerequisites
+-------------
+
+This tutorial assumes that you have Java 1.6 or later installed on your machine and ``java`` on your ``PATH``. You also need to know how to run commands in a shell (ZSH, Bash, DOS etc.) and a recent version of Eclipse (at least `3.6 - Helios `_).
+
+If you want to run the example from the command line as well, you need to make sure that ``$JAVA_HOME`` environment variable is set to the root of the Java distribution. You also need to make sure that the ``$JAVA_HOME/bin`` is on your ``PATH``::
+
+ $ export JAVA_HOME=..root of java distribution..
+ $ export PATH=$PATH:$JAVA_HOME/bin
+
+You can test your installation by invoking ``java``::
+
+ $ java -version
+ java version "1.6.0_24"
+ Java(TM) SE Runtime Environment (build 1.6.0_24-b07-334-10M3326)
+ Java HotSpot(TM) 64-Bit Server VM (build 19.1-b02-334, mixed mode)
+
+Downloading and installing Akka
+-------------------------------
+
+To build and run the tutorial sample from the command line, you have to download Akka. If you prefer to use SBT to build and run the sample then you can skip this section and jump to the next one.
+
+Let's get the ``akka-1.1`` distribution of Akka core (not Akka Modules) from `http://akka.io/downloads `_. Once you have downloaded the distribution unzip it in the folder you would like to have Akka installed in, in my case I choose to install it in ``/Users/jboner/tools/``, simply by unzipping it to this directory.
+
+You need to do one more thing in order to install Akka properly: set the ``AKKA_HOME`` environment variable to the root of the distribution. In my case I'm opening up a shell, navigating down to the distribution, and setting the ``AKKA_HOME`` variable::
+
+ $ cd /Users/jboner/tools/akka-1.1
+ $ export AKKA_HOME=`pwd`
+ $ echo $AKKA_HOME
+ /Users/jboner/tools/akka-1.1
+
+The distribution looks like this::
+
+ $ ls -l
+ total 16944
+ drwxr-xr-x 7 jboner staff 238 Apr 6 11:15 .
+ drwxr-xr-x 28 jboner staff 952 Apr 6 11:16 ..
+ drwxr-xr-x 17 jboner staff 578 Apr 6 11:16 deploy
+ drwxr-xr-x 26 jboner staff 884 Apr 6 11:16 dist
+ drwxr-xr-x 3 jboner staff 102 Apr 6 11:15 lib_managed
+ -rwxr-xr-x 1 jboner staff 8674105 Apr 6 11:15 scala-library.jar
+ drwxr-xr-x 4 jboner staff 136 Apr 6 11:16 scripts
+
+- In the ``dist`` directory we have the Akka JARs, including sources and docs.
+- In the ``lib_managed/compile`` directory we have Akka's dependency JARs.
+- In the ``deploy`` directory we have the sample JARs.
+- In the ``scripts`` directory we have scripts for running Akka.
+- Finally ``scala-library.jar`` is the JAR for the latest Scala distribution that Akka depends on.
+
+The only JAR we will need for this tutorial (apart from the ``scala-library.jar`` JAR) is the ``akka-actor-1.1.jar`` JAR in the ``dist`` directory. This is a self-contained JAR with zero dependencies and contains everything we need to write a system using Actors.
+
+Akka is very modular and has many JARs for containing different features. The core distribution has seven modules:
+
+- ``akka-actor-1.1.jar`` -- Standard Actors
+- ``akka-typed-actor-1.1.jar`` -- Typed Actors
+- ``akka-remote-1.1.jar`` -- Remote Actors
+- ``akka-stm-1.1.jar`` -- STM (Software Transactional Memory), transactors and transactional datastructures
+- ``akka-http-1.1.jar`` -- Akka Mist for continuation-based asynchronous HTTP and also Jersey integration
+- ``akka-slf4j-1.1.jar`` -- SLF4J Event Handler Listener
+- ``akka-testkit-1.1.jar`` -- Toolkit for testing Actors
+
+We also have Akka Modules containing add-on modules outside the core of Akka. You can download the Akka Modules distribution from TODO. It contains Akka core as well. We will not be needing any modules there today, but for your information the module JARs are these:
+
+- ``akka-kernel-1.1.jar`` -- Akka microkernel for running a bare-bones mini application server (embeds Jetty etc.)
+- ``akka-amqp-1.1.jar`` -- AMQP integration
+- ``akka-camel-1.1.jar`` -- Apache Camel Actors integration (it's the best way to have your Akka application communicate with the rest of the world)
+- ``akka-camel-typed-1.1.jar`` -- Apache Camel Typed Actors integration
+- ``akka-scalaz-1.1.jar`` -- Support for the Scalaz library
+- ``akka-spring-1.1.jar`` -- Spring framework integration
+- ``akka-osgi-dependencies-bundle-1.1.jar`` -- OSGi support
+
+Downloading and installing the Scala IDE for Eclipse
+----------------------------------------------------
+
+If you want to use Eclipse for coding your Akka tutorial, you need to install the Scala plugin for Eclipse. This plugin comes with its own version of Scala, so if you don't plan to run the example from the command line, you don't need to download the Scala distribution (and you can skip the next section).
+
+You can install this plugin using the regular update mechanism. First choose a version of the IDE from `http://download.scala-ide.org `_. We recommend you choose 2.0.x, which comes with Scala 2.9. Copy the corresponding URL and then choose ``Help/Install New Software`` and paste the URL you just copied. You should see something similar to the following image.
+
+.. image:: install-beta2-updatesite.png
+
+Make sure you select both the ``JDT Weaving for Scala`` and the ``Scala IDE for Eclipse`` plugins. The other plugin is optional, and contains the source code of the plugin itself.
+
+Once the installation is finished, you need to restart Eclipse. The first time the plugin starts it will open a diagnostics window and offer to fix several settings, such as the delay for content assist (code-completion) or the shown completion proposal types.
+
+.. image:: diagnostics-window.png
+
+Accept the recommended settings, and follow the instructions if you need to increase the heap size of Eclipse.
+
+Check that the installation succeeded by creating a new Scala project (``File/New>Scala Project``), and typing some code. You should have content-assist, hyperlinking to definitions, instant error reporting, and so on.
+
+.. image:: example-code.png
+
+You are ready to code now!
+
+Downloading and installing Scala
+--------------------------------
+
+To build and run the tutorial sample from the command line, you have to install the Scala distribution. If you prefer to use Eclipse to build and run the sample then you can skip this section and jump to the next one.
+
+Scala can be downloaded from `http://www.scala-lang.org/downloads `_. Browse there and download the Scala 2.9.0.RC1 release. If you pick the ``tgz`` or ``zip`` distribution then just unzip it where you want it installed. If you pick the IzPack Installer then double click on it and follow the instructions.
+
+You also need to make sure that the ``scala-2.9.0.RC1/bin`` (if that is the directory where you installed Scala) is on your ``PATH``::
+
+ $ export PATH=$PATH:scala-2.9.0.RC1/bin
+
+You can test your installation by invoking scala::
+
+ $ scala -version
+ Scala code runner version 2.9.0.RC1 -- Copyright 2002-2011, LAMP/EPFL
+
+Looks like we are all good. Finally let's create a source file ``Pi.scala`` for the tutorial and put it in the root of the Akka distribution in the ``tutorial`` directory (you have to create it first).
+
+Some tools require you to set the ``SCALA_HOME`` environment variable to the root of the Scala distribution, however Akka does not require that.
+
+Creating an Akka project in Eclipse
+---------------------------------------
+
+If you have not already done so, now is the time to create an Eclipse project for our tutorial. Use the ``New Scala Project`` wizard and accept the default settings. Once the project is open, we need to add the akka libraries to the *build path*. Right click on the project and choose ``Properties``, then click on ``Java Build Path``. Go to ``Libraries`` and click on ``Add External Jars..``, then navigate to the location where you installed akka and choose ``akka-actor.jar``. You should see something similar to this:
+
+.. image:: build-path.png
+
+Using SBT in Eclipse
+^^^^^^^^^^^^^^^^^^^^
+
+If you are an `SBT `_ user, you can follow the :doc:`Akka Tutorial in Scala ` and additionally install the ``sbt-eclipse`` plugin. This adds support for generating Eclipse project files from your SBT project. You need to update your SBT plugins definition in ``project/plugins``::
+
+ import sbt._
+
+ class TutorialPlugins(info: ProjectInfo) extends PluginDefinition(info) {
+ // eclipsify plugin
+ lazy val eclipse = "de.element34" % "sbt-eclipsify" % "0.7.0"
+
+ val akkaRepo = "Akka Repo" at "http://akka.io/repository"
+ val akkaPlugin = "se.scalablesolutions.akka" % "akka-sbt-plugin" % "1.1"
+ }
+
+and then update your SBT project definition by mixing in ``Eclipsify`` in your project definition::
+
+ import sbt._
+ import de.element34.sbteclipsify._
+
+ class MySbtProject(info: ProjectInfo) extends DefaultProject(info)
+ with Eclipsify with AkkaProject {
+ // the project definition here
+ // akka dependencies
+ }
+
+Then run the ``eclipse`` target to generate the Eclipse project::
+
+ dragos@dragos-imac pi $ sbt eclipse
+ [info] Building project AkkaPi 1.0 against Scala 2.9.0.RC1
+ [info] using MySbtProject with sbt 0.7.4 and Scala 2.7.7
+ [info]
+ [info] == eclipse ==
+ [info] Creating eclipse project...
+ [info] == eclipse ==
+ [success] Successful.
+ [info]
+ [info] Total time: 0 s, completed Apr 20, 2011 2:48:03 PM
+ [info]
+ [info] Total session time: 1 s, completed Apr 20, 2011 2:48:03 PM
+ [success] Build completed successfully.
+
+Next you need to import this project in Eclipse, by choosing ``Eclipse/Import.. Existing Projects into Workspace``. Navigate to the directory where you defined your SBT project and choose import:
+
+.. image:: import-project.png
+
+Now we have the basis for an Akka Eclipse application, so we can..
+
+Start writing the code
+----------------------
+
+The design we are aiming for is to have one ``Master`` actor initiating the computation, creating a set of ``Worker`` actors. Then it splits up the work into discrete chunks, and sends these chunks to the different workers in a round-robin fashion. The master waits until all the workers have completed their work and sent back results for aggregation. When computation is completed the master prints out the result, shuts down all workers and then itself.
+
+With this in mind, let's now create the messages that we want to have flowing in the system.
+
+Creating the messages
+---------------------
+
+We start by creating a package for our application, let's call it ``akka.tutorial.first.scala``. We start by creating case classes for each type of message in our application, so we can place them in a hierarchy, call it ``PiMessage``. Right click on the package and choose ``New Scala Class``, and enter ``PiMessage`` for the name of the class.
+
+We need three different messages:
+
+- ``Calculate`` -- sent to the ``Master`` actor to start the calculation
+- ``Work`` -- sent from the ``Master`` actor to the ``Worker`` actors containing the work assignment
+- ``Result`` -- sent from the ``Worker`` actors to the ``Master`` actor containing the result from the worker's calculation
+
+Messages sent to actors should always be immutable to avoid sharing mutable state. In Scala we have 'case classes' which make excellent messages. So let's start by creating three messages as case classes. We also create a common base trait for our messages (that we define as being ``sealed`` in order to prevent creating messages outside our control)::
+
+ package akka.tutorial.first.scala
+
+ sealed trait PiMessage
+
+ case object Calculate extends PiMessage
+
+ case class Work(start: Int, nrOfElements: Int) extends PiMessage
+
+ case class Result(value: Double) extends PiMessage
+
+Creating the worker
+-------------------
+
+Now we can create the worker actor. Create a new class called ``Worker`` as before. We need to mix in the ``Actor`` trait and define the ``receive`` method. The ``receive`` method defines our message handler. We expect it to be able to handle the ``Work`` message so we need to add a handler for this message::
+
+ class Worker extends Actor {
+ def receive = {
+ case Work(start, nrOfElements) =>
+ self reply Result(calculatePiFor(start, nrOfElements)) // perform the work
+ }
+ }
+
+The ``Actor`` trait is defined in ``akka.actor`` and you can either import it explicitly, or let Eclipse do it for you when it cannot resolve the ``Actor`` trait. The quick fix option (``Ctrl-F1``) will offer two options:
+
+.. image:: quickfix.png
+
+Choose the Akka Actor and move on.
+
+As you can see we have now created an ``Actor`` with a ``receive`` method as a handler for the ``Work`` message. In this handler we invoke the ``calculatePiFor(..)`` method, wrap the result in a ``Result`` message and send it back to the original sender using ``self.reply``. In Akka the sender reference is implicitly passed along with the message so that the receiver can always reply or store away the sender reference for future use.
+
+The only thing missing in our ``Worker`` actor is the implementation of the ``calculatePiFor(..)`` method. While there are many ways we can implement this algorithm in Scala, in this introductory tutorial we have chosen an imperative style using a for comprehension and an accumulator::
+
+ def calculatePiFor(start: Int, nrOfElements: Int): Double = {
+ var acc = 0.0
+ for (i <- start until (start + nrOfElements))
+ acc += 4 * (1 - (i % 2) * 2) / (2 * i + 1)
+ acc
+ }
+
+Creating the master
+-------------------
+
+Now create a new class for the master actor. The master actor is a little bit more involved. In its constructor we need to create the workers (the ``Worker`` actors) and start them. We will also wrap them in a load-balancing router to make it easier to spread out the work evenly between the workers. First we need to add some imports::
+
+ import akka.actor.{Actor, PoisonPill}
+ import akka.routing.{Routing, CyclicIterator}
+ import Routing._
+ import akka.dispatch.Dispatchers
+
+ import java.util.concurrent.CountDownLatch
+
+and then we can create the workers::
+
+ // create the workers
+ val workers = Vector.fill(nrOfWorkers)(actorOf[Worker].start())
+
+ // wrap them with a load-balancing router
+ val router = Routing.loadBalancerActor(CyclicIterator(workers)).start()
+
+As you can see we are using the ``actorOf`` factory method to create actors, this method returns as an ``ActorRef`` which is a reference to our newly created actor. This method is available in the ``Actor`` object but is usually imported::
+
+ import akka.actor.Actor.actorOf
+
+There are two versions of ``actorOf``; one of them taking an actor type and the other one an instance of an actor. The former one (``actorOf[MyActor]``) is used when the actor class has a no-argument constructor while the second one (``actorOf(new MyActor(..))``) is used when the actor class has a constructor that takes arguments. This is the only way to create an instance of an Actor and the ``actorOf`` method ensures this. The latter version is using call-by-name and lazily creates the actor within the scope of the ``actorOf`` method. The ``actorOf`` method instantiates the actor and returns, not an instance of the actor, but an instance of an ``ActorRef``. This reference is the handle through which you communicate with the actor. It is immutable, serializable and location-aware meaning that it "remembers" its original actor even if it is sent to other nodes across the network and can be seen as the equivalent to the Erlang actor's PID.
+
+The actor's life-cycle is:
+
+- Created -- ``Actor.actorOf[MyActor]`` -- can **not** receive messages
+- Started -- ``actorRef.start()`` -- can receive messages
+- Stopped -- ``actorRef.stop()`` -- can **not** receive messages
+
+Once the actor has been stopped it is dead and can not be started again.
+
+Now we have a router that is representing all our workers in a single abstraction. If you paid attention to the code above, you saw that we were using the ``nrOfWorkers`` variable. This variable and others we have to pass to the ``Master`` actor in its constructor. So now let's create the master actor. We have to pass in three integer variables:
+
+- ``nrOfWorkers`` -- defining how many workers we should start up
+- ``nrOfMessages`` -- defining how many number chunks to send out to the workers
+- ``nrOfElements`` -- defining how big the number chunks sent to each worker should be
+
+Here is the master actor::
+
+ class Master(
+ nrOfWorkers: Int, nrOfMessages: Int, nrOfElements: Int, latch: CountDownLatch)
+ extends Actor {
+
+ var pi: Double = _
+ var nrOfResults: Int = _
+ var start: Long = _
+
+ // create the workers
+ val workers = Vector.fill(nrOfWorkers)(actorOf[Worker].start())
+
+ // wrap them with a load-balancing router
+ val router = Routing.loadBalancerActor(CyclicIterator(workers)).start()
+
+ def receive = { ... }
+
+ override def preStart {
+ start = System.currentTimeMillis
+ }
+
+ override def postStop {
+ // tell the world that the calculation is complete
+ println(
+ "\n\tPi estimate: \t\t%s\n\tCalculation time: \t%s millis"
+ .format(pi, (System.currentTimeMillis - start)))
+ latch.countDown()
+ }
+ }
+
+A couple of things are worth explaining further.
+
+First, we are passing in a ``java.util.concurrent.CountDownLatch`` to the ``Master`` actor. This latch is only used for plumbing (in this specific tutorial), to have a simple way of letting the outside world know when the master can deliver the result and shut down. In more idiomatic Akka code, as we will see in part two of this tutorial series, we would not use a latch but other abstractions and functions like ``Channel``, ``Future`` and ``!!!`` to achieve the same thing in a non-blocking way. But for simplicity let's stick to a ``CountDownLatch`` for now.
+
+Second, we are adding a couple of life-cycle callback methods; ``preStart`` and ``postStop``. In the ``preStart`` callback we are recording the time when the actor is started and in the ``postStop`` callback we are printing out the result (the approximation of Pi) and the time it took to calculate it. In this call we also invoke ``latch.countDown`` to tell the outside world that we are done.
+
+But we are not done yet. We are missing the message handler for the ``Master`` actor. This message handler needs to be able to react to two different messages:
+
+- ``Calculate`` -- which should start the calculation
+- ``Result`` -- which should aggregate the different results
+
+The ``Calculate`` handler is sending out work to all the ``Worker`` actors and after doing that it also sends a ``Broadcast(PoisonPill)`` message to the router, which will send out the ``PoisonPill`` message to all the actors it is representing (in our case all the ``Worker`` actors). ``PoisonPill`` is a special kind of message that tells the receiver to shut itself down using the normal shutdown method; ``self.stop``. We also send a ``PoisonPill`` to the router itself (since it's also an actor that we want to shut down).
+
+The ``Result`` handler is simpler, here we get the value from the ``Result`` message and aggregate it to our ``pi`` member variable. We also keep track of how many results we have received back, and if that matches the number of tasks sent out, the ``Master`` actor considers itself done and shuts down.
+
+Let's capture this in code::
+
+ // message handler
+ def receive = {
+ case Calculate =>
+ // schedule work
+ for (i <- 0 until nrOfMessages) router ! Work(i * nrOfElements, nrOfElements)
+
+ // send a PoisonPill to all workers telling them to shut down themselves
+ router ! Broadcast(PoisonPill)
+
+ // send a PoisonPill to the router, telling him to shut himself down
+ router ! PoisonPill
+
+ case Result(value) =>
+ // handle result from the worker
+ pi += value
+ nrOfResults += 1
+ if (nrOfResults == nrOfMessages) self.stop()
+ }
+
+Bootstrap the calculation
+-------------------------
+
+Now the only thing that is left to implement is the runner that should bootstrap and run the calculation for us. We do that by creating an object that we call ``Pi``, here we can extend the ``App`` trait in Scala, which means that we will be able to run this as an application directly from the command line or using the Eclipse Runner.
+
+The ``Pi`` object is a perfect container module for our actors and messages, so let's put them all there. We also create a method ``calculate`` in which we start up the ``Master`` actor and wait for it to finish::
+
+ object Pi extends App {
+
+ calculate(nrOfWorkers = 4, nrOfElements = 10000, nrOfMessages = 10000)
+
+ ... // actors and messages
+
+ def calculate(nrOfWorkers: Int, nrOfElements: Int, nrOfMessages: Int) {
+
+ // this latch is only plumbing to know when the calculation is completed
+ val latch = new CountDownLatch(1)
+
+ // create the master
+ val master = actorOf(
+ new Master(nrOfWorkers, nrOfMessages, nrOfElements, latch)).start()
+
+ // start the calculation
+ master ! Calculate
+
+ // wait for master to shut down
+ latch.await()
+ }
+ }
+
+That's it. Now we are done.
+
+Run it from Eclipse
+-------------------
+
+Eclipse builds your project on every save when ``Project/Build Automatically`` is set. If not, bring you project up to date by clicking ``Project/Build Project``. If there are no compilation errors, you can right-click in the editor where ``Pi`` is defined, and choose ``Run as.. /Scala application``. If everything works fine, you should see::
+
+ AKKA_HOME is defined as [/Users/jboner/tools/akka-modules-1.1-M1/]
+ loading config from [/Users/jboner/tools/akka-modules-1.1-M1/config/akka.conf].
+
+ Pi estimate: 3.1435501812459323
+ Calculation time: 858 millis
+
+If you have not defined the ``AKKA_HOME`` environment variable then Akka can't find the ``akka.conf`` configuration file and will print out a ``Can’t load akka.conf`` warning. This is ok since it will then just use the defaults.
+
+You can also define a new Run configuration, by going to ``Run/Run Configurations``. Create a new ``Scala application`` and choose the tutorial project and the main class to be ``akkatutorial.Pi``. You can pass additional command line arguments to the JVM on the ``Arguments`` page, for instance to define where ``akka.conf`` is:
+
+.. image:: run-config.png
+
+Once you finished your run configuration, click ``Run``. You should see the same output in the ``Console`` window. You can use the same configuration for debugging the application, by choosing ``Run/Debug History`` or just ``Debug As``.
+
+Conclusion
+----------
+
+We have learned how to create our first Akka project using Akka's actors to speed up a computation-intensive problem by scaling out on multi-core processors (also known as scaling up). We have also learned to compile and run an Akka project using Eclipse.
+
+If you have a multi-core machine then I encourage you to try out a different number of workers (the number of worker actors) by tweaking the ``nrOfWorkers`` variable to, for example, 2, 4, 6 or 8, to see the performance improvement gained by scaling up.
+
+Now we are ready to take on more advanced problems. In the next tutorial we will build on this one, refactor it into more idiomatic Akka and Scala code, and introduce a few new concepts and abstractions. Whenever you feel ready, join me in the `Getting Started Tutorial: Second Chapter `_.
+
+Happy hakking.
diff --git a/akka-docs/manual/getting-started-first-scala.rst b/akka-docs/intro/getting-started-first-scala.rst
similarity index 98%
rename from akka-docs/manual/getting-started-first-scala.rst
rename to akka-docs/intro/getting-started-first-scala.rst
index 3bd445c390..17c4d265c4 100644
--- a/akka-docs/manual/getting-started-first-scala.rst
+++ b/akka-docs/intro/getting-started-first-scala.rst
@@ -238,7 +238,7 @@ The only thing missing in our ``Worker`` actor is the implementation on the ``ca
def calculatePiFor(start: Int, nrOfElements: Int): Double = {
var acc = 0.0
for (i <- start until (start + nrOfElements))
- acc += 4 * math.pow(-1, i) / (2 * i + 1)
+ acc += 4 * (1 - (i % 2) * 2) / (2 * i + 1)
acc
}
@@ -404,7 +404,7 @@ But before we package it up and run it, let's take a look at the full code now,
def calculatePiFor(start: Int, nrOfElements: Int): Double = {
var acc = 0.0
for (i <- start until (start + nrOfElements))
- acc += 4 * math.pow(-1, i) / (2 * i + 1)
+ acc += 4 * (1 - (i % 2) * 2) / (2 * i + 1)
acc
}
@@ -435,7 +435,7 @@ But before we package it up and run it, let's take a look at the full code now,
def receive = {
case Calculate =>
// schedule work
- //for (arg <- 0 until nrOfMessages) router ! Work(arg, nrOfElements)
+ //for (start <- 0 until nrOfMessages) router ! Work(start, nrOfElements)
for (i <- 0 until nrOfMessages) router ! Work(i * nrOfElements, nrOfElements)
// send a PoisonPill to all workers telling them to shut down themselves
@@ -535,6 +535,8 @@ Conclusion
We have learned how to create our first Akka project using Akka's actors to speed up a computation-intensive problem by scaling out on multi-core processors (also known as scaling up). We have also learned to compile and run an Akka project using either the tools on the command line or the SBT build system.
+If you have a multi-core machine then I encourage you to try out a different number of workers (the number of worker actors) by tweaking the ``nrOfWorkers`` variable to, for example, 2, 4, 6 or 8, to see the performance improvement gained by scaling up.
+
Now we are ready to take on more advanced problems. In the next tutorial we will build on this one, refactor it into more idiomatic Akka and Scala code, and introduce a few new concepts and abstractions. Whenever you feel ready, join me in the `Getting Started Tutorial: Second Chapter `_.
Happy hakking.
diff --git a/akka-docs/manual/getting-started-first.rst b/akka-docs/intro/getting-started-first.rst
similarity index 100%
rename from akka-docs/manual/getting-started-first.rst
rename to akka-docs/intro/getting-started-first.rst
diff --git a/akka-docs/intro/import-project.png b/akka-docs/intro/import-project.png
new file mode 100644
index 0000000000..5774e9d412
Binary files /dev/null and b/akka-docs/intro/import-project.png differ
diff --git a/akka-docs/intro/index.rst b/akka-docs/intro/index.rst
new file mode 100644
index 0000000000..8df1a87a5d
--- /dev/null
+++ b/akka-docs/intro/index.rst
@@ -0,0 +1,12 @@
+Introduction
+============
+
+.. toctree::
+ :maxdepth: 2
+
+ why-akka
+ getting-started-first-scala
+ getting-started-first-scala-eclipse
+ getting-started-first-java
+ building-akka
+ configuration
diff --git a/akka-docs/intro/install-beta2-updatesite.png b/akka-docs/intro/install-beta2-updatesite.png
new file mode 100644
index 0000000000..4eb85682ad
Binary files /dev/null and b/akka-docs/intro/install-beta2-updatesite.png differ
diff --git a/akka-docs/manual/pi-formula.png b/akka-docs/intro/pi-formula.png
similarity index 100%
rename from akka-docs/manual/pi-formula.png
rename to akka-docs/intro/pi-formula.png
diff --git a/akka-docs/intro/quickfix.png b/akka-docs/intro/quickfix.png
new file mode 100644
index 0000000000..f4f2811e52
Binary files /dev/null and b/akka-docs/intro/quickfix.png differ
diff --git a/akka-docs/intro/run-config.png b/akka-docs/intro/run-config.png
new file mode 100644
index 0000000000..912f958223
Binary files /dev/null and b/akka-docs/intro/run-config.png differ
diff --git a/akka-docs/intro/why-akka.rst b/akka-docs/intro/why-akka.rst
new file mode 100644
index 0000000000..512a669b2f
--- /dev/null
+++ b/akka-docs/intro/why-akka.rst
@@ -0,0 +1,68 @@
+Why Akka?
+=========
+
+What features can the Akka platform offer, over the competition?
+----------------------------------------------------------------
+
+Akka is a unified runtime and programming model for:
+
+- Scale up (Concurrency)
+- Scale out (Remoting)
+- Fault tolerance
+
+One thing to learn and admin, with high cohesion and coherent semantics.
+
+Akka is a very scalable piece of software, not only in the performance sense,
+but in the size of applications it is useful for. The core of Akka, akka-actor,
+is very small and easily dropped into an existing project where you need
+asynchronicity and lockless concurrency without hassle.
+
+You can choose to include only the parts of akka you need in your application
+and then there's the whole package, the Akka Microkernel, which is a standalone
+container to deploy your Akka application in. With CPUs growing more and more
+cores every cycle, Akka is the alternative that provides outstanding performance
+even if you're only running it on one machine. Akka also supplies a wide array
+of concurrency-paradigms, allowing for users to choose the right tool for the
+job.
+
+The integration possibilities for Akka Actors are immense through the Apache
+Camel integration. We provide Software Transactional Memory concurrency control
+through the excellent Multiverse project, and have integrated that with Actors,
+creating Transactors for coordinated concurrent transactions. We have Agents and
+Dataflow concurrency as well.
+
+
+What's a good use-case for Akka?
+--------------------------------
+
+(Web, Cloud, Application) Services - Actors lets you manage service failures
+(Supervisors), load management (back-off strategies, timeouts and
+processing-isolation), both horizontal and vertical scalability (add more cores
+and/or add more machines). Think payment processing, invoicing, order matching,
+datacrunching, messaging. Really any highly transactional systems like banking,
+betting, games.
+
+Here's what some of the Akka users have to say about how they are using Akka:
+http://stackoverflow.com/questions/4493001/good-use-case-for-akka
+
+
+Cloudy Akka
+-----------
+
+And that's all in the ApacheV2-licensed open source project. On top of that we
+have a commercial product called Cloudy Akka which provides the following
+features:
+
+#. Dynamically clustered ActorRegistry with both automatic and manual migration
+ of actors
+
+#. Cluster membership and cluster event subscriptions
+
+#. Durable actor mailboxes of different sizes and shapes - file-backed,
+ Redis-backed, ZooKeeper-backed, Beanstalkd-backed and with AMQP and JMS-based
+ in the works
+
+#. Monitoring influenced by Dapper for cross-machine message tracing and
+ JMX-exposed statistics
+
+Read more `here `_.
diff --git a/akka-docs/manual/more.png b/akka-docs/manual/more.png
deleted file mode 100644
index 3eb7b05c84..0000000000
Binary files a/akka-docs/manual/more.png and /dev/null differ
diff --git a/akka-docs/pending/Migration-1.0-1.1.rst b/akka-docs/pending/Migration-1.0-1.1.rst
deleted file mode 100644
index b9f88bf4fc..0000000000
--- a/akka-docs/pending/Migration-1.0-1.1.rst
+++ /dev/null
@@ -1,32 +0,0 @@
-Moved to Scala 2.9.x
-^^^^^^^^^^^^^^^^^^^^
-
-Akka HTTP
-=========
-
-# akka.servlet.Initializer has been moved to akka-kernel to be able to have akka-http not depend on akka-remote, if you don't want to use the class for kernel, just create your own version of akka.servlet.Initializer, it's just a couple of lines of code and there is instructions here: `Akka Http Docs `_
-# akka.http.ListWriter has been removed in full, if you use it and want to keep using it, here's the code: `ListWriter `_
-# Jersey-server is now a "provided" dependency for Akka-http, so you'll need to add the dependency to your project, it's built against Jersey 1.3
-
-Akka Actor
-==========
-
-# is now dependency free, with the exception of the dependency on the scala-library.jar
-# does not bundle any logging anymore, but you can subscribe to events within Akka by registering an event handler on akka.aevent.EventHandler or by specifying the FQN of an Actor in the akka.conf under akka.event-handlers; there is an akka-slf4j module which still provides the Logging trait and a default SLF4J logger adapter.
-# If you used HawtDispatcher and want to continue using it, you need to include akka-dispatcher-extras.jar from Akka Modules, in your akka.conf you need to specify: "akka.dispatch.HawtDispatcherConfigurator" instead of "HawtDispatcher"
-# FSM: the onTransition method changed from Function1 to PartialFunction; there is an implicit conversion for the precise types in place, but it may be necessary to add an underscore if you are passing an eta-expansion (using a method as function value).
-
-Akka Typed Actor
-================
-
-All methods starting with 'get*' are deprecated and will be removed in post 1.1 release.
-
-Akka Remote
-===========
-
-# UnparsebleException => CannotInstantiateRemoteExceptionDueToRemoteProtocolParsingErrorException(exception, classname, message)
-
-Akka Testkit
-============
-
-The TestKit moved into the akka-testkit subproject and correspondingly into the akka.testkit package.
diff --git a/akka-docs/pending/articles.rst b/akka-docs/pending/articles.rst
index 2b2e4b1b8b..06f01f9a7d 100644
--- a/akka-docs/pending/articles.rst
+++ b/akka-docs/pending/articles.rst
@@ -16,6 +16,8 @@ Videos
`Akka talk at Scala Days - March 2010 `_
+`Devoxx 2010 talk "Akka: Simpler Scalability, Fault-Tolerance, Concurrency" by Viktor Klang `_
+
Articles
--------
diff --git a/akka-docs/pending/cluster-membership.rst b/akka-docs/pending/cluster-membership.rst
deleted file mode 100644
index 6aa70e8bce..0000000000
--- a/akka-docs/pending/cluster-membership.rst
+++ /dev/null
@@ -1,89 +0,0 @@
-Cluster Membership (Scala)
-==========================
-
-Module stability: **IN PROGRESS**
-
-Akka supports a Cluster Membership through a `JGroups `_ based implementation. JGroups is is a `P2P `_ clustering API
-
-Configuration
--------------
-
-The cluster is configured in 'akka.conf' by adding the Fully Qualified Name (FQN) of the actor class and serializer:
-
-.. code-block:: ruby
-
- remote {
- cluster {
- service = on
- name = "default" # The name of the cluster
- serializer = "akka.serialization.Serializer$Java" # FQN of the serializer class
- }
- }
-
-How to join the cluster
------------------------
-
-The node joins the cluster when the 'RemoteNode' and/or 'RemoteServer' servers are started.
-
-Cluster API
------------
-
-Interaction with the cluster is done through the 'akka.remote.Cluster' object.
-
-To send a message to all actors of a specific type on other nodes in the cluster use the 'relayMessage' function:
-
-.. code-block:: scala
-
- def relayMessage(to: Class[_ <: Actor], msg: AnyRef): Unit
-
-Here is an example:
-
-.. code-block:: scala
-
- Cluster.relayMessage(classOf[ATypeOfActor], message)
-
-Traversing the remote nodes in the cluster to spawn remote actors:
-
-Cluster.foreach:
-
-.. code-block:: scala
-
- def foreach(f : (RemoteAddress) => Unit) : Unit
-
-Here's an example:
-
-.. code-block:: scala
-
- for(endpoint <- Cluster) spawnRemote[KungFuActor](endpoint.hostname,endpoint.port)
-
-and:
-
-.. code-block:: scala
-
- Cluster.foreach( endpoint => spawnRemote[KungFuActor](endpoint.hostname,endpoint.port) )
-
-Cluster.lookup:
-
-.. code-block:: scala
-
- def lookup[T](handleRemoteAddress : PartialFunction[RemoteAddress, T]) : Option[T]
-
-Here is an example:
-
-.. code-block:: scala
-
- val myRemoteActor: Option[SomeActorType] = Cluster.lookup({
- case RemoteAddress(hostname, port) => spawnRemote[SomeActorType](hostname, port)
- })
-
- myRemoteActor.foreach(remoteActor => ...)
-
-Here is another example:
-
-.. code-block:: scala
- Cluster.lookup({
- case remoteAddress @ RemoteAddress(_,_) => remoteAddress
- }) match {
- case Some(remoteAddress) => spawnAllRemoteActors(remoteAddress)
- case None => handleNoRemoteNodeFound
- }
diff --git a/akka-docs/pending/configuration.rst b/akka-docs/pending/configuration.rst
deleted file mode 100644
index 19d4a1a566..0000000000
--- a/akka-docs/pending/configuration.rst
+++ /dev/null
@@ -1,180 +0,0 @@
-Configuration
-=============
-
-Specifying the configuration file
----------------------------------
-
-If you don't specify a configuration file then Akka is using default values. If you want to override these then you should edit the 'akka.conf' file in the 'AKKA_HOME/config' directory. This config inherits from the 'akka-reference.conf' file that you see below, use your 'akka.conf' to override any property in the reference config.
-
-The config can be specified in a various of ways:
-
-* Define the '-Dakka.config=...' system property option.
-* Put the 'akka.conf' file on the classpath.
-* Define 'AKKA_HOME' environment variable pointing to the root of the Akka distribution, in which the config is taken from the 'AKKA_HOME/config' directory, you can also point to the AKKA_HOME by specifying the '-Dakka.home=...' system property option.
-
-Defining the configuration file
--------------------------------
-
-``_
-####################
-# Akka Config File #
-####################
-
-# This file has all the default settings, so all these could be removed with no visible effect.
-# Modify as needed.
-
-akka {
- version = "1.1-SNAPSHOT" # Akka version, checked against the runtime version of Akka.
-
- enabled-modules = [] # Comma separated list of the enabled modules. Options: ["remote", "camel", "http"]
-
- time-unit = "seconds" # Time unit for all timeout properties throughout the config
-
- event-handlers = ["akka.event.EventHandler$DefaultListener"] # event handlers to register at boot time (EventHandler$DefaultListener logs to STDOUT)
- event-handler-level = "DEBUG" # Options: ERROR, WARNING, INFO, DEBUG
-
- # These boot classes are loaded (and created) automatically when the Akka Microkernel boots up
- # Can be used to bootstrap your application(s)
- # Should be the FQN (Fully Qualified Name) of the boot class which needs to have a default constructor
- # boot = ["sample.camel.Boot",
- # "sample.rest.java.Boot",
- # "sample.rest.scala.Boot",
- # "sample.security.Boot"]
- boot = []
-
- actor {
- timeout = 5 # Default timeout for Future based invocations
- # - Actor: !! && !!!
- # - UntypedActor: sendRequestReply && sendRequestReplyFuture
- # - TypedActor: methods with non-void return type
- serialize-messages = off # Does a deep clone of (non-primitive) messages to ensure immutability
- throughput = 5 # Default throughput for all ExecutorBasedEventDrivenDispatcher, set to 1 for complete fairness
- throughput-deadline-time = -1 # Default throughput deadline for all ExecutorBasedEventDrivenDispatcher, set to 0 or negative for no deadline
- dispatcher-shutdown-timeout = 1 # Using the akka.time-unit, how long dispatchers by default will wait for new actors until they shut down
-
- default-dispatcher {
- type = "GlobalExecutorBasedEventDriven" # Must be one of the following, all "Global*" are non-configurable
- # - ExecutorBasedEventDriven
- # - ExecutorBasedEventDrivenWorkStealing
- # - GlobalExecutorBasedEventDriven
- keep-alive-time = 60 # Keep alive time for threads
- core-pool-size-factor = 1.0 # No of core threads ... ceil(available processors * factor)
- max-pool-size-factor = 4.0 # Max no of threads ... ceil(available processors * factor)
- executor-bounds = -1 # Makes the Executor bounded, -1 is unbounded
- allow-core-timeout = on # Allow core threads to time out
- rejection-policy = "caller-runs" # abort, caller-runs, discard-oldest, discard
- throughput = 5 # Throughput for ExecutorBasedEventDrivenDispatcher, set to 1 for complete fairness
- throughput-deadline-time = -1 # Throughput deadline for ExecutorBasedEventDrivenDispatcher, set to 0 or negative for no deadline
- mailbox-capacity = -1 # If negative (or zero) then an unbounded mailbox is used (default)
- # If positive then a bounded mailbox is used and the capacity is set using the property
- # NOTE: setting a mailbox to 'blocking' can be a bit dangerous,
- # could lead to deadlock, use with care
- #
- # The following are only used for ExecutorBasedEventDriven
- # and only if mailbox-capacity > 0
- mailbox-push-timeout-time = 10 # Specifies the timeout to add a new message to a mailbox that is full - negative number means infinite timeout
- # (in unit defined by the time-unit property)
- }
- }
-
- stm {
- fair = on # Should global transactions be fair or non-fair (non fair yield better performance)
- max-retries = 1000
- timeout = 5 # Default timeout for blocking transactions and transaction set (in unit defined by
- # the time-unit property)
- write-skew = true
- blocking-allowed = false
- interruptible = false
- speculative = true
- quick-release = true
- propagation = "requires"
- trace-level = "none"
- }
-
- jta {
- provider = "from-jndi" # Options: - "from-jndi" (means that Akka will try to detect a TransactionManager in the JNDI)
- # - "atomikos" (means that Akka will use the Atomikos based JTA impl in 'akka-jta',
- # e.g. you need the akka-jta JARs on classpath).
- timeout = 60
- }
-
- http {
- hostname = "localhost"
- port = 9998
-
- #If you are using akka.http.AkkaRestServlet
- filters = ["se.scalablesolutions.akka.security.AkkaSecurityFilterFactory"] # List with all jersey filters to use
- # resource-packages = ["sample.rest.scala",
- # "sample.rest.java",
- # "sample.security"] # List with all resource packages for your Jersey services
- resource-packages = []
-
- # The authentication service to use. Need to be overridden (sample now)
- # authenticator = "sample.security.BasicAuthenticationService"
- authenticator = "N/A"
-
- # Uncomment if you are using the KerberosAuthenticationActor
- # kerberos {
- # servicePrincipal = "HTTP/localhost@EXAMPLE.COM"
- # keyTabLocation = "URL to keytab"
- # kerberosDebug = "true"
- # realm = "EXAMPLE.COM"
- # }
- kerberos {
- servicePrincipal = "N/A"
- keyTabLocation = "N/A"
- kerberosDebug = "N/A"
- realm = ""
- }
-
- #If you are using akka.http.AkkaMistServlet
- mist-dispatcher {
- #type = "GlobalExecutorBasedEventDriven" # Uncomment if you want to use a different dispatcher than the default one for Comet
- }
- connection-close = true # toggles the addition of the "Connection" response header with a "close" value
- root-actor-id = "_httproot" # the id of the actor to use as the root endpoint
- root-actor-builtin = true # toggles the use of the built-in root endpoint base class
- timeout = 1000 # the default timeout for all async requests (in ms)
- expired-header-name = "Async-Timeout" # the name of the response header to use when an async request expires
- expired-header-value = "expired" # the value of the response header to use when an async request expires
- }
-
- remote {
-
- # secure-cookie = "050E0A0D0D06010A00000900040D060F0C09060B" # generate your own with '$AKKA_HOME/scripts/generate_secure_cookie.sh' or using 'Crypt.generateSecureCookie'
- secure-cookie = ""
-
- compression-scheme = "zlib" # Options: "zlib" (lzf to come), leave out for no compression
- zlib-compression-level = 6 # Options: 0-9 (1 being fastest and 9 being the most compressed), default is 6
-
- layer = "akka.remote.netty.NettyRemoteSupport"
-
- server {
- hostname = "localhost" # The hostname or IP that clients should connect to
- port = 2552 # The port clients should connect to. Default is 2552 (AKKA)
- message-frame-size = 1048576 # Increase this if you want to be able to send messages with large payloads
- connection-timeout = 1
- require-cookie = off # Should the remote server require that it peers share the same secure-cookie (defined in the 'remote' section)?
- untrusted-mode = off # Enable untrusted mode for full security of server managed actors, allows untrusted clients to connect.
- backlog = 4096 # Sets the size of the connection backlog
- execution-pool-keepalive = 60# Length in akka.time-unit how long core threads will be kept alive if idling
- execution-pool-size = 16# Size of the core pool of the remote execution unit
- max-channel-memory-size = 0 # Maximum channel size, 0 for off
- max-total-memory-size = 0 # Maximum total size of all channels, 0 for off
- }
-
- client {
- buffering {
- retry-message-send-on-failure = on
- capacity = -1 # If negative (or zero) then an unbounded mailbox is used (default)
- # If positive then a bounded mailbox is used and the capacity is set using the property
- }
- reconnect-delay = 5
- read-timeout = 10
- message-frame-size = 1048576
- reap-futures-delay = 5
- reconnection-time-window = 600 # Maximum time window that a client should try to reconnect for
- }
- }
-}
-``_
diff --git a/akka-docs/pending/actors-scala.rst b/akka-docs/scala/actors.rst
similarity index 99%
rename from akka-docs/pending/actors-scala.rst
rename to akka-docs/scala/actors.rst
index fc456ba71e..70f9e0cfcc 100644
--- a/akka-docs/pending/actors-scala.rst
+++ b/akka-docs/scala/actors.rst
@@ -1,5 +1,5 @@
-Actors (Scala)
-==============
+Actors
+======
Module stability: **SOLID**
diff --git a/akka-docs/manual/fsm-scala.rst b/akka-docs/scala/fsm.rst
similarity index 100%
rename from akka-docs/manual/fsm-scala.rst
rename to akka-docs/scala/fsm.rst
diff --git a/akka-docs/scala/index.rst b/akka-docs/scala/index.rst
new file mode 100644
index 0000000000..645efccf41
--- /dev/null
+++ b/akka-docs/scala/index.rst
@@ -0,0 +1,8 @@
+Scala API
+=========
+
+.. toctree::
+ :maxdepth: 2
+
+ actors
+ fsm
diff --git a/akka-docs/scala/migration-guide-0.7.x-0.8.x.rst b/akka-docs/scala/migration-guide-0.7.x-0.8.x.rst
new file mode 100644
index 0000000000..5c45eb76c1
--- /dev/null
+++ b/akka-docs/scala/migration-guide-0.7.x-0.8.x.rst
@@ -0,0 +1,94 @@
+Migrate from 0.7.x to 0.8.x
+===========================
+
+This is a case-by-case migration guide from Akka 0.7.x (on Scala 2.7.7) to Akka 0.8.x (on Scala 2.8.x)
+------------------------------------------------------------------------------------------------------
+
+Cases:
+======
+
+Actor.send is removed and replaced in full with Actor.!
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. code-block:: scala
+
+ myActor send "test"
+
+becomes
+
+.. code-block:: scala
+
+ myActor ! "test"
+
+Actor.! now has its implicit sender defaulted to None
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. code-block:: scala
+
+ def !(message: Any)(implicit sender: Option[Actor] = None)
+
+"import Actor.Sender.Self" has been removed because it's not needed anymore
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Remove
+
+.. code-block:: scala
+
+ import Actor.Sender.Self
+
+Actor.spawn now uses manifests instead of concrete class types
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. code-block:: scala
+
+ val someActor = spawn(classOf[MyActor])
+
+becomes
+
+.. code-block:: scala
+
+ val someActor = spawn[MyActor]
+
+Actor.spawnRemote now uses manifests instead of concrete class types
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. code-block:: scala
+
+ val someActor = spawnRemote(classOf[MyActor],"somehost",1337)
+
+becomes
+
+.. code-block:: scala
+
+ val someActor = spawnRemote[MyActor]("somehost",1337)
+
+Actor.spawnLink now uses manifests instead of concrete class types
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. code-block:: scala
+
+ val someActor = spawnLink(classOf[MyActor])
+
+becomes
+
+.. code-block:: scala
+
+ val someActor = spawnLink[MyActor]
+
+Actor.spawnLinkRemote now uses manifests instead of concrete class types
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. code-block:: scala
+
+ val someActor = spawnLinkRemote(classOf[MyActor],"somehost",1337)
+
+becomes
+
+.. code-block:: scala
+
+ val someActor = spawnLinkRemote[MyActor]("somehost",1337)
+
+**Transaction.atomic and friends are moved into Transaction.Local._ and Transaction.Global._**
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+We now make a difference between transaction management that are local within a thread and global across many threads (and actors).
diff --git a/akka-docs/scala/migration-guide-0.8.x-0.9.x.rst b/akka-docs/scala/migration-guide-0.8.x-0.9.x.rst
new file mode 100644
index 0000000000..359cb01602
--- /dev/null
+++ b/akka-docs/scala/migration-guide-0.8.x-0.9.x.rst
@@ -0,0 +1,170 @@
+**This document describes the changes between the 0.8.x and the 0.9 release.**
+
+Background for the new ActorRef
+===============================
+
+In the work towards 0.9 release we have now done a major change to how Actors are created. In short we have separated identity and value, created an 'ActorRef' that holds the actual Actor instance. This allows us to do many great things such as for example:
+
+* Create serializable, immutable, network-aware Actor references that can be freely shared across the network. They "remember" their origin and will always work as expected.
+* Not only kill and restart the same supervised Actor instance when it has crashed (as we do now), but dereference it, throw it away and make it eligible for garbage collection.
+* etc. much more
+
+These work very much like the 'PID' (process id) in Erlang.
+
+These changes mean that there is no difference in defining Actors. You still use the old Actor trait, all methods are there etc. But you can't just new this Actor up and send messages to it since all its public API methods are gone. They now reside in a new class, 'ActorRef', and you need to use instances of this class to interact with the Actor (sending messages etc.).
+
+Here is a short migration guide with the things that you have to change. It is a big conceptual change but in practice you don't have to change much.
+
+Migration Guide
+===============
+
+Creating Actors with default constructor
+----------------------------------------
+
+From:
+
+.. code-block:: scala
+
+ val a = new MyActor
+ a ! msg
+
+To:
+
+.. code-block:: scala
+
+ import Actor._
+ val a = actorOf[MyActor]
+ a ! msg
+
+You can also start it in the same statement:
+
+.. code-block:: scala
+
+ val a = actorOf[MyActor].start
+
+Creating Actors with non-default constructor
+--------------------------------------------
+
+From:
+
+.. code-block:: scala
+
+ val a = new MyActor(..)
+ a ! msg
+
+To:
+
+.. code-block:: scala
+
+ import Actor._
+ val a = actorOf(new MyActor(..))
+ a ! msg
+
+Use of 'self' ActorRef API
+--------------------------
+
+Where you have used 'this' to refer to the Actor from within itself now use 'self':
+
+.. code-block:: scala
+
+ self ! MessageToMe
+
+Now the Actor trait only has the callbacks you can implement:
+* receive
+* postRestart/preRestart
+* init/shutdown
+
+It has no state at all.
+
+All API has been moved to ActorRef. The Actor is given its ActorRef through the 'self' member variable.
+Here you find functions like:
+* !, !!, !!! and forward
+* link, unlink, startLink, spawnLink etc
+* makeTransactional, makeRemote etc.
+* start, stop
+* etc.
+
+Here you also find fields like
+* dispatcher = ...
+* id = ...
+* lifeCycle = ...
+* faultHandler = ...
+* trapExit = ...
+* etc.
+
+This means that to use them you have to prefix them with 'self', like this:
+
+.. code-block:: scala
+
+ self ! Message
+
+However, for convenience you can import these functions and fields like below, which will allow you to drop the 'self' prefix:
+
+.. code-block:: scala
+
+ class MyActor extends Actor {
+ import self._
+ id = ...
+ dispatcher = ...
+ spawnLink[OtherActor]
+ ...
+ }
+
+Serialization
+=============
+
+If you want to serialize it yourself, here is how to do it:
+
+.. code-block:: scala
+
+ val actorRef1 = actorOf[MyActor]
+
+ val bytes = actorRef1.toBinary
+
+ val actorRef2 = ActorRef.fromBinary(bytes)
+
+If you are also using Protobuf then you can use the methods that work with Protobuf's Messages directly.
+
+.. code-block:: scala
+
+ val actorRef1 = actorOf[MyActor]
+
+ val protobufMessage = actorRef1.toProtocol
+
+ val actorRef2 = ActorRef.fromProtocol(protobufMessage)
+
+Camel
+======
+
+Some methods of the se.scalablesolutions.akka.camel.Message class have been deprecated in 0.9. These are
+
+.. code-block:: scala
+
+ package se.scalablesolutions.akka.camel
+
+ case class Message(...) {
+ // ...
+ @deprecated def bodyAs[T](clazz: Class[T]): T
+ @deprecated def setBodyAs[T](clazz: Class[T]): Message
+ // ...
+ }
+
+They will be removed in 1.0. Instead use
+
+.. code-block:: scala
+
+ package se.scalablesolutions.akka.camel
+
+ case class Message(...) {
+ // ...
+ def bodyAs[T](implicit m: Manifest[T]): T =
+ def setBodyAs[T](implicit m: Manifest[T]): Message
+ // ...
+ }
+
+Usage example:
+.. code-block:: scala
+
+ val m = Message(1.4)
+ val b = m.bodyAs[String]
+
diff --git a/akka-docs/scala/migration-guide-0.9.x-0.10.x.rst b/akka-docs/scala/migration-guide-0.9.x-0.10.x.rst
new file mode 100644
index 0000000000..68ec0cb087
--- /dev/null
+++ b/akka-docs/scala/migration-guide-0.9.x-0.10.x.rst
@@ -0,0 +1,45 @@
+Migration Guide from Akka 0.9.x to Akka 0.10.x
+==============================================
+
+Module akka-camel
+-----------------
+
+The following list summarizes the breaking changes since Akka 0.9.1.
+
+* CamelService moved from package se.scalablesolutions.akka.camel.service one level up to se.scalablesolutions.akka.camel.
+* CamelService.newInstance removed. For starting and stopping a CamelService, applications should use
+** CamelServiceManager.startCamelService and
+** CamelServiceManager.stopCamelService.
+* Existing def receive = produce method definitions from Producer implementations must be removed (resolves compile error: method receive needs override modifier).
+* The Producer.async method and the related Sync trait have been removed. This is now fully covered by Camel's `asynchronous routing engine `_.
+* @consume annotation can not placed any longer on actors (i.e. on type-level), only on typed actor methods. Consumer actors must mixin the Consumer trait.
+* @consume annotation moved to package se.scalablesolutions.akka.camel.
+
+Logging
+-------
+
+We've switched to Logback (SLF4J compatible) for the logging, if you're having trouble seeing your log output you'll need to make sure that there's a logback.xml available on the classpath or you'll need to specify the location of the logback.xml file via the system property, ex: -Dlogback.configurationFile=/path/to/logback.xml
+
+Configuration
+-------------
+
+* The configuration is now JSON-style (see below).
+* Now you can define the time-unit to be used throughout the config file:
+
+.. code-block:: ruby
+
+ akka {
+ version = "0.10"
+ time-unit = "seconds" # default timeout time unit for all timeout properties throughout the config
+
+ actor {
+ timeout = 5 # default timeout for future based invocations
+ throughput = 5 # default throughput for ExecutorBasedEventDrivenDispatcher
+ }
+ ...
+ }
+
+RemoteClient events
+-------------------
+
+All events now have a reference to the RemoteClient instance instead of 'hostname' and 'port'. This is more flexible. Enables simpler reconnecting etc.
diff --git a/akka-docs/scala/migration-guide-1.0.x-1.1.x.rst b/akka-docs/scala/migration-guide-1.0.x-1.1.x.rst
new file mode 100644
index 0000000000..c32b2545ac
--- /dev/null
+++ b/akka-docs/scala/migration-guide-1.0.x-1.1.x.rst
@@ -0,0 +1,37 @@
+Akka has now moved to Scala 2.9.x
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Akka HTTP
+=========
+
+# akka.servlet.Initializer has been moved to ``akka-kernel`` to be able to have ``akka-http`` not depend on ``akka-remote``, if you don't want to use the class for kernel, just create your own version of ``akka.servlet.Initializer``, it's just a couple of lines of code and there is instructions here: `Akka Http Docs `_
+# akka.http.ListWriter has been removed in full, if you use it and want to keep using it, here's the code: `ListWriter `_
+# Jersey-server is now a "provided" dependency for ``akka-http``, so you'll need to add the dependency to your project, it's built against Jersey 1.3
+
+Akka Actor
+==========
+
+# is now dependency free, with the exception of the dependency on the ``scala-library.jar``
+# does not bundle any logging anymore, but you can subscribe to events within Akka by registering an event handler on akka.aevent.EventHandler or by specifying the ``FQN`` of an Actor in the akka.conf under akka.event-handlers; there is an ``akka-slf4j`` module which still provides the Logging trait and a default ``SLF4J`` logger adapter.
+Don't forget to add a SLF4J backend though, we recommend:
+
+.. code-block:: scala
+ lazy val logback = "ch.qos.logback" % "logback-classic" % "0.9.28"
+
+# If you used HawtDispatcher and want to continue using it, you need to include akka-dispatcher-extras.jar from Akka Modules, in your akka.conf you need to specify: ``akka.dispatch.HawtDispatcherConfigurator`` instead of ``HawtDispatcher``
+# FSM: the onTransition method changed from Function1 to PartialFunction; there is an implicit conversion for the precise types in place, but it may be necessary to add an underscore if you are passing an eta-expansion (using a method as function value).
+
+Akka Typed Actor
+================
+
+All methods starting with 'get*' are deprecated and will be removed in post 1.1 release.
+
+Akka Remote
+===========
+
+# ``UnparsebleException`` has been renamed to ``CannotInstantiateRemoteExceptionDueToRemoteProtocolParsingErrorException(exception, classname, message)``
+
+Akka Testkit
+============
+
+The TestKit moved into the akka-testkit subproject and correspondingly into the ``akka.testkit`` package.
diff --git a/akka-docs/scala/migration-guides.rst b/akka-docs/scala/migration-guides.rst
new file mode 100644
index 0000000000..361f8e3c7a
--- /dev/null
+++ b/akka-docs/scala/migration-guides.rst
@@ -0,0 +1,8 @@
+Here are migration guides for the latest releases
+=================================================
+
+* `Migrate 0.7.x -> 0.8.x `_
+* `Migrate 0.8.x -> 0.9.x `_
+* `Migrate 0.9.x -> 0.10.x `_
+* `Migrate 0.10.x -> 1.0.x `_
+* `Migrate 1.0.x -> 1.1.x `_
diff --git a/akka-remote/src/test/scala/remote/ServerInitiatedRemoteActorSpec.scala b/akka-remote/src/test/scala/remote/ServerInitiatedRemoteActorSpec.scala
index b8f4eb2748..a47c895027 100644
--- a/akka-remote/src/test/scala/remote/ServerInitiatedRemoteActorSpec.scala
+++ b/akka-remote/src/test/scala/remote/ServerInitiatedRemoteActorSpec.scala
@@ -189,7 +189,7 @@ class ServerInitiatedRemoteActorSpec extends AkkaRemoteTest {
while(!testDone()) {
if (latch.await(200, TimeUnit.MILLISECONDS))
- error("Test didn't complete within 100 cycles")
+ sys.error("Test didn't complete within 100 cycles")
else
latch.countDown()
}
diff --git a/akka-tutorials/akka-tutorial-first/src/main/java/akka/tutorial/first/java/Pi.java b/akka-tutorials/akka-tutorial-first/src/main/java/akka/tutorial/first/java/Pi.java
index cd5ffc9c06..8c0085fb97 100644
--- a/akka-tutorials/akka-tutorial-first/src/main/java/akka/tutorial/first/java/Pi.java
+++ b/akka-tutorials/akka-tutorial-first/src/main/java/akka/tutorial/first/java/Pi.java
@@ -56,15 +56,15 @@ public class Pi {
static class Calculate {}
static class Work {
- private final int arg;
+ private final int start;
private final int nrOfElements;
- public Work(int arg, int nrOfElements) {
- this.arg = arg;
+ public Work(int start, int nrOfElements) {
+ this.start = start;
this.nrOfElements = nrOfElements;
}
- public int getArg() { return arg; }
+ public int getStart() { return start; }
public int getNrOfElements() { return nrOfElements; }
}
@@ -84,10 +84,10 @@ public class Pi {
static class Worker extends UntypedActor {
// define the work
- private double calculatePiFor(int arg, int nrOfElements) {
+ private double calculatePiFor(int start, int nrOfElements) {
double acc = 0.0;
- for (int i = arg * nrOfElements; i <= ((arg + 1) * nrOfElements - 1); i++) {
- acc += 4 * Math.pow(-1, i) / (2 * i + 1);
+ for (int i = start * nrOfElements; i <= ((start + 1) * nrOfElements - 1); i++) {
+ acc += 4.0 * (1 - (i % 2) * 2) / (2 * i + 1);
}
return acc;
}
@@ -98,7 +98,7 @@ public class Pi {
Work work = (Work) message;
// perform the work
- double result = calculatePiFor(work.getArg(), work.getNrOfElements());
+ double result = calculatePiFor(work.getStart(), work.getNrOfElements());
// reply with the result
getContext().replyUnsafe(new Result(result));
@@ -157,8 +157,8 @@ public class Pi {
if (message instanceof Calculate) {
// schedule work
- for (int arg = 0; arg < nrOfMessages; arg++) {
- router.sendOneWay(new Work(arg, nrOfElements), getContext());
+ for (int start = 0; start < nrOfMessages; start++) {
+ router.sendOneWay(new Work(start, nrOfElements), getContext());
}
// send a PoisonPill to all workers telling them to shut down themselves
diff --git a/akka-tutorials/akka-tutorial-first/src/main/scala/Pi.scala b/akka-tutorials/akka-tutorial-first/src/main/scala/Pi.scala
index 117287f949..c16c53f995 100644
--- a/akka-tutorials/akka-tutorial-first/src/main/scala/Pi.scala
+++ b/akka-tutorials/akka-tutorial-first/src/main/scala/Pi.scala
@@ -59,7 +59,7 @@ object Pi extends App {
def calculatePiFor(start: Int, nrOfElements: Int): Double = {
var acc = 0.0
for (i <- start until (start + nrOfElements))
- acc += 4 * math.pow(-1, i) / (2 * i + 1)
+ acc += 4.0 * (1 - (i % 2) * 2) / (2 * i + 1)
acc
}
diff --git a/project/build/AkkaProject.scala b/project/build/AkkaProject.scala
index 319883e1a6..e3eeac4fb2 100644
--- a/project/build/AkkaProject.scala
+++ b/project/build/AkkaProject.scala
@@ -159,8 +159,8 @@ class AkkaParentProject(info: ProjectInfo) extends DefaultProject(info) {
lazy val sjson = "net.debasishg" % "sjson_2.9.0.RC1" % "0.11" % "compile" //ApacheV2
lazy val sjson_test = "net.debasishg" % "sjson_2.9.0.RC1" % "0.11" % "test" //ApacheV2
- lazy val slf4j = "org.slf4j" % "slf4j-api" % "1.6.0"
- lazy val logback = "ch.qos.logback" % "logback-classic" % "0.9.24"
+ lazy val slf4j = "org.slf4j" % "slf4j-api" % SLF4J_VERSION
+ lazy val logback = "ch.qos.logback" % "logback-classic" % "0.9.28" % "runtime"
// Test